feat: knowledge workflow (#4399)
* feat: init knowledge workflow
* feat: add knowledge workflow and version models, serializers, and API views
* feat: knowledge workflow
* feat: knowledge workflow
* feat: add KnowledgeWorkflowModelSerializer and Operate class for workflow management
* fix: route
* feat: knowledge workflow
* feat: Knowledge workflow permission
* feat: knowledge workflow
* feat: knowledge workflow
* feat: knowledge workflow
* feat: knowledge workflow
* feat: Data source web node
* fix: Back route
* feat: knowledge workflow
* feat: knowledge workflow
* feat: Knowledge write node
* feat: add Data Source tool functionality and localization
* feat: add Data Source tool functionality and localization
* feat: knowledge workflow
* feat: knowledge workflow
* fix: simplify export tool permission check in ToolListContainer.vue
* fix: simplify export condition in ToolResourceIndex.vue
* fix: simplify condition for copying tool in ToolListContainer
* feat: knowledge workflow
* fix: Upload local files and add output fields
* feat: Knowledge write
* feat: add Document Split Node functionality and localization
* feat: add Document Split Node functionality and localization
* feat: Knowledge write
* feat: enhance Document Split Node with result processing and problem list generation
* fix: Allow problem be blank
* feat: enhance Document Split Node with result processing and problem list generation
* feat: tool datasource
* fix: Optimization of knowledge base workflow execution logic
* refactor: streamline image handling by updating application and knowledge ID management
* refactor: streamline image handling by updating application and knowledge ID management
* feat: extend support modes in variable aggregation node to include knowledge workflows
* feat: Chunks stored
* refactor: simplify file handling in document extraction by removing unnecessary byte conversion and enhancing file saving logic
* refactor: update file ID assignment in document extraction to use provided metadata
* feat: Workflow menu that distinguishes between applications and knowledge bases
* refactor: update file ID assignment in document extraction to use provided metadata
* fix: Add workspace ID as workflow execution parameter
* feat: add code template for Data Source tool form functionality
* refactor: remove unused sys import and improve module handling
* feat: Execution details support loading status
* refactor: update tool type handling and improve category merging logic
* feat: Alter fork depth
* fix: ensure filterList is properly initialized and updated in getList function
* refactor: simplify ToolStoreDialog by removing unused toolType logic
* perf: Optimize the style
* style: adjust div width for improved layout in Tree component
* refactor: improve polling mechanism for knowledge workflow action
* fix: Get workspace_id from workflow params
* fix: filter out 'file_bytes' from result in get_details method
* feat: add recursive filtering for file_bytes in context data
* fix: append results to paragraph_list instead of replacing it
* perf: Optimize translation files
* fix: include document name in bytes_to_uploaded_file call for better file handling
* refactor: optimize buffer retrieval in document processing
* refactor: remove redundant parameter from bytes_to_uploaded_file call
* fix: Page style optimization
* feat: add slider for setting limit in document rules form
* feat: add workflow knowledge management endpoints and related functionality
* fix: swap file size and file count limits in form inputs
* refactor: update tool_config args to use list format for improved readability
* feat: Node supports knowledge base workflow
* feat: Node supports knowledge base workflow
* fix: Basic node data cannot be obtained in the workflow
* style: Knowledge base workflow debugging page style adjustment
* fix: Loop nodes cannot be used in the knowledge base workflow
* fix: Knowledge base workflow variable assignment node
* feat: add chunk size slider to form for custom split strategy
* fix: Workflow style optimization
---------
Co-authored-by: CaptainB <bin@fit2cloud.com>
Co-authored-by: zhangzhanwei <zhanwei.zhang@fit2cloud.com>
Co-authored-by: wangdan-fit2cloud <dan.wang@fit2cloud.com>
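Reviewer note: the run is driven by the new `KnowledgeFlowParamsSerializer` (see `i_step_node.py` in the diff below). A minimal illustration of the payload shape, with placeholder IDs and a hypothetical local-file data source — the exact keys inside `data_source` depend on which data-source node starts the run:

```python
# Illustrative only: every ID below is a placeholder.
knowledge_flow_params = {
    "knowledge_id": "018f2d6e-0000-7000-8000-000000000001",         # target knowledge base (UUID)
    "workspace_id": "default",                                       # workspace the run belongs to
    "knowledge_action_id": "018f2d6e-0000-7000-8000-000000000002",   # KnowledgeAction row used for state/progress
    "data_source": {
        "node_id": "data-source-local-node-id",  # the data-source node that acts as the start node
        "file_list": [],                          # uploaded files for the local data source
    },
    "knowledge_base": {},  # optional knowledge-base settings forwarded by the data-source node
}
```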
|
|
@ -6,7 +6,7 @@
|
|||
@date:2024/12/11 17:57
|
||||
@desc:
|
||||
"""
|
||||
|
||||
from enum import Enum
|
||||
from typing import List, Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
|
|
@ -90,6 +90,16 @@ class EdgeNode:
|
|||
self.node = node
|
||||
|
||||
|
||||
class WorkflowMode(Enum):
|
||||
APPLICATION = "application"
|
||||
|
||||
APPLICATION_LOOP = "application-loop"
|
||||
|
||||
KNOWLEDGE = "knowledge"
|
||||
|
||||
KNOWLEDGE_LOOP = "knowledge-loop"
|
||||
|
||||
|
||||
class Workflow:
|
||||
"""
|
||||
Node list
|
||||
|
|
@ -112,7 +122,10 @@ class Workflow:
|
|||
"""
|
||||
next_node_map: Dict[str, List[EdgeNode]]
|
||||
|
||||
def __init__(self, nodes: List[Node], edges: List[Edge]):
|
||||
workflow_mode: WorkflowMode
|
||||
|
||||
def __init__(self, nodes: List[Node], edges: List[Edge],
|
||||
workflow_mode: WorkflowMode = WorkflowMode.APPLICATION):
|
||||
self.nodes = nodes
|
||||
self.edges = edges
|
||||
self.node_map = {node.id: node for node in nodes}
|
||||
|
|
@ -125,6 +138,7 @@ class Workflow:
|
|||
self.next_node_map = {key: [EdgeNode(edge, self.node_map.get(edge.targetNodeId)) for edge in edges] for
|
||||
key, edges in
|
||||
group_by(edges, key=lambda edge: edge.sourceNodeId).items()}
|
||||
self.workflow_mode = workflow_mode
|
||||
|
||||
def get_node(self, node_id):
|
||||
"""
|
||||
|
|
@ -167,13 +181,13 @@ class Workflow:
|
|||
return [en.node for en in self.next_node_map.get(node_id, [])]
|
||||
|
||||
@staticmethod
|
||||
def new_instance(flow_obj: Dict):
|
||||
def new_instance(flow_obj: Dict, workflow_mode: WorkflowMode = WorkflowMode.APPLICATION):
|
||||
nodes = flow_obj.get('nodes')
|
||||
edges = flow_obj.get('edges')
|
||||
nodes = [Node(node.get('id'), node.get('type'), **node)
|
||||
for node in nodes]
|
||||
edges = [Edge(edge.get('id'), edge.get('type'), **edge) for edge in edges]
|
||||
return Workflow(nodes, edges)
|
||||
return Workflow(nodes, edges, workflow_mode)
|
||||
|
||||
def get_start_node(self):
|
||||
return self.get_node('start-node')
|
||||
|
|
@ -190,10 +204,9 @@ class Workflow:
|
|||
self.is_valid_base_node()
|
||||
self.is_valid_work_flow()
|
||||
|
||||
@staticmethod
|
||||
def is_valid_node_params(node: Node):
|
||||
def is_valid_node_params(self, node: Node):
|
||||
from application.flow.step_node import get_node
|
||||
get_node(node.type)(node, None, None)
|
||||
get_node(node.type, self.workflow_mode)(node, None, None)
|
||||
|
||||
def is_valid_node(self, node: Node):
|
||||
self.is_valid_node_params(node)
|
||||
|
|
|
|||
|
|
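Reviewer note: a framework-free sketch of what the constructor above builds, using stand-in dataclasses rather than the real `Node`/`Edge` classes. Edges are grouped by their source node to form `next_node_map`, and the new `workflow_mode` defaults to the application mode:

```python
from dataclasses import dataclass
from itertools import groupby


@dataclass
class Node:            # stand-in for application.flow.common.Node
    id: str
    type: str


@dataclass
class Edge:            # stand-in for application.flow.common.Edge
    sourceNodeId: str
    targetNodeId: str


def build_next_node_map(edges):
    # Same idea as group_by(edges, key=lambda edge: edge.sourceNodeId) in the diff.
    ordered = sorted(edges, key=lambda e: e.sourceNodeId)
    return {key: list(group) for key, group in groupby(ordered, key=lambda e: e.sourceNodeId)}


nodes = [Node("start-node", "start-node"), Node("split", "document-split-node")]
edges = [Edge("start-node", "split")]
node_map = {node.id: node for node in nodes}        # mirrors self.node_map
next_node_map = build_next_node_map(edges)          # mirrors self.next_node_map
print(next_node_map["start-node"][0].targetNodeId)  # split
```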
@ -21,6 +21,7 @@ from application.flow.common import Answer, NodeChunk
|
|||
from application.models import ApplicationChatUserStats
|
||||
from application.models import ChatRecord, ChatUserType
|
||||
from common.field.common import InstanceField
|
||||
from knowledge.models.knowledge_action import KnowledgeAction, State
|
||||
|
||||
chat_cache = cache
|
||||
|
||||
|
|
@ -78,7 +79,8 @@ class WorkFlowPostHandler:
|
|||
message_tokens=message_tokens,
|
||||
answer_tokens=answer_tokens,
|
||||
answer_text_list=answer_text_list,
|
||||
run_time=time.time() - workflow.context['start_time'],
|
||||
run_time=time.time() - workflow.context.get('start_time') if workflow.context.get(
|
||||
'start_time') is not None else 0,
|
||||
index=0)
|
||||
|
||||
self.chat_info.append_chat_record(chat_record)
|
||||
|
|
@ -97,6 +99,16 @@ class WorkFlowPostHandler:
|
|||
self.chat_info = None
|
||||
|
||||
|
||||
class KnowledgeWorkflowPostHandler(WorkFlowPostHandler):
|
||||
def __init__(self, chat_info, knowledge_action_id):
|
||||
super().__init__(chat_info)
|
||||
self.knowledge_action_id = knowledge_action_id
|
||||
|
||||
def handler(self, workflow):
|
||||
QuerySet(KnowledgeAction).filter(id=self.knowledge_action_id).update(
|
||||
state=State.SUCCESS)
|
||||
|
||||
|
||||
class NodeResult:
|
||||
def __init__(self, node_variable: Dict, workflow_variable: Dict,
|
||||
_write_context=write_context, _is_interrupt=is_interrupt):
|
||||
|
|
@ -153,6 +165,14 @@ class FlowParamsSerializer(serializers.Serializer):
|
|||
debug = serializers.BooleanField(required=True, label="是否debug")
|
||||
|
||||
|
||||
class KnowledgeFlowParamsSerializer(serializers.Serializer):
|
||||
knowledge_id = serializers.UUIDField(required=True, label="Knowledge base ID")
workspace_id = serializers.CharField(required=True, label="Workspace ID")
knowledge_action_id = serializers.UUIDField(required=True, label="Knowledge action ID")
data_source = serializers.DictField(required=True, label="Data source")
knowledge_base = serializers.DictField(required=False, label="Knowledge base settings")
|
||||
|
||||
|
||||
class INode:
|
||||
view_type = 'many_view'
|
||||
|
||||
|
|
@ -165,7 +185,8 @@ class INode:
|
|||
return None
|
||||
reasoning_content_enable = self.context.get('model_setting', {}).get('reasoning_content_enable', False)
|
||||
return [
|
||||
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {},
|
||||
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params.get('chat_record_id'),
|
||||
{},
|
||||
self.runtime_node_id, self.context.get('reasoning_content', '') if reasoning_content_enable else '')]
|
||||
|
||||
def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None,
|
||||
|
|
@ -222,13 +243,14 @@ class INode:
|
|||
pass
|
||||
|
||||
def get_flow_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return FlowParamsSerializer
|
||||
return self.workflow_manage.get_params_serializer_class()
|
||||
|
||||
def get_write_error_context(self, e):
|
||||
self.status = 500
|
||||
self.answer_text = str(e)
|
||||
self.err_message = str(e)
|
||||
self.context['run_time'] = time.time() - self.context['start_time']
|
||||
current_time = time.time()
|
||||
self.context['run_time'] = current_time - (self.context.get('start_time') or current_time)
|
||||
|
||||
def write_error_context(answer, status=200):
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -0,0 +1,15 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: maxkb
|
||||
@Author:虎
|
||||
@file: workflow_manage.py
|
||||
@date:2024/1/9 17:40
|
||||
@desc:
|
||||
"""
|
||||
from application.flow.i_step_node import KnowledgeFlowParamsSerializer
|
||||
from application.flow.loop_workflow_manage import LoopWorkflowManage
|
||||
|
||||
|
||||
class KnowledgeLoopWorkflowManage(LoopWorkflowManage):
|
||||
def get_params_serializer_class(self):
|
||||
return KnowledgeFlowParamsSerializer
|
||||
|
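Reviewer note: the manage classes now choose the flow-params serializer, and `INode.get_flow_params_serializer_class` delegates to them (see further down). A tiny sketch of that template-method pattern with hypothetical stand-in classes:

```python
class BaseManage:
    def get_params_serializer_class(self):
        return "FlowParamsSerializer"            # application workflows keep the original serializer


class KnowledgeManage(BaseManage):
    def get_params_serializer_class(self):
        return "KnowledgeFlowParamsSerializer"   # knowledge workflows swap in the knowledge serializer


class NodeStub:
    def __init__(self, manage):
        self.workflow_manage = manage

    def get_flow_params_serializer_class(self):
        # Mirrors the new INode behaviour: the node no longer hard-codes the serializer.
        return self.workflow_manage.get_params_serializer_class()


print(NodeStub(KnowledgeManage()).get_flow_params_serializer_class())  # KnowledgeFlowParamsSerializer
```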
|
@ -0,0 +1,102 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎虎
|
||||
@file: Knowledge_workflow_manage.py
|
||||
@date:2025/11/13 19:02
|
||||
@desc:
|
||||
"""
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from django.utils.translation import get_language
|
||||
|
||||
from application.flow.common import Workflow
|
||||
from application.flow.i_step_node import WorkFlowPostHandler, KnowledgeFlowParamsSerializer
|
||||
from application.flow.workflow_manage import WorkflowManage
|
||||
from common.handle.base_to_response import BaseToResponse
|
||||
from common.handle.impl.response.system_to_response import SystemToResponse
|
||||
from knowledge.models.knowledge_action import KnowledgeAction, State
|
||||
|
||||
executor = ThreadPoolExecutor(max_workers=200)
|
||||
|
||||
|
||||
class KnowledgeWorkflowManage(WorkflowManage):
|
||||
|
||||
def __init__(self, flow: Workflow,
|
||||
params,
|
||||
work_flow_post_handler: WorkFlowPostHandler,
|
||||
base_to_response: BaseToResponse = SystemToResponse(),
|
||||
start_node_id=None,
|
||||
start_node_data=None, chat_record=None, child_node=None):
|
||||
super().__init__(flow, params, work_flow_post_handler, base_to_response, None, None, None,
|
||||
None,
|
||||
None, None, start_node_id, start_node_data, chat_record, child_node)
|
||||
|
||||
def get_params_serializer_class(self):
|
||||
return KnowledgeFlowParamsSerializer
|
||||
|
||||
def get_start_node(self):
|
||||
start_node_list = [node for node in self.flow.nodes if
|
||||
self.params.get('data_source', {}).get('node_id') == node.id]
|
||||
return start_node_list[0]
|
||||
|
||||
def run(self):
|
||||
executor.submit(self._run)
|
||||
|
||||
def _run(self):
|
||||
QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update(
|
||||
state=State.STARTED)
|
||||
language = get_language()
|
||||
self.run_chain_async(self.start_node, None, language)
|
||||
while self.is_run():
|
||||
pass
|
||||
self.work_flow_post_handler.handler(self)
|
||||
|
||||
@staticmethod
|
||||
def get_node_details(current_node, node, index):
|
||||
if current_node == node:
|
||||
return {
|
||||
'name': node.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': 0,
|
||||
'type': node.type,
|
||||
'status': 202,
|
||||
'err_message': ""
|
||||
}
|
||||
|
||||
return node.get_details(index)
|
||||
|
||||
def run_chain(self, current_node, node_result_future=None):
|
||||
QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update(
|
||||
details=self.get_runtime_details(lambda node, index: self.get_node_details(current_node, node, index)))
|
||||
if node_result_future is None:
|
||||
node_result_future = self.run_node_future(current_node)
|
||||
try:
|
||||
result = self.hand_node_result(current_node, node_result_future)
|
||||
return result
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
return None
|
||||
|
||||
def hand_node_result(self, current_node, node_result_future):
|
||||
try:
|
||||
current_result = node_result_future.result()
|
||||
result = current_result.write_context(current_node, self)
|
||||
if result is not None:
|
||||
# Block here by draining the generator result
|
||||
list(result)
|
||||
return current_result
|
||||
except Exception as e:
|
||||
traceback.print_exc()
|
||||
self.status = 500
|
||||
current_node.get_write_error_context(e)
|
||||
self.answer += str(e)
|
||||
QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update(
|
||||
details=self.get_runtime_details(),
|
||||
state=State.FAILURE)
|
||||
finally:
|
||||
current_node.node_chunk.end()
|
||||
QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update(
|
||||
details=self.get_runtime_details())
|
||||
|
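Reviewer note: `KnowledgeWorkflowManage.run()` returns immediately; the chain runs on a module-level thread pool, and progress is reported by updating the `KnowledgeAction` row (STARTED, per-node details while running, then SUCCESS or FAILURE via the post handler). A framework-free sketch of that submit-then-poll shape, with a plain dict standing in for the database row:

```python
import time
from concurrent.futures import ThreadPoolExecutor

executor = ThreadPoolExecutor(max_workers=4)
action = {"state": "PENDING", "details": {}}   # stands in for the KnowledgeAction row


def _run():
    action["state"] = "STARTED"
    for step in ("data-source", "document-split", "knowledge-write"):
        action["details"][step] = {"status": 202}   # node reported as still running
        time.sleep(0.05)                            # the real nodes do their work here
        action["details"][step] = {"status": 200}   # node finished
    action["state"] = "SUCCESS"                     # done by the post handler in the real code


def run():
    executor.submit(_run)   # returns immediately, like KnowledgeWorkflowManage.run()


run()
while action["state"] not in ("SUCCESS", "FAILURE"):   # the caller/UI polls the action instead
    time.sleep(0.05)
print(action["state"])
```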
|
@ -105,10 +105,10 @@ class LoopWorkflowManage(WorkflowManage):
|
|||
get_node_params=lambda node: node.properties.get('node_data')):
|
||||
for node in self.flow.nodes:
|
||||
if node.id == node_id:
|
||||
node_instance = get_node(node.type)(node,
|
||||
self.params, self, up_node_id_list,
|
||||
get_node_params,
|
||||
salt=self.get_index())
|
||||
node_instance = get_node(node.type, self.flow.workflow_mode)(node,
|
||||
self.params, self, up_node_id_list,
|
||||
get_node_params,
|
||||
salt=self.get_index())
|
||||
return node_instance
|
||||
return None
|
||||
|
||||
|
|
@ -116,7 +116,7 @@ class LoopWorkflowManage(WorkflowManage):
|
|||
close_old_connections()
|
||||
language = get_language()
|
||||
self.run_chain_async(self.start_node, None, language)
|
||||
return self.await_result()
|
||||
return self.await_result(is_cleanup=False)
|
||||
|
||||
def get_index(self):
|
||||
return self.loop_params.get('index')
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@
|
|||
from .ai_chat_step_node import *
|
||||
from .application_node import BaseApplicationNode
|
||||
from .condition_node import *
|
||||
from .data_source_local_node.impl.base_data_source_local_node import BaseDataSourceLocalNode
|
||||
from .data_source_web_node.impl.base_data_source_web_node import BaseDataSourceWebNode
|
||||
from .direct_reply_node import *
|
||||
from .document_extract_node import *
|
||||
from .form_node import *
|
||||
|
|
@ -16,6 +18,7 @@ from .image_generate_step_node import *
|
|||
from .image_to_video_step_node import BaseImageToVideoNode
|
||||
from .image_understand_step_node import *
|
||||
from .intent_node import *
|
||||
from .knowledge_write_node.impl.base_knowledge_write_node import BaseKnowledgeWriteNode
|
||||
from .loop_break_node import BaseLoopBreakNode
|
||||
from .loop_continue_node import BaseLoopContinueNode
|
||||
from .loop_node import *
|
||||
|
|
@ -36,6 +39,7 @@ from .variable_aggregation_node.impl.base_variable_aggregation_node import BaseV
|
|||
from .variable_assign_node import BaseVariableAssignNode
|
||||
from .variable_splitting_node import BaseVariableSplittingNode
|
||||
from .video_understand_step_node import BaseVideoUnderstandNode
|
||||
from .document_split_node import BaseDocumentSplitNode
|
||||
|
||||
node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseSearchDocumentNode, BaseQuestionNode,
|
||||
BaseConditionNode, BaseReplyNode,
|
||||
|
|
@ -46,11 +50,11 @@ node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseSearc
|
|||
BaseVideoUnderstandNode,
|
||||
BaseIntentNode, BaseLoopNode, BaseLoopStartStepNode,
|
||||
BaseLoopContinueNode,
|
||||
BaseLoopBreakNode, BaseVariableSplittingNode, BaseParameterExtractionNode, BaseVariableAggregationNode]
|
||||
BaseLoopBreakNode, BaseVariableSplittingNode, BaseParameterExtractionNode, BaseVariableAggregationNode,
|
||||
BaseDataSourceLocalNode, BaseDataSourceWebNode, BaseKnowledgeWriteNode, BaseDocumentSplitNode]
|
||||
|
||||
node_map = {n.type: {w: n for w in n.support} for n in node_list}
|
||||
|
||||
|
||||
def get_node(node_type):
|
||||
find_list = [node for node in node_list if node.type == node_type]
|
||||
if len(find_list) > 0:
|
||||
return find_list[0]
|
||||
return None
|
||||
def get_node(node_type, workflow_model):
|
||||
return node_map.get(node_type).get(workflow_model)
|
||||
|
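Reviewer note: the registry above keys each node class first by node type and then by the workflow modes it declares in `support`. A self-contained sketch of that two-level lookup with stand-in classes; note that the chained `.get` in the diff raises `AttributeError` for an unknown `node_type`, so the variant below is a defensive alternative rather than the shipped implementation:

```python
from enum import Enum


class WorkflowMode(Enum):
    APPLICATION = "application"
    KNOWLEDGE = "knowledge"


class ChatNodeStub:
    type = "ai-chat-node"
    support = [WorkflowMode.APPLICATION, WorkflowMode.KNOWLEDGE]


class FormNodeStub:
    type = "form-node"
    support = [WorkflowMode.APPLICATION]   # form nodes stay application-only


node_list = [ChatNodeStub, FormNodeStub]
node_map = {n.type: {mode: n for mode in n.support} for n in node_list}


def get_node(node_type, workflow_mode):
    # Defensive variant: returns None instead of raising when the type is unknown
    # or not supported in the requested mode.
    return node_map.get(node_type, {}).get(workflow_mode)


print(get_node("ai-chat-node", WorkflowMode.KNOWLEDGE))   # <class '__main__.ChatNodeStub'>
print(get_node("form-node", WorkflowMode.KNOWLEDGE))      # None: not supported in knowledge mode
```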
|
|
|||
|
|
@ -11,6 +11,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -34,7 +35,8 @@ class ChatNodeSerializer(serializers.Serializer):
|
|||
mcp_enable = serializers.BooleanField(required=False, label=_("Whether to enable MCP"))
|
||||
mcp_servers = serializers.JSONField(required=False, label=_("MCP Server"))
|
||||
mcp_tool_id = serializers.CharField(required=False, allow_blank=True, allow_null=True, label=_("MCP Tool ID"))
|
||||
mcp_tool_ids = serializers.ListField(child=serializers.UUIDField(), required=False, allow_empty=True, label=_("MCP Tool IDs"), )
|
||||
mcp_tool_ids = serializers.ListField(child=serializers.UUIDField(), required=False, allow_empty=True,
|
||||
label=_("MCP Tool IDs"), )
|
||||
mcp_source = serializers.CharField(required=False, allow_blank=True, allow_null=True, label=_("MCP Source"))
|
||||
|
||||
tool_enable = serializers.BooleanField(required=False, default=False, label=_("Whether to enable tools"))
|
||||
|
|
@ -42,14 +44,22 @@ class ChatNodeSerializer(serializers.Serializer):
|
|||
label=_("Tool IDs"), )
|
||||
mcp_output_enable = serializers.BooleanField(required=False, default=True, label=_("Whether to enable MCP output"))
|
||||
|
||||
|
||||
class IChatNode(INode):
|
||||
type = 'ai-chat-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP,
|
||||
WorkflowMode.KNOWLEDGE]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ChatNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
if self.workflow_manage.flow.workflow_mode in [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
|
||||
**{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None})
|
||||
else:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id,
|
||||
chat_record_id,
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
|
@ -25,6 +26,7 @@ class ApplicationNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IApplicationNode(INode):
|
||||
type = 'application-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ApplicationNodeSerializer
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode
|
||||
|
||||
|
||||
|
|
@ -36,3 +37,5 @@ class IConditionNode(INode):
|
|||
return ConditionNodeParamsSerializer
|
||||
|
||||
type = 'condition-node'
|
||||
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
|
|
|||
|
|
@ -0,0 +1,8 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎虎
|
||||
@file: __init__.py.py
|
||||
@date:2025/11/11 10:06
|
||||
@desc:
|
||||
"""
|
||||
|
|
@ -0,0 +1,42 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎虎
|
||||
@file: i_data_source_local_node.py
|
||||
@date:2025/11/11 10:06
|
||||
@desc:
|
||||
"""
|
||||
from abc import abstractmethod
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
class DataSourceLocalNodeParamsSerializer(serializers.Serializer):
|
||||
file_type_list = serializers.ListField(child=serializers.CharField(label=('')), label='')
|
||||
file_size_limit = serializers.IntegerField(required=True, label=_("Upload file size"))
file_count_limit = serializers.IntegerField(required=True, label=_("Number of uploaded files"))
|
||||
|
||||
|
||||
class IDataSourceLocalNode(INode):
|
||||
type = 'data-source-local-node'
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_form_list(node):
|
||||
pass
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return DataSourceLocalNodeParamsSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, file_type_list, file_size_limit, file_count_limit, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
||||
support = [WorkflowMode.KNOWLEDGE]
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎虎
|
||||
@file: __init__.py.py
|
||||
@date:2025/11/11 10:08
|
||||
@desc:
|
||||
"""
|
||||
|
|
@ -0,0 +1,51 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎虎
|
||||
@file: base_data_source_local_node.py
|
||||
@date:2025/11/11 10:30
|
||||
@desc:
|
||||
"""
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.data_source_local_node.i_data_source_local_node import IDataSourceLocalNode
|
||||
from common import forms
|
||||
from common.forms import BaseForm
|
||||
|
||||
|
||||
class BaseDataSourceLocalNodeForm(BaseForm):
|
||||
api_key = forms.PasswordInputField('API Key', required=True)
|
||||
|
||||
|
||||
class BaseDataSourceLocalNode(IDataSourceLocalNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def get_form_list(node):
|
||||
node_data = node.get('properties').get('node_data')
|
||||
return [{
|
||||
'field': 'file_list',
|
||||
'input_type': 'LocalFileUpload',
|
||||
'attrs': {
|
||||
'file_count_limit': node_data.get('file_count_limit') or 10,
|
||||
'file_size_limit': node_data.get('file_size_limit') or 100,
|
||||
'file_type_list': node_data.get('file_type_list'),
|
||||
},
|
||||
'label': '',
|
||||
}]
|
||||
|
||||
def execute(self, file_type_list, file_size_limit, file_count_limit, **kwargs) -> NodeResult:
|
||||
return NodeResult({'file_list': self.workflow_manage.params.get('data_source', {}).get('file_list')},
|
||||
self.workflow_manage.params.get('knowledge_base') or {})
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'type': self.node.type,
|
||||
'file_list': self.context.get('file_list'),
|
||||
'knowledge_base': self.workflow_params.get('knowledge_base'),
|
||||
'status': self.status,
|
||||
'err_message': self.err_message
|
||||
}
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:niu
|
||||
@file: __init__.py.py
|
||||
@date:2025/11/12 13:43
|
||||
@desc:
|
||||
"""
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:niu
|
||||
@file: i_data_source_web_node.py
|
||||
@date:2025/11/12 13:47
|
||||
@desc:
|
||||
"""
|
||||
from abc import abstractmethod
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
class IDataSourceWebNode(INode):
|
||||
type = 'data-source-web-node'
|
||||
support = [WorkflowMode.KNOWLEDGE]
|
||||
|
||||
@staticmethod
|
||||
@abstractmethod
|
||||
def get_form_list(node):
|
||||
pass
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
@ -0,0 +1,8 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:niu
|
||||
@file: __init__.py
|
||||
@date:2025/11/12 13:44
|
||||
@desc:
|
||||
"""
|
||||
|
|
@ -0,0 +1,86 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:niu
|
||||
@file: base_data_source_web_node.py
|
||||
@date:2025/11/12 13:47
|
||||
@desc:
|
||||
"""
|
||||
import traceback
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.data_source_web_node.i_data_source_web_node import IDataSourceWebNode
|
||||
from common import forms
|
||||
from common.forms import BaseForm
|
||||
from common.utils.fork import ForkManage, Fork, ChildLink
|
||||
from common.utils.logger import maxkb_logger
|
||||
|
||||
|
||||
class BaseDataSourceWebNodeForm(BaseForm):
|
||||
source_url = forms.TextInputField('source url', required=True)
|
||||
selector = forms.TextInputField('knowledge selector', required=False, default_value="body")
|
||||
|
||||
|
||||
def get_collect_handler():
|
||||
results = []
|
||||
|
||||
def handler(child_link: ChildLink, response: Fork.Response):
|
||||
if response.status == 200:
|
||||
try:
|
||||
document_name = child_link.tag.text if child_link.tag is not None and len(
|
||||
child_link.tag.text.strip()) > 0 else child_link.url
|
||||
results.append({
|
||||
"name": document_name.strip(),
|
||||
"content": response.content,
|
||||
})
|
||||
except Exception as e:
|
||||
maxkb_logger.error(f'{str(e)}:{traceback.format_exc()}')
|
||||
|
||||
return handler, results
|
||||
|
||||
|
||||
class BaseDataSourceWebNode(IDataSourceWebNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
pass
|
||||
|
||||
@staticmethod
|
||||
def get_form_list(node):
|
||||
return BaseDataSourceWebNodeForm().to_form_list()
|
||||
|
||||
def execute(self, **kwargs) -> NodeResult:
|
||||
BaseDataSourceWebNodeForm().valid_form(self.workflow_params.get("data_source"))
|
||||
|
||||
data_source = self.workflow_params.get("data_source")
|
||||
|
||||
node_id = data_source.get("node_id")
|
||||
source_url = data_source.get("source_url")
|
||||
selector = data_source.get("selector") or "body"
|
||||
|
||||
collect_handler, document_list = get_collect_handler()
|
||||
|
||||
try:
|
||||
ForkManage(source_url, selector.split(" ") if selector is not None else []).fork(3, set(), collect_handler)
|
||||
|
||||
return NodeResult({'document_list': document_list},
|
||||
self.workflow_manage.params.get('knowledge_base') or {})
|
||||
|
||||
except Exception as e:
|
||||
maxkb_logger.error(_('data source web node:{node_id} error{error}{traceback}').format(
node_id=node_id, error=str(e), traceback=traceback.format_exc()))
|
||||
|
||||
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'type': self.node.type,
|
||||
'input_params': {"source_url": self.context.get("source_url"), "selector": self.context.get('selector')},
|
||||
'output_params': self.context.get('document_list'),
|
||||
'knowledge_base': self.workflow_params.get('knowledge_base'),
|
||||
'status': self.status,
|
||||
'err_message': self.err_message
|
||||
}
|
||||
|
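Reviewer note: `get_collect_handler` above returns a `(handler, results)` pair so that `ForkManage.fork` can push every successfully fetched page into a list owned by the caller. A stripped-down version of the same closure pattern; the `Link`/`Response` types here are stand-ins for `ChildLink` and `Fork.Response`:

```python
from dataclasses import dataclass


@dataclass
class Response:            # stands in for Fork.Response
    status: int
    content: str


@dataclass
class Link:                # stands in for ChildLink
    url: str
    title: str = ""


def get_collect_handler():
    results = []

    def handler(link: Link, response: Response):
        if response.status == 200:
            name = link.title.strip() or link.url   # fall back to the URL when the tag has no text
            results.append({"name": name, "content": response.content})

    return handler, results


handler, document_list = get_collect_handler()
handler(Link("https://example.com/a", "Page A"), Response(200, "hello"))
handler(Link("https://example.com/b"), Response(404, ""))   # ignored: non-200
print(document_list)   # [{'name': 'Page A', 'content': 'hello'}]
```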
|
@ -10,6 +10,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.exception.app_exception import AppApiException
|
||||
|
||||
|
|
@ -38,12 +39,19 @@ class ReplyNodeParamsSerializer(serializers.Serializer):
|
|||
|
||||
class IReplyNode(INode):
|
||||
type = 'reply-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP,
|
||||
WorkflowMode.KNOWLEDGE]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ReplyNodeParamsSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
if self.workflow_manage.flow.workflow_mode in [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
|
||||
**{'stream': True})
|
||||
else:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -14,7 +15,8 @@ class DocumentExtractNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IDocumentExtractNode(INode):
|
||||
type = 'document-extract-node'
|
||||
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP,
|
||||
WorkflowMode.KNOWLEDGE]
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return DocumentExtractNodeSerializer
|
||||
|
||||
|
|
@ -23,5 +25,5 @@ class IDocumentExtractNode(INode):
|
|||
self.node_params_serializer.data.get('document_list')[1:])
|
||||
return self.execute(document=res, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, document, chat_id, **kwargs) -> NodeResult:
|
||||
def execute(self, document, chat_id=None, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -1,39 +1,14 @@
|
|||
# coding=utf-8
|
||||
import ast
|
||||
import io
|
||||
import mimetypes
|
||||
|
||||
from django.core.files.uploadedfile import InMemoryUploadedFile
|
||||
import uuid_utils.compat as uuid
|
||||
from django.db.models import QuerySet
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.document_extract_node.i_document_extract_node import IDocumentExtractNode
|
||||
from knowledge.models import File, FileSourceType
|
||||
from knowledge.serializers.document import split_handles, parse_table_handle_list, FileBufferHandle
|
||||
from oss.serializers.file import FileSerializer
|
||||
|
||||
|
||||
def bytes_to_uploaded_file(file_bytes, file_name="file.txt"):
|
||||
content_type, _ = mimetypes.guess_type(file_name)
|
||||
if content_type is None:
|
||||
# Fall back to a generic binary content type when detection fails
|
||||
content_type = "application/octet-stream"
|
||||
# Create an in-memory byte stream
|
||||
file_stream = io.BytesIO(file_bytes)
|
||||
|
||||
# Determine the file size
|
||||
file_size = len(file_bytes)
|
||||
|
||||
# Build the InMemoryUploadedFile object
|
||||
uploaded_file = InMemoryUploadedFile(
|
||||
file=file_stream,
|
||||
field_name=None,
|
||||
name=file_name,
|
||||
content_type=content_type,
|
||||
size=file_size,
|
||||
charset=None,
|
||||
)
|
||||
return uploaded_file
|
||||
|
||||
|
||||
splitter = '\n`-----------------------------------`\n'
|
||||
|
||||
|
|
@ -42,35 +17,69 @@ class BaseDocumentExtractNode(IDocumentExtractNode):
|
|||
def save_context(self, details, workflow_manage):
|
||||
self.context['content'] = details.get('content')
|
||||
|
||||
def execute(self, document, chat_id, **kwargs):
|
||||
def execute(self, document, chat_id=None, **kwargs):
|
||||
get_buffer = FileBufferHandle().get_buffer
|
||||
|
||||
self.context['document_list'] = document
|
||||
content = []
|
||||
if document is None or not isinstance(document, list):
|
||||
return NodeResult({'content': ''}, {})
|
||||
return NodeResult({'content': '', 'document_list': []}, {})
|
||||
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
# Safely resolve the application
|
||||
application_id = None
|
||||
if (self.workflow_manage and
|
||||
self.workflow_manage.work_flow_post_handler and
|
||||
self.workflow_manage.work_flow_post_handler.chat_info):
|
||||
application_id = self.workflow_manage.work_flow_post_handler.chat_info.application.id
|
||||
knowledge_id = self.workflow_params.get('knowledge_id')
|
||||
|
||||
# Save images extracted from the document
|
||||
def save_image(image_list):
|
||||
for image in image_list:
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'debug': False if (application_id or knowledge_id) else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application.id) if application.id else None,
|
||||
'application_id': str(application_id) if application_id else None,
|
||||
'knowledge_id': str(knowledge_id) if knowledge_id else None,
|
||||
'file_id': str(image.id)
|
||||
}
|
||||
file_bytes = image.meta.pop('content')
|
||||
f = bytes_to_uploaded_file(file_bytes, image.file_name)
|
||||
FileSerializer(data={
|
||||
'file': f,
|
||||
'meta': meta,
|
||||
'source_id': meta['application_id'],
|
||||
'source_type': FileSourceType.APPLICATION.value
|
||||
}).upload()
|
||||
new_file = File(
|
||||
id=meta['file_id'],
|
||||
file_name=image.file_name,
|
||||
file_size=len(file_bytes),
|
||||
source_type=FileSourceType.APPLICATION.value if meta[
|
||||
'application_id'] else FileSourceType.KNOWLEDGE.value,
|
||||
source_id=meta['application_id'] if meta['application_id'] else meta['knowledge_id'],
|
||||
meta=meta
|
||||
)
|
||||
new_file.save(file_bytes)
|
||||
|
||||
document_list = []
|
||||
for doc in document:
|
||||
if 'file_bytes' in doc:
|
||||
file_bytes = doc['file_bytes']
|
||||
# Convert string payloads back to bytes
|
||||
if isinstance(file_bytes, str):
|
||||
file_bytes = ast.literal_eval(file_bytes)
|
||||
doc['file_id'] = doc.get('file_id') or uuid.uuid7()
|
||||
meta = {
|
||||
'debug': False if (application_id or knowledge_id) else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application_id) if application_id else None,
|
||||
'knowledge_id': str(knowledge_id) if knowledge_id else None,
|
||||
'file_id': str(doc['file_id'])
|
||||
}
|
||||
new_file = File(
|
||||
id=doc['file_id'],
|
||||
file_name=doc['name'],
|
||||
file_size=len(file_bytes),
|
||||
source_type=FileSourceType.APPLICATION.value if meta[
|
||||
'application_id'] else FileSourceType.KNOWLEDGE.value,
|
||||
source_id=meta['application_id'] if meta['application_id'] else meta['knowledge_id'],
|
||||
meta={}
|
||||
)
|
||||
new_file.save(file_bytes)
|
||||
file = QuerySet(File).filter(id=doc['file_id']).first()
|
||||
buffer = io.BytesIO(file.get_bytes())
|
||||
buffer.name = doc['name'] # this is the important line
|
||||
|
|
@ -81,9 +90,10 @@ class BaseDocumentExtractNode(IDocumentExtractNode):
|
|||
buffer.seek(0)
|
||||
file_content = split_handle.get_content(buffer, save_image)
|
||||
content.append('### ' + doc['name'] + '\n' + file_content)
|
||||
document_list.append({'id': file.id, 'name': doc['name'], 'content': file_content})
|
||||
break
|
||||
|
||||
return NodeResult({'content': splitter.join(content)}, {})
|
||||
return NodeResult({'content': splitter.join(content), 'document_list': document_list}, {})
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
content = self.context.get('content', '').split(splitter)
|
||||
|
|
|
|||
|
|
@ -0,0 +1 @@
|
|||
from .impl import *
|
||||
|
|
@ -0,0 +1,64 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
class DocumentSplitNodeSerializer(serializers.Serializer):
|
||||
document_list = serializers.ListField(required=False, label=_("document list"))
|
||||
split_strategy = serializers.ChoiceField(
|
||||
choices=['auto', 'custom', 'qa'], required=False, label=_("split strategy"), default='auto'
|
||||
)
|
||||
paragraph_title_relate_problem_type = serializers.ChoiceField(
|
||||
choices=['custom', 'referencing'], required=False, label=_("paragraph title relate problem type"),
|
||||
default='custom'
|
||||
)
|
||||
paragraph_title_relate_problem = serializers.BooleanField(
|
||||
required=False, label=_("paragraph title relate problem"), default=False
|
||||
)
|
||||
paragraph_title_relate_problem_reference = serializers.ListField(
|
||||
required=False, label=_("paragraph title relate problem reference"), child=serializers.CharField(), default=[]
|
||||
)
|
||||
document_name_relate_problem_type = serializers.ChoiceField(
|
||||
choices=['custom', 'referencing'], required=False, label=_("document name relate problem type"),
|
||||
default='custom'
|
||||
)
|
||||
document_name_relate_problem = serializers.BooleanField(
|
||||
required=False, label=_("document name relate problem"), default=False
|
||||
)
|
||||
document_name_relate_problem_reference = serializers.ListField(
|
||||
required=False, label=_("document name relate problem reference"), child=serializers.CharField(), default=[]
|
||||
)
|
||||
limit = serializers.IntegerField(required=False, label=_("limit"), default=4096)
|
||||
patterns = serializers.ListField(
|
||||
required=False, label=_("patterns"), child=serializers.CharField(), default=[]
|
||||
)
|
||||
with_filter = serializers.BooleanField(
|
||||
required=False, label=_("with filter"), default=False
|
||||
)
|
||||
|
||||
|
||||
class IDocumentSplitNode(INode):
|
||||
type = 'document-split-node'
|
||||
support = [
|
||||
WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP, WorkflowMode.KNOWLEDGE
|
||||
]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return DocumentSplitNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
# res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('file_list')[0],
|
||||
# self.node_params_serializer.data.get('file_list')[1:])
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, document_list, knowledge_id, split_strategy, paragraph_title_relate_problem_type,
|
||||
paragraph_title_relate_problem, paragraph_title_relate_problem_reference,
|
||||
document_name_relate_problem_type, document_name_relate_problem,
|
||||
document_name_relate_problem_reference, limit, patterns, with_filter, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
@ -0,0 +1 @@
|
|||
from .base_document_split_node import BaseDocumentSplitNode
|
||||
|
|
@ -0,0 +1,140 @@
|
|||
# coding=utf-8
|
||||
import io
|
||||
import mimetypes
|
||||
from typing import List
|
||||
|
||||
from django.core.files.uploadedfile import InMemoryUploadedFile
|
||||
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.document_split_node.i_document_split_node import IDocumentSplitNode
|
||||
from knowledge.serializers.document import default_split_handle, FileBufferHandle
|
||||
|
||||
|
||||
def bytes_to_uploaded_file(file_bytes, file_name="file.txt"):
|
||||
content_type, _ = mimetypes.guess_type(file_name)
|
||||
if content_type is None:
|
||||
# Fall back to a generic binary content type when detection fails
|
||||
content_type = "application/octet-stream"
|
||||
# Create an in-memory byte stream
|
||||
file_stream = io.BytesIO(file_bytes)
|
||||
|
||||
# Determine the file size
|
||||
file_size = len(file_bytes)
|
||||
|
||||
# Build the InMemoryUploadedFile object
|
||||
uploaded_file = InMemoryUploadedFile(
|
||||
file=file_stream,
|
||||
field_name=None,
|
||||
name=file_name,
|
||||
content_type=content_type,
|
||||
size=file_size,
|
||||
charset=None,
|
||||
)
|
||||
return uploaded_file
|
||||
|
||||
|
||||
class BaseDocumentSplitNode(IDocumentSplitNode):
|
||||
def save_context(self, details, workflow_manage):
|
||||
self.context['content'] = details.get('content')
|
||||
|
||||
def get_reference_content(self, fields: List[str]):
|
||||
return self.workflow_manage.get_reference_field(fields[0], fields[1:])
|
||||
|
||||
def execute(self, document_list, knowledge_id, split_strategy, paragraph_title_relate_problem_type,
|
||||
paragraph_title_relate_problem, paragraph_title_relate_problem_reference,
|
||||
document_name_relate_problem_type, document_name_relate_problem,
|
||||
document_name_relate_problem_reference, limit, patterns, with_filter, **kwargs) -> NodeResult:
|
||||
self.context['knowledge_id'] = knowledge_id
|
||||
file_list = self.workflow_manage.get_reference_field(document_list[0], document_list[1:])
|
||||
paragraph_list = []
|
||||
|
||||
for doc in file_list:
|
||||
get_buffer = FileBufferHandle().get_buffer
|
||||
|
||||
file_mem = bytes_to_uploaded_file(doc['content'].encode('utf-8'))
|
||||
result = default_split_handle.handle(file_mem, patterns, with_filter, limit, get_buffer, self._save_image)
|
||||
# Normalize the result into a list
|
||||
results = result if isinstance(result, list) else [result]
|
||||
|
||||
for item in results:
|
||||
self._process_split_result(
|
||||
item, knowledge_id, doc.get('id'), doc.get('name'),
|
||||
split_strategy, paragraph_title_relate_problem_type,
|
||||
paragraph_title_relate_problem, paragraph_title_relate_problem_reference,
|
||||
document_name_relate_problem_type, document_name_relate_problem,
|
||||
document_name_relate_problem_reference
|
||||
)
|
||||
|
||||
paragraph_list += results
|
||||
|
||||
self.context['paragraph_list'] = paragraph_list
|
||||
|
||||
return NodeResult({'paragraph_list': paragraph_list}, {})
|
||||
|
||||
def _save_image(self, image_list):
|
||||
pass
|
||||
|
||||
def _process_split_result(
|
||||
self, item, knowledge_id, source_file_id, file_name,
|
||||
split_strategy, paragraph_title_relate_problem_type,
|
||||
paragraph_title_relate_problem, paragraph_title_relate_problem_reference,
|
||||
document_name_relate_problem_type, document_name_relate_problem,
|
||||
document_name_relate_problem_reference
|
||||
):
|
||||
"""处理文档分割结果"""
|
||||
item['meta'] = {
|
||||
'knowledge_id': knowledge_id,
|
||||
'source_file_id': source_file_id,
|
||||
'source_url': file_name,
|
||||
}
|
||||
item['name'] = file_name
|
||||
item['paragraphs'] = item.pop('content', [])
|
||||
|
||||
for paragraph in item['paragraphs']:
|
||||
paragraph['problem_list'] = self._generate_problem_list(
|
||||
paragraph, file_name,
|
||||
split_strategy, paragraph_title_relate_problem_type,
|
||||
paragraph_title_relate_problem, paragraph_title_relate_problem_reference,
|
||||
document_name_relate_problem_type, document_name_relate_problem,
|
||||
document_name_relate_problem_reference
|
||||
)
|
||||
paragraph['is_active'] = True
|
||||
|
||||
def _generate_problem_list(
|
||||
self, paragraph, document_name, split_strategy, paragraph_title_relate_problem_type,
|
||||
paragraph_title_relate_problem, paragraph_title_relate_problem_reference,
|
||||
document_name_relate_problem_type, document_name_relate_problem,
|
||||
document_name_relate_problem_reference
|
||||
):
|
||||
if paragraph_title_relate_problem_type == 'referencing':
|
||||
paragraph_title_relate_problem = self.get_reference_content(paragraph_title_relate_problem_reference)
|
||||
if document_name_relate_problem_type == 'referencing':
|
||||
document_name_relate_problem = self.get_reference_content(document_name_relate_problem_reference)
|
||||
|
||||
problem_list = []
|
||||
if split_strategy == 'auto':
|
||||
if paragraph_title_relate_problem and paragraph.get('title'):
|
||||
problem_list.append(paragraph.get('title'))
|
||||
if document_name_relate_problem and document_name:
|
||||
problem_list.append(document_name)
|
||||
elif split_strategy == 'custom':
|
||||
if paragraph_title_relate_problem:
|
||||
problem_list.extend(paragraph_title_relate_problem)
|
||||
if document_name_relate_problem:
|
||||
problem_list.extend(document_name_relate_problem)
|
||||
elif split_strategy == 'qa':
|
||||
if document_name_relate_problem and document_name:
|
||||
problem_list.append(document_name)
|
||||
|
||||
return problem_list
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
"index": index,
|
||||
'run_time': self.context.get('run_time'),
|
||||
'type': self.node.type,
|
||||
'status': self.status,
|
||||
'err_message': self.err_message,
|
||||
'paragraph_list': self.context.get('paragraph_list', []),
|
||||
}
|
||||
|
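Reviewer note: the problem-list rules above depend on the split strategy: 'auto' reuses the paragraph title and/or the document name as questions, 'custom' extends the list with caller-supplied questions, and 'qa' only relates the document name. A condensed, standalone restatement of that decision table (parameter names here are simplified, not the serializer field names):

```python
def generate_problem_list(split_strategy, paragraph_title, document_name,
                          title_questions=None, name_questions=None):
    """Condensed restatement of BaseDocumentSplitNode._generate_problem_list."""
    problem_list = []
    if split_strategy == "auto":
        # Auto split: the title / document name themselves become questions.
        if title_questions and paragraph_title:
            problem_list.append(paragraph_title)
        if name_questions and document_name:
            problem_list.append(document_name)
    elif split_strategy == "custom":
        # Custom split: the caller supplies explicit question lists.
        problem_list.extend(title_questions or [])
        problem_list.extend(name_questions or [])
    elif split_strategy == "qa":
        # QA split: only the document name is related as a question.
        if name_questions and document_name:
            problem_list.append(document_name)
    return problem_list


print(generate_problem_list("auto", "Install guide", "manual.md", True, True))
# ['Install guide', 'manual.md']
```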
|
@ -10,6 +10,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
|
@ -24,6 +25,7 @@ class FormNodeParamsSerializer(serializers.Serializer):
|
|||
class IFormNode(INode):
|
||||
type = 'form-node'
|
||||
view_type = 'single_view'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return FormNodeParamsSerializer
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -31,14 +32,21 @@ class ImageGenerateNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IImageGenerateNode(INode):
|
||||
type = 'image-generate-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ImageGenerateNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
if self.workflow_manage.flow.workflow_mode in [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
|
||||
**{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None})
|
||||
else:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
**kwargs) -> NodeResult:
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import List
|
|||
import requests
|
||||
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.image_generate_step_node.i_image_generate_node import IImageGenerateNode
|
||||
from common.utils.common import bytes_to_uploaded_file
|
||||
|
|
@ -20,11 +21,10 @@ class BaseImageGenerateNode(IImageGenerateNode):
|
|||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
**kwargs) -> NodeResult:
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
workspace_id = self.workflow_manage.get_body().get('workspace_id')
|
||||
tti_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
|
||||
**model_params_setting)
|
||||
|
|
@ -44,17 +44,7 @@ class BaseImageGenerateNode(IImageGenerateNode):
|
|||
if isinstance(image_url, str) and image_url.startswith('http'):
|
||||
image_url = requests.get(image_url).content
|
||||
file = bytes_to_uploaded_file(image_url, file_name)
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application.id) if application.id else None,
|
||||
}
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': meta['application_id'],
|
||||
'source_type': FileSourceType.APPLICATION.value
|
||||
}).upload()
|
||||
file_url = self.upload_file(file)
|
||||
file_urls.append(file_url)
|
||||
self.context['image_list'] = [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls]
|
||||
answer = ' '.join([f"" for path in file_urls])
|
||||
|
|
@ -101,6 +91,42 @@ class BaseImageGenerateNode(IImageGenerateNode):
|
|||
question
|
||||
]
|
||||
|
||||
def upload_file(self, file):
|
||||
if self.workflow_manage.flow.workflow_mode in [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]:
|
||||
return self.upload_knowledge_file(file)
|
||||
return self.upload_application_file(file)
|
||||
|
||||
def upload_knowledge_file(self, file):
|
||||
knowledge_id = self.workflow_params.get('knowledge_id')
|
||||
meta = {
|
||||
'debug': False,
|
||||
'knowledge_id': knowledge_id,
|
||||
}
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': knowledge_id,
|
||||
'source_type': FileSourceType.KNOWLEDGE.value
|
||||
}).upload()
|
||||
return file_url
|
||||
|
||||
def upload_application_file(self, file):
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
chat_id = self.workflow_params.get('chat_id')
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application.id) if application.id else None,
|
||||
}
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': meta['application_id'],
|
||||
'source_type': FileSourceType.APPLICATION.value
|
||||
}).upload()
|
||||
return file_url
|
||||
|
||||
@staticmethod
|
||||
def reset_message_list(message_list: List[BaseMessage], answer_text):
|
||||
result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -34,7 +35,8 @@ class ImageToVideoNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IImageToVideoNode(INode):
|
||||
type = 'image-to-video-node'
|
||||
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ImageToVideoNodeSerializer
|
||||
|
||||
|
|
@ -53,10 +55,15 @@ class IImageToVideoNode(INode):
|
|||
self.node_params_serializer.data.get('last_frame_url')[1:])
|
||||
node_params_data = {k: v for k, v in self.node_params_serializer.data.items()
|
||||
if k not in ['first_frame_url', 'last_frame_url']}
|
||||
return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url,
|
||||
if self.workflow_manage.flow.workflow_mode in [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]:
|
||||
return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url, **node_params_data, **self.flow_params_serializer.data,
|
||||
**{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None})
|
||||
else:
|
||||
return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url,
|
||||
**node_params_data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
first_frame_url, last_frame_url,
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ import requests
|
|||
from django.db.models import QuerySet
|
||||
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.image_to_video_step_node.i_image_to_video_node import IImageToVideoNode
|
||||
from common.utils.common import bytes_to_uploaded_file
|
||||
|
|
@ -23,12 +24,11 @@ class BaseImageToVideoNode(IImageToVideoNode):
|
|||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
first_frame_url, last_frame_url=None,
|
||||
**kwargs) -> NodeResult:
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
workspace_id = self.workflow_manage.get_body().get('workspace_id')
|
||||
ttv_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
|
||||
**model_params_setting)
|
||||
|
|
@@ -54,17 +54,7 @@ class BaseImageToVideoNode(IImageToVideoNode):
        if isinstance(video_urls, str) and video_urls.startswith('http'):
            video_urls = requests.get(video_urls).content
        file = bytes_to_uploaded_file(video_urls, file_name)
        meta = {
            'debug': False if application.id else True,
            'chat_id': chat_id,
            'application_id': str(application.id) if application.id else None,
        }
        file_url = FileSerializer(data={
            'file': file,
            'meta': meta,
            'source_id': meta['application_id'],
            'source_type': FileSourceType.APPLICATION.value
        }).upload()
        file_url = self.upload_file(file)
        video_label = f'<video src="{file_url}" controls style="max-width: 100%; width: 100%; height: auto; max-height: 60vh;"></video>'
        video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}]
        return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list,

@@ -88,6 +78,42 @@ class BaseImageToVideoNode(IImageToVideoNode):
            raise ValueError(
                gettext("Failed to obtain the image"))

    def upload_file(self, file):
        if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(
                self.workflow_manage.flow.workflow_mode):
            return self.upload_knowledge_file(file)
        return self.upload_application_file(file)

    def upload_knowledge_file(self, file):
        knowledge_id = self.workflow_params.get('knowledge_id')
        meta = {
            'debug': False,
            'knowledge_id': knowledge_id
        }
        file_url = FileSerializer(data={
            'file': file,
            'meta': meta,
            'source_id': knowledge_id,
            'source_type': FileSourceType.KNOWLEDGE.value
        }).upload()
        return file_url

    def upload_application_file(self, file):
        application = self.workflow_manage.work_flow_post_handler.chat_info.application
        chat_id = self.workflow_params.get('chat_id')
        meta = {
            'debug': False if application.id else True,
            'chat_id': chat_id,
            'application_id': str(application.id) if application.id else None,
        }
        file_url = FileSerializer(data={
            'file': file,
            'meta': meta,
            'source_id': meta['application_id'],
            'source_type': FileSourceType.APPLICATION.value
        }).upload()
        return file_url

    def generate_history_ai_message(self, chat_record):
        for val in chat_record.details.values():
            if self.node.id == val['node_id'] and 'image_list' in val:

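Note: the same mode-based upload dispatch is repeated in the text-to-speech and text-to-video nodes later in this diff. A minimal, self-contained sketch of the check (a reduced stand-in enum is used here; the real WorkflowMode lives in application.flow.common, and `in` is the idiomatic spelling of the `.__contains__(...)` call used throughout):

from enum import Enum


class WorkflowMode(Enum):
    # reduced stand-in for application.flow.common.WorkflowMode; values are illustrative
    APPLICATION = 'APPLICATION'
    APPLICATION_LOOP = 'APPLICATION_LOOP'
    KNOWLEDGE = 'KNOWLEDGE'
    KNOWLEDGE_LOOP = 'KNOWLEDGE_LOOP'


KNOWLEDGE_MODES = (WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP)


def is_knowledge_mode(workflow_mode: WorkflowMode) -> bool:
    # idiomatic equivalent of [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(workflow_mode)
    return workflow_mode in KNOWLEDGE_MODES
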
@ -4,6 +4,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
|
@ -30,6 +31,7 @@ class ImageUnderstandNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IImageUnderstandNode(INode):
|
||||
type = 'image-understand-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ImageUnderstandNodeSerializer
|
||||
|
|
|
|||
|
|
@ -5,11 +5,11 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
class IntentBranchSerializer(serializers.Serializer):
|
||||
|
||||
id = serializers.CharField(required=True, label=_("Branch id"))
|
||||
content = serializers.CharField(required=True, label=_("content"))
|
||||
isOther = serializers.BooleanField(required=True, label=_("Branch Type"))
|
||||
|
|
@@ -24,8 +24,12 @@ class IntentNodeSerializer(serializers.Serializer):
                                            label=_("Model parameter settings"))
    branch = IntentBranchSerializer(many=True)


class IIntentNode(INode):
    type = 'intent-node'
    support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
               WorkflowMode.KNOWLEDGE_LOOP]

    def save_context(self, details, workflow_manage):
        pass

@@ -37,10 +41,15 @@ class IIntentNode(INode):
            self.node_params_serializer.data.get('content_list')[0],
            self.node_params_serializer.data.get('content_list')[1:],
        )

        return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, user_input=str(question))

        if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(
                self.workflow_manage.flow.workflow_mode):
            return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
                                **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None,
                                   'user_input': str(question)})
        else:
            return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
                                user_input=str(question))

    def execute(self, model_id, dialogue_number, history_chat_record, user_input, branch,
                model_params_setting=None, **kwargs) -> NodeResult:
        pass

@@ -0,0 +1,8 @@
# coding=utf-8
"""
@project: MaxKB
@Author:niu
@file: __init__.py.py
@date:2025/11/13 11:17
@desc:
"""

@@ -0,0 +1,43 @@
# coding=utf-8
"""
@project: MaxKB
@Author:niu
@file: i_knowledge_write_node.py
@date:2025/11/13 11:19
@desc:
"""
from typing import Type

from django.utils.translation import gettext_lazy as _
from rest_framework import serializers

from application.flow.common import WorkflowMode
from application.flow.i_step_node import INode, NodeResult


class KnowledgeWriteNodeParamSerializer(serializers.Serializer):
    document_list = serializers.ListField(required=True, child=serializers.CharField(required=True), allow_null=True,
                                          label=_('document list'))


class IKnowledgeWriteNode(INode):

    def save_context(self, details, workflow_manage):
        pass

    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
        return KnowledgeWriteNodeParamSerializer

    def _run(self):
        documents = self.workflow_manage.get_reference_field(
            self.node_params_serializer.data.get('document_list')[0],
            self.node_params_serializer.data.get('document_list')[1:],
        )

        return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, documents=documents)

    def execute(self, documents, **kwargs) -> NodeResult:
        pass

    type = 'knowledge-write-node'
    support = [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]

@@ -0,0 +1,8 @@
# coding=utf-8
"""
@project: MaxKB
@Author:niu
@file: __init__.py.py
@date:2025/11/13 11:18
@desc:
"""

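Note: the write node resolves document_list as a reference field and hands the resolved documents to execute. A hypothetical payload illustrating the shape expected downstream by KnowledgeWriteParamSerializer / ParagraphInstanceSerializer (field names come from this diff, the concrete values are invented):

documents = [
    {
        'name': 'product-faq.md',                  # document name, at most 128 characters
        'meta': {'source': 'web-crawler'},         # optional free-form metadata
        'paragraphs': [
            {
                'title': 'Refund policy',
                'content': 'Refunds are accepted within 30 days of purchase...',
                'problem_list': ['How long do I have to request a refund?'],
                # 'chunks' may be omitted; the base node falls back to text_to_chunk(content)
            },
        ],
    },
]
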
@@ -0,0 +1,213 @@
# coding=utf-8
"""
@project: MaxKB
@Author:niu
@file: base_knowledge_write_node.py
@date:2025/11/13 11:19
@desc:
"""
from functools import reduce
from typing import Dict, List
import uuid_utils.compat as uuid
from django.db.models import QuerySet
from django.db.models.aggregates import Max

from rest_framework import serializers
from django.utils.translation import gettext_lazy as _
from application.flow.i_step_node import NodeResult
from application.flow.step_node.knowledge_write_node.i_knowledge_write_node import IKnowledgeWriteNode
from common.chunk import text_to_chunk
from common.utils.common import bulk_create_in_batches
from knowledge.models import Document, KnowledgeType, Paragraph, File, FileSourceType, Problem, ProblemParagraphMapping
from knowledge.serializers.common import ProblemParagraphObject, ProblemParagraphManage


class ParagraphInstanceSerializer(serializers.Serializer):
    content = serializers.CharField(required=True, label=_('content'), max_length=102400, min_length=1, allow_null=True,
                                    allow_blank=True)
    title = serializers.CharField(required=False, max_length=256, label=_('section title'), allow_null=True,
                                  allow_blank=True)
    problem_list = serializers.ListField(required=False, child=serializers.CharField(required=False, allow_blank=True))
    is_active = serializers.BooleanField(required=False, label=_('Is active'))
    chunks = serializers.ListField(required=False, child=serializers.CharField(required=True))


class KnowledgeWriteParamSerializer(serializers.Serializer):
    name = serializers.CharField(required=True, label=_('document name'), max_length=128, min_length=1,
                                 source=_('document name'))
    meta = serializers.DictField(required=False)
    paragraphs = ParagraphInstanceSerializer(required=False, many=True, allow_null=True)


def convert_uuid_to_str(obj):
    if isinstance(obj, dict):
        return {k: convert_uuid_to_str(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [convert_uuid_to_str(i) for i in obj]
    elif isinstance(obj, uuid.UUID):
        return str(obj)
    else:
        return obj


def link_file(source_file_id, document_id):
    if source_file_id is None:
        return
    source_file = QuerySet(File).filter(id=source_file_id).first()
    if source_file:
        file_content = source_file.get_bytes()

        new_file = File(
            id=uuid.uuid7(),
            file_name=source_file.file_name,
            file_size=source_file.file_size,
            source_type=FileSourceType.DOCUMENT,
            source_id=document_id,  # update to the current document ID
            meta=source_file.meta.copy() if source_file.meta else {}
        )

        # save the file content and metadata
        new_file.save(file_content)


def get_paragraph_problem_model(knowledge_id: str, document_id: str, instance: Dict):
    paragraph = Paragraph(
        id=uuid.uuid7(),
        document_id=document_id,
        content=instance.get("content"),
        knowledge_id=knowledge_id,
        title=instance.get("title") if 'title' in instance else '',
        chunks=instance.get('chunks') if 'chunks' in instance else text_to_chunk(instance.get("content")),
    )

    problem_paragraph_object_list = [ProblemParagraphObject(
        knowledge_id, document_id, str(paragraph.id), problem
    ) for problem in (instance.get('problem_list') if 'problem_list' in instance else [])]

    return {
        'paragraph': paragraph,
        'problem_paragraph_object_list': problem_paragraph_object_list,
    }


def get_paragraph_model(document_model, paragraph_list: List):
    knowledge_id = document_model.knowledge_id
    paragraph_model_dict_list = [
        get_paragraph_problem_model(knowledge_id, document_model.id, paragraph)
        for paragraph in paragraph_list
    ]

    paragraph_model_list = []
    problem_paragraph_object_list = []
    for paragraphs in paragraph_model_dict_list:
        paragraph = paragraphs.get('paragraph')
        for problem_model in paragraphs.get('problem_paragraph_object_list'):
            problem_paragraph_object_list.append(problem_model)
        paragraph_model_list.append(paragraph)

    return {
        'document': document_model,
        'paragraph_model_list': paragraph_model_list,
        'problem_paragraph_object_list': problem_paragraph_object_list,
    }


def get_document_paragraph_model(knowledge_id: str, instance: Dict):
    source_meta = {'source_file_id': instance.get("source_file_id")} if instance.get("source_file_id") else {}
    meta = {**instance.get('meta'), **source_meta} if instance.get('meta') is not None else source_meta
    meta = {**convert_uuid_to_str(meta), 'allow_download': True}

    document_model = Document(
        **{
            'knowledge_id': knowledge_id,
            'id': uuid.uuid7(),
            'name': instance.get('name'),
            'char_length': reduce(
                lambda x, y: x + y,
                [len(p.get('content')) for p in instance.get('paragraphs', [])],
                0),
            'meta': meta,
            'type': instance.get('type') if instance.get('type') is not None else KnowledgeType.WORKFLOW
        }
    )

    return get_paragraph_model(
        document_model,
        instance.get('paragraphs') if 'paragraphs' in instance else []
    )


class BaseKnowledgeWriteNode(IKnowledgeWriteNode):

    def save_context(self, details, workflow_manage):
        pass

    def save(self, document_list):
        serializer = KnowledgeWriteParamSerializer(data=document_list, many=True)
        serializer.is_valid(raise_exception=True)
        document_list = serializer.data

        knowledge_id = self.workflow_params.get("knowledge_id")
        workspace_id = self.workflow_params.get("workspace_id")

        document_model_list = []
        paragraph_model_list = []
        problem_paragraph_object_list = []

        for document in document_list:
            document_paragraph_dict_model = get_document_paragraph_model(
                knowledge_id,
                document
            )
            document_instance = document_paragraph_dict_model.get('document')
            link_file(document.get("source_file_id"), document_instance.id)
            document_model_list.append(document_instance)
            for paragraph in document_paragraph_dict_model.get("paragraph_model_list"):
                paragraph_model_list.append(paragraph)
            for problem_paragraph_object in document_paragraph_dict_model.get("problem_paragraph_object_list"):
                problem_paragraph_object_list.append(problem_paragraph_object)

        problem_model_list, problem_paragraph_mapping_list = (
            ProblemParagraphManage(problem_paragraph_object_list, knowledge_id).to_problem_model_list()
        )

        QuerySet(Document).bulk_create(document_model_list) if len(document_model_list) > 0 else None

        if len(paragraph_model_list) > 0:
            for document in document_model_list:
                max_position = Paragraph.objects.filter(document_id=document.id).aggregate(
                    max_position=Max('position')
                )['max_position'] or 0
                sub_list = [p for p in paragraph_model_list if p.document_id == document.id]
                for i, paragraph in enumerate(sub_list):
                    paragraph.position = max_position + i + 1
                QuerySet(Paragraph).bulk_create(sub_list if len(sub_list) > 0 else [])

        bulk_create_in_batches(Problem, problem_model_list, batch_size=1000)

        bulk_create_in_batches(ProblemParagraphMapping, problem_paragraph_mapping_list, batch_size=1000)

        return document_model_list, knowledge_id, workspace_id

    def execute(self, documents, **kwargs) -> NodeResult:

        document_model_list, knowledge_id, workspace_id = self.save(documents)

        write_content_list = [{
            "name": document.get("name"),
            "paragraphs": [{
                "title": p.get("title"),
                "content": p.get("content"),
            } for p in document.get("paragraphs")[0:4]]
        } for document in documents]

        return NodeResult({'write_content': write_content_list}, {})

    def get_details(self, index: int, **kwargs):
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'type': self.node.type,
            'write_content': self.context.get("write_content"),
            'status': self.status,
            'err_message': self.err_message
        }

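Note: save() appends new paragraphs behind the highest existing position of their document. A standalone sketch of that numbering, with a plain list standing in for the Paragraph.objects.aggregate(Max('position')) query:

def assign_positions(new_paragraphs, existing_positions):
    # new paragraphs are numbered after the current maximum position
    max_position = max(existing_positions, default=0)
    for i, paragraph in enumerate(new_paragraphs):
        paragraph['position'] = max_position + i + 1
    return new_paragraphs


assert assign_positions([{'content': 'a'}, {'content': 'b'}], [3, 7]) == [
    {'content': 'a', 'position': 8},
    {'content': 'b', 'position': 9},
]
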
@ -11,6 +11,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode
|
||||
from application.flow.i_step_node import NodeResult
|
||||
|
||||
|
|
@ -28,6 +29,7 @@ class LoopBreakNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ILoopBreakNode(INode):
|
||||
type = 'loop-break-node'
|
||||
support = [WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return LoopBreakNodeSerializer
|
||||
|
|
|
|||
|
|
@ -8,10 +8,11 @@
|
|||
"""
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class ConditionSerializer(serializers.Serializer):
|
||||
|
|
@ -27,6 +28,7 @@ class LoopContinueNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ILoopContinueNode(INode):
|
||||
type = 'loop-continue-node'
|
||||
support = [WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return LoopContinueNodeSerializer
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.exception.app_exception import AppApiException
|
||||
|
||||
|
|
@ -40,6 +41,7 @@ class ILoopNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ILoopNode(INode):
|
||||
type = 'loop-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.KNOWLEDGE]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return ILoopNodeSerializer
|
||||
|
|
@ -52,5 +54,5 @@ class ILoopNode(INode):
|
|||
array[1:])
|
||||
return self.execute(**{**self.node_params_serializer.data, "array": array}, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, loop_type, array, number, loop_body, stream, **kwargs) -> NodeResult:
|
||||
def execute(self, loop_type, array, number, loop_body, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -11,7 +11,7 @@ from typing import Dict, List
|
|||
|
||||
from django.utils.translation import gettext as _
|
||||
|
||||
from application.flow.common import Answer
|
||||
from application.flow.common import Answer, WorkflowMode
|
||||
from application.flow.i_step_node import NodeResult, WorkFlowPostHandler, INode
|
||||
from application.flow.step_node.loop_node.i_loop_node import ILoopNode
|
||||
from application.flow.tools import Reasoning
|
||||
|
|
@ -197,6 +197,7 @@ def loop(workflow_manage_new_instance, node: INode, generate_loop):
|
|||
insert_or_replace(loop_node_data, index, instance.get_runtime_details())
|
||||
insert_or_replace(loop_answer_data, index,
|
||||
get_answer_list(instance, child_node_node_dict, node.runtime_node_id))
|
||||
instance._cleanup()
|
||||
if break_outer:
|
||||
break
|
||||
node.context['is_interrupt_exec'] = is_interrupt_exec
|
||||
|
|
@ -206,7 +207,7 @@ def loop(workflow_manage_new_instance, node: INode, generate_loop):
|
|||
node.context["item"] = current_index
|
||||
|
||||
|
||||
def get_write_context(loop_type, array, number, loop_body, stream):
|
||||
def get_write_context(loop_type, array, number, loop_body):
|
||||
def inner_write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
|
||||
if loop_type == 'ARRAY':
|
||||
return loop(node_variable['workflow_manage_new_instance'], node, generate_loop_array(array))
|
||||
|
|
@@ -248,27 +249,31 @@ class BaseLoopNode(ILoopNode):
    def get_loop_context(self):
        return self.context

    def execute(self, loop_type, array, number, loop_body, stream, **kwargs) -> NodeResult:
    def execute(self, loop_type, array, number, loop_body, **kwargs) -> NodeResult:
        from application.flow.loop_workflow_manage import LoopWorkflowManage, Workflow
        from application.flow.knowledge_loop_workflow_manage import KnowledgeLoopWorkflowManage

        def workflow_manage_new_instance(loop_data, global_data, start_node_id=None,
                                         start_node_data=None, chat_record=None, child_node=None):
            workflow_manage = LoopWorkflowManage(Workflow.new_instance(loop_body), self.workflow_manage.params,
                                                 LoopWorkFlowPostHandler(
                                                     self.workflow_manage.work_flow_post_handler.chat_info),
                                                 self.workflow_manage,
                                                 loop_data,
                                                 self.get_loop_context,
                                                 base_to_response=LoopToResponse(),
                                                 start_node_id=start_node_id,
                                                 start_node_data=start_node_data,
                                                 chat_record=chat_record,
                                                 child_node=child_node
                                                 )
            workflow_mode = WorkflowMode.KNOWLEDGE_LOOP if WorkflowMode.KNOWLEDGE == self.workflow_manage.flow.workflow_mode else WorkflowMode.APPLICATION_LOOP
            c = KnowledgeLoopWorkflowManage if workflow_mode == WorkflowMode.KNOWLEDGE_LOOP else LoopWorkflowManage
            workflow_manage = c(Workflow.new_instance(loop_body, workflow_mode),
                                self.workflow_manage.params,
                                LoopWorkFlowPostHandler(
                                    self.workflow_manage.work_flow_post_handler.chat_info),
                                self.workflow_manage,
                                loop_data,
                                self.get_loop_context,
                                base_to_response=LoopToResponse(),
                                start_node_id=start_node_id,
                                start_node_data=start_node_data,
                                chat_record=chat_record,
                                child_node=child_node
                                )

            return workflow_manage

        return NodeResult({'workflow_manage_new_instance': workflow_manage_new_instance}, {},
                          _write_context=get_write_context(loop_type, array, number, loop_body, stream),
                          _write_context=get_write_context(loop_type, array, number, loop_body),
                          _is_interrupt=_is_interrupt_exec)

    def get_loop_context_data(self):

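Note: the loop node now derives the inner loop mode and the manage class from the parent workflow mode. A minimal sketch of that selection, with plain strings standing in for the WorkflowMode members and the class names treated as opaque labels:

def select_loop_manage(parent_mode: str):
    # a KNOWLEDGE parent runs its body as a knowledge loop, everything else as an application loop
    loop_mode = 'KNOWLEDGE_LOOP' if parent_mode == 'KNOWLEDGE' else 'APPLICATION_LOOP'
    manage_name = 'KnowledgeLoopWorkflowManage' if loop_mode == 'KNOWLEDGE_LOOP' else 'LoopWorkflowManage'
    return loop_mode, manage_name


assert select_loop_manage('KNOWLEDGE') == ('KNOWLEDGE_LOOP', 'KnowledgeLoopWorkflowManage')
assert select_loop_manage('APPLICATION') == ('APPLICATION_LOOP', 'LoopWorkflowManage')
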
|
|||
|
|
@ -6,15 +6,16 @@
|
|||
@date:2024/6/3 16:54
|
||||
@desc:
|
||||
"""
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
class ILoopStarNode(INode):
|
||||
type = 'loop-start-node'
|
||||
support = [WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, **kwargs) -> NodeResult:
|
||||
def execute(self, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.loop_start_node.i_loop_start_node import ILoopStarNode
|
||||
|
||||
|
|
@ -31,7 +32,8 @@ class BaseLoopStartStepNode(ILoopStarNode):
|
|||
'index': loop_params.get("index"),
|
||||
'item': loop_params.get("item")
|
||||
}
|
||||
self.workflow_manage.chat_context = self.workflow_manage.get_chat_info().get_chat_variable()
|
||||
if WorkflowMode.APPLICATION_LOOP == self.workflow_manage.flow.workflow_mode:
|
||||
self.workflow_manage.chat_context = self.workflow_manage.get_chat_info().get_chat_variable()
|
||||
return NodeResult(node_variable, {})
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -19,6 +20,8 @@ class McpNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IMcpNode(INode):
|
||||
type = 'mcp-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return McpNodeSerializer
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -23,6 +24,8 @@ class VariableSplittingNodeParamsSerializer(serializers.Serializer):
|
|||
|
||||
class IParameterExtractionNode(INode):
|
||||
type = 'parameter-extraction-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return VariableSplittingNodeParamsSerializer
|
||||
|
|
|
|||
|
|
@ -11,6 +11,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -31,6 +32,8 @@ class QuestionNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IQuestionNode(INode):
|
||||
type = 'question-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return QuestionNodeSerializer
|
||||
|
|
|
|||
|
|
@ -10,6 +10,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
|
@ -41,6 +42,7 @@ class RerankerStepNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IRerankerNode(INode):
|
||||
type = 'reranker-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return RerankerStepNodeSerializer
|
||||
|
|
@ -57,6 +59,6 @@ class IRerankerNode(INode):
|
|||
|
||||
reranker_list=reranker_list)
|
||||
|
||||
def execute(self, question, reranker_setting, reranker_list, reranker_model_id,show_knowledge,
|
||||
def execute(self, question, reranker_setting, reranker_list, reranker_model_id, show_knowledge,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ from typing import Type, List
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -42,6 +43,7 @@ class SearchDocumentStepNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ISearchDocumentStepNode(INode):
|
||||
type = 'search-document-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return SearchDocumentStepNodeSerializer
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ from django.core import validators
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.utils.common import flat_map
|
||||
|
||||
|
|
@ -67,6 +68,7 @@ def get_paragraph_list(chat_record, node_id):
|
|||
|
||||
class ISearchKnowledgeStepNode(INode):
|
||||
type = 'search-knowledge-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return SearchDatasetStepNodeSerializer
|
||||
|
|
|
|||
|
|
@ -2,10 +2,11 @@
|
|||
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class SpeechToTextNodeSerializer(serializers.Serializer):
|
||||
|
|
@ -22,6 +23,8 @@ class SpeechToTextNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ISpeechToTextNode(INode):
|
||||
type = 'speech-to-text-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return SpeechToTextNodeSerializer
|
||||
|
|
@ -36,7 +39,7 @@ class ISpeechToTextNode(INode):
|
|||
|
||||
return self.execute(audio=res, **self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, stt_model_id, chat_id,
|
||||
def execute(self, stt_model_id,
|
||||
audio, model_params_setting=None,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -20,7 +20,7 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
|
|||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, stt_model_id, chat_id, audio, model_params_setting=None, **kwargs) -> NodeResult:
|
||||
def execute(self, stt_model_id, audio, model_params_setting=None, **kwargs) -> NodeResult:
|
||||
workspace_id = self.workflow_manage.get_body().get('workspace_id')
|
||||
stt_model = get_model_instance_by_model_workspace_id(stt_model_id, workspace_id, **model_params_setting)
|
||||
audio_list = audio
|
||||
|
|
|
|||
|
|
@ -6,12 +6,13 @@
|
|||
@date:2024/6/3 16:54
|
||||
@desc:
|
||||
"""
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
class IStarNode(INode):
|
||||
type = 'start-node'
|
||||
support = [WorkflowMode.APPLICATION]
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.flow_params_serializer.data)
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ from typing import Type
|
|||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
|
@ -22,6 +23,8 @@ class TextToSpeechNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ITextToSpeechNode(INode):
|
||||
type = 'text-to-speech-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return TextToSpeechNodeSerializer
|
||||
|
|
@ -31,7 +34,7 @@ class ITextToSpeechNode(INode):
|
|||
self.node_params_serializer.data.get('content_list')[1:])
|
||||
return self.execute(content=content, **self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, tts_model_id, chat_id,
|
||||
def execute(self, tts_model_id,
|
||||
content, model_params_setting=None,
|
||||
**kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -4,6 +4,7 @@ import mimetypes
|
|||
|
||||
from django.core.files.uploadedfile import InMemoryUploadedFile
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.text_to_speech_step_node.i_text_to_speech_node import ITextToSpeechNode
|
||||
from common.utils.common import _remove_empty_lines
|
||||
|
|
@ -42,7 +43,7 @@ class BaseTextToSpeechNode(ITextToSpeechNode):
|
|||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, tts_model_id, chat_id,
|
||||
def execute(self, tts_model_id,
|
||||
content, model_params_setting=None,
|
||||
max_length=1024, **kwargs) -> NodeResult:
|
||||
# 分割文本为合理片段
|
||||
|
|
@ -77,25 +78,10 @@ class BaseTextToSpeechNode(ITextToSpeechNode):
|
|||
output_buffer = io.BytesIO()
|
||||
combined_audio.export(output_buffer, format="mp3")
|
||||
combined_bytes = output_buffer.getvalue()
|
||||
|
||||
# 存储合并后的音频文件
|
||||
file_name = 'combined_audio.mp3'
|
||||
file = bytes_to_uploaded_file(combined_bytes, file_name)
|
||||
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application.id) if application.id else None,
|
||||
}
|
||||
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': meta['application_id'],
|
||||
'source_type': FileSourceType.APPLICATION.value
|
||||
}).upload()
|
||||
|
||||
# 存储合并后的音频文件
|
||||
file_url = self.upload_file(file)
|
||||
# 生成音频标签
|
||||
audio_label = f'<audio src="{file_url}" controls style="width: 300px; height: 43px"></audio>'
|
||||
file_id = file_url.split('/')[-1]
|
||||
|
|
@ -111,6 +97,42 @@ class BaseTextToSpeechNode(ITextToSpeechNode):
|
|||
'result': audio_list
|
||||
}, {})
|
||||
|
||||
def upload_file(self, file):
|
||||
if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(
|
||||
self.workflow_manage.flow.workflow_mode):
|
||||
return self.upload_knowledge_file(file)
|
||||
return self.upload_application_file(file)
|
||||
|
||||
def upload_knowledge_file(self, file):
|
||||
knowledge_id = self.workflow_params.get('knowledge_id')
|
||||
meta = {
|
||||
'debug': False,
|
||||
'knowledge_id': knowledge_id,
|
||||
}
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': knowledge_id,
|
||||
'source_type': FileSourceType.KNOWLEDGE.value
|
||||
}).upload()
|
||||
return file_url
|
||||
|
||||
def upload_application_file(self, file):
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
chat_id = self.workflow_params.get('chat_id')
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'chat_id': chat_id,
|
||||
'application_id': str(application.id) if application.id else None,
|
||||
}
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': meta['application_id'],
|
||||
'source_type': FileSourceType.APPLICATION.value
|
||||
}).upload()
|
||||
return file_url
|
||||
|
||||
def get_details(self, index: int, **kwargs):
|
||||
return {
|
||||
'name': self.node.properties.get('stepName'),
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -31,14 +32,21 @@ class TextToVideoNodeSerializer(serializers.Serializer):
|
|||
|
||||
class ITextToVideoNode(INode):
|
||||
type = 'text-to-video-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return TextToVideoNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(
|
||||
self.workflow_manage.flow.workflow_mode):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
|
||||
**{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None})
|
||||
else:
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
**kwargs) -> NodeResult:
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import List
|
|||
import requests
|
||||
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import NodeResult
|
||||
from application.flow.step_node.text_to_video_step_node.i_text_to_video_node import ITextToVideoNode
|
||||
from common.utils.common import bytes_to_uploaded_file
|
||||
|
|
@ -20,11 +21,10 @@ class BaseTextToVideoNode(ITextToVideoNode):
|
|||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
|
||||
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
**kwargs) -> NodeResult:
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
workspace_id = self.workflow_manage.get_body().get('workspace_id')
|
||||
ttv_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
|
||||
**model_params_setting)
|
||||
|
|
@ -44,6 +44,36 @@ class BaseTextToVideoNode(ITextToVideoNode):
|
|||
if isinstance(video_urls, str) and video_urls.startswith('http'):
|
||||
video_urls = requests.get(video_urls).content
|
||||
file = bytes_to_uploaded_file(video_urls, file_name)
|
||||
file_url = self.upload_file(file)
|
||||
video_label = f'<video src="{file_url}" controls style="max-width: 100%; width: 100%; height: auto;"></video>'
|
||||
video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}]
|
||||
return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list,
|
||||
'video': video_list,
|
||||
'history_message': history_message, 'question': question}, {})
|
||||
|
||||
def upload_file(self, file):
|
||||
if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(
|
||||
self.workflow_manage.flow.workflow_mode):
|
||||
return self.upload_knowledge_file(file)
|
||||
return self.upload_application_file(file)
|
||||
|
||||
def upload_knowledge_file(self, file):
|
||||
knowledge_id = self.workflow_params.get('knowledge_id')
|
||||
meta = {
|
||||
'debug': False,
|
||||
'knowledge_id': knowledge_id
|
||||
}
|
||||
file_url = FileSerializer(data={
|
||||
'file': file,
|
||||
'meta': meta,
|
||||
'source_id': knowledge_id,
|
||||
'source_type': FileSourceType.KNOWLEDGE.value
|
||||
}).upload()
|
||||
return file_url
|
||||
|
||||
def upload_application_file(self, file):
|
||||
application = self.workflow_manage.work_flow_post_handler.chat_info.application
|
||||
chat_id = self.workflow_params.get('chat_id')
|
||||
meta = {
|
||||
'debug': False if application.id else True,
|
||||
'chat_id': chat_id,
|
||||
|
|
@ -55,11 +85,7 @@ class BaseTextToVideoNode(ITextToVideoNode):
|
|||
'source_id': meta['application_id'],
|
||||
'source_type': FileSourceType.APPLICATION.value
|
||||
}).upload()
|
||||
video_label = f'<video src="{file_url}" controls style="max-width: 100%; width: 100%; height: auto;"></video>'
|
||||
video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}]
|
||||
return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list,
|
||||
'video': video_list,
|
||||
'history_message': history_message, 'question': question}, {})
|
||||
return file_url
|
||||
|
||||
def generate_history_ai_message(self, chat_record):
|
||||
for val in chat_record.details.values():
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ from django.db.models import QuerySet
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.field.common import ObjectField
|
||||
from tools.models.tool import Tool
|
||||
|
|
@ -40,6 +41,8 @@ class FunctionLibNodeParamsSerializer(serializers.Serializer):
|
|||
|
||||
class IToolLibNode(INode):
|
||||
type = 'tool-lib-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return FunctionLibNodeParamsSerializer
|
||||
|
|
|
|||
|
|
@@ -126,6 +126,16 @@ def valid_function(tool_lib, workspace_id):
    if not tool_lib.is_active:
        raise Exception(_("Tool is not active"))


def _filter_file_bytes(data):
    """Recursively filter file_bytes out of every nesting level"""
    if isinstance(data, dict):
        return {k: _filter_file_bytes(v) for k, v in data.items() if k != 'file_bytes'}
    elif isinstance(data, list):
        return [_filter_file_bytes(item) for item in data]
    else:
        return data


class BaseToolLibNodeNode(IToolLibNode):
    def save_context(self, details, workflow_manage):

@@ -138,7 +148,7 @@ class BaseToolLibNodeNode(IToolLibNode):
        tool_lib = QuerySet(Tool).filter(id=tool_lib_id).first()
        valid_function(tool_lib, workspace_id)
        params = {
            field.get('name'): convert_value(
                field.get('name'), field.get('value'), field.get('type'),
                field.get('is_required'),
                field.get('source'), self

@@ -157,14 +167,20 @@ class BaseToolLibNodeNode(IToolLibNode):
            all_params = init_params_default_value | json.loads(rsa_long_decrypt(tool_lib.init_params)) | params
        else:
            all_params = init_params_default_value | params
        if self.node.properties.get('kind') == 'data-source':
            all_params = {**all_params, **self.workflow_params.get('data_source')}
        result = function_executor.exec_code(tool_lib.code, all_params)
        return NodeResult({'result': result}, {}, _write_context=write_context)
        return NodeResult({'result': result},
                          (self.workflow_manage.params.get('knowledge_base') or {}) if self.node.properties.get(
                              'kind') == 'data-source' else {}, _write_context=write_context)

    def get_details(self, index: int, **kwargs):
        result = _filter_file_bytes(self.context.get('result'))

        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            "result": self.context.get('result'),
            "result": result,
            "params": self.context.get('params'),
            'run_time': self.context.get('run_time'),
            'type': self.node.type,

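Note: a quick illustration of what _filter_file_bytes strips from the execution details before they are returned by get_details; the nested payload is invented for the example:

details = {
    'result': [
        {'name': 'a.pdf', 'file_bytes': b'\x00' * 1024, 'meta': {'file_bytes': b'...', 'size': 1024}},
    ],
    'status': 200,
}

# file_bytes disappears at every nesting level, everything else is preserved
assert _filter_file_bytes(details) == {
    'result': [{'name': 'a.pdf', 'meta': {'size': 1024}}],
    'status': 200,
}
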
@ -10,15 +10,15 @@ import re
|
|||
from typing import Type
|
||||
|
||||
from django.core import validators
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
from rest_framework.utils.formatting import lazy_format
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
from common.exception.app_exception import AppApiException
|
||||
from common.field.common import ObjectField
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework.utils.formatting import lazy_format
|
||||
|
||||
|
||||
class InputField(serializers.Serializer):
|
||||
name = serializers.CharField(required=True, label=_('Variable Name'))
|
||||
|
|
@ -53,6 +53,8 @@ class FunctionNodeParamsSerializer(serializers.Serializer):
|
|||
|
||||
class IToolNode(INode):
|
||||
type = 'tool-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return FunctionNodeParamsSerializer
|
||||
|
|
|
|||
|
|
@ -5,11 +5,10 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
||||
|
||||
class VariableListSerializer(serializers.Serializer):
|
||||
v_id = serializers.CharField(required=True, label=_("Variable id"))
|
||||
variable = serializers.ListField(required=True, label=_("Variable"))
|
||||
|
|
@ -29,15 +28,13 @@ class VariableAggregationNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IVariableAggregation(INode):
|
||||
type = 'variable-aggregation-node'
|
||||
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return VariableAggregationNodeSerializer
|
||||
|
||||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self,strategy,group_list,**kwargs) -> NodeResult:
|
||||
def execute(self, strategy, group_list, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@@ -9,6 +9,15 @@
from application.flow.i_step_node import NodeResult
from application.flow.step_node.variable_aggregation_node.i_variable_aggregation_node import IVariableAggregation


def _filter_file_bytes(data):
    """Recursively filter file_bytes out of every nesting level"""
    if isinstance(data, dict):
        return {k: _filter_file_bytes(v) for k, v in data.items() if k != 'file_bytes'}
    elif isinstance(data, list):
        return [_filter_file_bytes(item) for item in data]
    else:
        return data


class BaseVariableAggregationNode(IVariableAggregation):

@@ -63,14 +72,16 @@ class BaseVariableAggregationNode(IVariableAggregation):
            {'result': result, 'strategy': strategy, 'group_list': self.reset_group_list(group_list), **result}, {})

    def get_details(self, index: int, **kwargs):
        result = _filter_file_bytes(self.context.get('result'))
        group_list = _filter_file_bytes(self.context.get('group_list'))
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'type': self.node.type,
            'result': self.context.get('result'),
            'result': result,
            'strategy': self.context.get('strategy'),
            'group_list': self.context.get('group_list'),
            'group_list': group_list,
            'status': self.status,
            'err_message': self.err_message
        }

|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -15,6 +16,8 @@ class VariableAssignNodeParamsSerializer(serializers.Serializer):
|
|||
|
||||
class IVariableAssignNode(INode):
|
||||
type = 'variable-assign-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return VariableAssignNodeParamsSerializer
|
||||
|
|
@ -22,5 +25,5 @@ class IVariableAssignNode(INode):
|
|||
def _run(self):
|
||||
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, variable_list, stream, **kwargs) -> NodeResult:
|
||||
def execute(self, variable_list, **kwargs) -> NodeResult:
|
||||
pass
|
||||
|
|
|
|||
|
|
@ -58,7 +58,7 @@ class BaseVariableAssignNode(IVariableAssignNode):
|
|||
result['output_value'] = reference
|
||||
return result
|
||||
|
||||
def execute(self, variable_list, stream, **kwargs) -> NodeResult:
|
||||
def execute(self, variable_list, **kwargs) -> NodeResult:
|
||||
#
|
||||
result_list = []
|
||||
is_chat = False
|
||||
|
|
|
|||
|
|
@ -5,6 +5,7 @@ from typing import Type
|
|||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
|
||||
|
|
@ -18,6 +19,8 @@ class VariableSplittingNodeParamsSerializer(serializers.Serializer):
|
|||
|
||||
class IVariableSplittingNode(INode):
|
||||
type = 'variable-splitting-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return VariableSplittingNodeParamsSerializer
|
||||
|
|
|
|||
|
|
@ -2,12 +2,12 @@
|
|||
|
||||
from typing import Type
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import WorkflowMode
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class VideoUnderstandNodeSerializer(serializers.Serializer):
|
||||
model_id = serializers.CharField(required=True, label=_("Model id"))
|
||||
|
|
@ -30,6 +30,8 @@ class VideoUnderstandNodeSerializer(serializers.Serializer):
|
|||
|
||||
class IVideoUnderstandNode(INode):
|
||||
type = 'video-understand-node'
|
||||
support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE,
|
||||
WorkflowMode.KNOWLEDGE_LOOP]
|
||||
|
||||
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
|
||||
return VideoUnderstandNodeSerializer
|
||||
|
|
@ -37,9 +39,15 @@ class IVideoUnderstandNode(INode):
|
|||
def _run(self):
|
||||
res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('video_list')[0],
|
||||
self.node_params_serializer.data.get('video_list')[1:])
|
||||
return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
|
||||
if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__(
|
||||
self.workflow_manage.flow.workflow_mode):
|
||||
return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data,
|
||||
**{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None})
|
||||
else:
|
||||
return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data)
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
video,
|
||||
|
|
|
|||
|
|
@ -70,7 +70,7 @@ class BaseVideoUnderstandNode(IVideoUnderstandNode):
|
|||
if self.node_params.get('is_result', False):
|
||||
self.answer_text = details.get('answer')
|
||||
|
||||
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
|
||||
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream,
|
||||
model_params_setting,
|
||||
chat_record_id,
|
||||
video,
|
||||
|
|
|
|||
|
|
@@ -21,7 +21,7 @@ from rest_framework import status

from application.flow import tools
from application.flow.common import Workflow
from application.flow.i_step_node import INode, WorkFlowPostHandler, NodeResult
from application.flow.i_step_node import INode, WorkFlowPostHandler, NodeResult, FlowParamsSerializer
from application.flow.step_node import get_node
from common.handle.base_to_response import BaseToResponse
from common.handle.impl.response.system_to_response import SystemToResponse

@@ -316,7 +316,7 @@ class WorkflowManage:
        except Exception as e:
            return True

    def await_result(self):
    def await_result(self, is_cleanup=True):
        try:
            while self.is_run():
            while True:

@@ -339,12 +339,13 @@ class WorkflowManage:
            answer_tokens = sum([row.get('answer_tokens') for row in details.values() if
                                 'answer_tokens' in row and row.get('answer_tokens') is not None])
            self.work_flow_post_handler.handler(self)
            yield self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
                                                                 self.params['chat_record_id'],
            yield self.base_to_response.to_stream_chunk_response(self.params.get('chat_id'),
                                                                 self.params.get('chat_record_id'),
                                                                 '',
                                                                 [],
                                                                 '', True, message_tokens, answer_tokens, {})
            self._cleanup()
            if is_cleanup:
                self._cleanup()

    def run_chain_async(self, current_node, node_result_future, language='zh'):
        future = executor.submit(self.run_chain_manage, current_node, node_result_future, language)

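Note: the new is_cleanup flag lets the caller defer teardown, presumably so the knowledge workflow poller can still read runtime details after the stream is drained. A hedged usage sketch, assuming workflow_manage is a WorkflowManage instance:

for chunk in workflow_manage.await_result(is_cleanup=False):
    pass  # e.g. forward each chunk to the client
details = workflow_manage.get_runtime_details()
workflow_manage._cleanup()
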
|
@ -354,7 +355,7 @@ class WorkflowManage:
|
|||
translation.activate(language)
|
||||
if current_node is None:
|
||||
start_node = self.get_start_node()
|
||||
current_node = get_node(start_node.type)(start_node, self.params, self)
|
||||
current_node = get_node(start_node.type, self.flow.workflow_mode)(start_node, self.params, self)
|
||||
self.node_chunk_manage.add_node_chunk(current_node.node_chunk)
|
||||
# 添加节点
|
||||
self.append_node(current_node)
|
||||
|
|
@ -440,8 +441,8 @@ class WorkflowManage:
|
|||
node_type = r.get("node_type")
|
||||
view_type = r.get('view_type')
|
||||
reasoning_content = r.get('reasoning_content')
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params.get('chat_id'),
|
||||
self.params.get('chat_record_id'),
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
content, False, 0, 0,
|
||||
|
|
@ -455,8 +456,8 @@ class WorkflowManage:
|
|||
'node_status': "SUCCESS"})
|
||||
current_node.node_chunk.add_chunk(chunk)
|
||||
chunk = (self.base_to_response
|
||||
.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
.to_stream_chunk_response(self.params.get('chat_id'),
|
||||
self.params.get('chat_record_id'),
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
'', False, 0, 0, {'node_is_end': True,
|
||||
|
|
@ -473,9 +474,10 @@ class WorkflowManage:
|
|||
return current_result
|
||||
except Exception as e:
|
||||
# 添加节点
|
||||
|
||||
maxkb_logger.error(f'Exception: {e}', exc_info=True)
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
|
||||
self.params['chat_record_id'],
|
||||
chunk = self.base_to_response.to_stream_chunk_response(self.params.get('chat_id'),
|
||||
self.params.get('chat_id'),
|
||||
current_node.id,
|
||||
current_node.up_node_id_list,
|
||||
'Exception:' + str(e), False, 0, 0,
|
||||
|
|
@@ -543,7 +545,7 @@ class WorkflowManage:
        return self._has_next_node(self.get_start_node() if self.current_node is None else self.current_node,
                                   node_result)

    def get_runtime_details(self):
    def get_runtime_details(self, get_details=lambda n, index: n.get_details(index)):
        details_result = {}
        for index in range(len(self.node_context)):
            node = self.node_context[index]

@@ -552,7 +554,7 @@ class WorkflowManage:
            if details is not None and self.start_node.runtime_node_id != node.runtime_node_id:
                details_result[node.runtime_node_id] = details
                continue
            details = node.get_details(index)
            details = get_details(node, index)
            details['node_id'] = node.id
            details['up_node_id_list'] = node.up_node_id_list
            details['runtime_node_id'] = node.runtime_node_id

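Note: get_runtime_details now accepts a get_details callable so callers can post-process per-node details. A hedged example, assuming a WorkflowManage instance and the recursive _filter_file_bytes helper from this diff are in scope:

details = workflow_manage.get_runtime_details(
    get_details=lambda node, index: _filter_file_bytes(node.get_details(index))
)
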
|
@ -736,8 +738,9 @@ class WorkflowManage:
|
|||
get_node_params=lambda node: node.properties.get('node_data')):
|
||||
for node in self.flow.nodes:
|
||||
if node.id == node_id:
|
||||
node_instance = get_node(node.type)(node,
|
||||
self.params, self, up_node_id_list, get_node_params)
|
||||
node_instance = get_node(node.type, self.flow.workflow_mode)(node,
|
||||
self.params, self, up_node_id_list,
|
||||
get_node_params)
|
||||
return node_instance
|
||||
return None
|
||||
|
||||
|
|
@ -750,3 +753,6 @@ class WorkflowManage:
|
|||
def get_node_reference(self, reference_address: Dict):
|
||||
node = self.get_node_by_id(reference_address.get('node_id'))
|
||||
return node.context[reference_address.get('node_field')]
|
||||
|
||||
def get_params_serializer_class(self):
|
||||
return FlowParamsSerializer
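
The widened `get_runtime_details(self, get_details=...)` signature lets callers post-process each node's detail dict without subclassing `WorkflowManage`. A minimal sketch of one possible use (here `workflow_manage` stands for an existing `WorkflowManage` instance, and `file_bytes` is only an illustrative key name):

```python
# Sketch only: drop a hypothetical heavy field from every node's details
# before they are returned; relies solely on the signature shown above.
def details_without_file_bytes(node, index):
    details = node.get_details(index)
    details.pop('file_bytes', None)  # 'file_bytes' is an assumed example key
    return details

runtime_details = workflow_manage.get_runtime_details(get_details=details_without_file_bytes)
```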
|
||||
|
|
|
|||
|
|
@ -39,9 +39,12 @@ class Group(Enum):
|
|||
SYSTEM_RES_KNOWLEDGE = "SYSTEM_RESOURCE_KNOWLEDGE"
|
||||
KNOWLEDGE_HIT_TEST = "KNOWLEDGE_HIT_TEST"
|
||||
KNOWLEDGE_DOCUMENT = "KNOWLEDGE_DOCUMENT"
|
||||
KNOWLEDGE_WORKFLOW = "KNOWLEDGE_WORKFLOW"
|
||||
KNOWLEDGE_TAG = "KNOWLEDGE_TAG"
|
||||
SYSTEM_KNOWLEDGE_DOCUMENT = "SYSTEM_KNOWLEDGE_DOCUMENT"
|
||||
SYSTEM_KNOWLEDGE_WORKFLOW = "SYSTEM_KNOWLEDGE_WORKFLOW"
|
||||
SYSTEM_RES_KNOWLEDGE_DOCUMENT = "SYSTEM_RESOURCE_KNOWLEDGE_DOCUMENT"
|
||||
SYSTEM_RES_KNOWLEDGE_WORKFLOW = "SYSTEM_RESOURCE_KNOWLEDGE_WORKFLOW"
|
||||
SYSTEM_RES_KNOWLEDGE_TAG = "SYSTEM_RES_KNOWLEDGE_TAG"
|
||||
SYSTEM_KNOWLEDGE_TAG = "SYSTEM_KNOWLEDGE_TAG"
|
||||
|
||||
|
|
@ -328,6 +331,7 @@ Permission_Label = {
|
|||
Group.APPLICATION.value: _("Application"),
|
||||
Group.KNOWLEDGE.value: _("Knowledge"),
|
||||
Group.KNOWLEDGE_DOCUMENT.value: _("Document"),
|
||||
Group.KNOWLEDGE_WORKFLOW.value: _("Workflow"),
|
||||
Group.KNOWLEDGE_TAG.value: _("Tag"),
|
||||
Group.KNOWLEDGE_PROBLEM.value: _("Problem"),
|
||||
Group.KNOWLEDGE_HIT_TEST.value: _("Hit-Test"),
|
||||
|
|
@ -375,6 +379,7 @@ Permission_Label = {
|
|||
Group.SYSTEM_MODEL.value: _("Model"),
|
||||
Group.SYSTEM_KNOWLEDGE.value: _("Knowledge"),
|
||||
Group.SYSTEM_KNOWLEDGE_DOCUMENT.value: _("Document"),
|
||||
Group.SYSTEM_KNOWLEDGE_WORKFLOW.value: _("Workflow"),
|
||||
Group.SYSTEM_KNOWLEDGE_TAG.value: _("Tag"),
|
||||
Group.SYSTEM_KNOWLEDGE_PROBLEM.value: _("Problem"),
|
||||
Group.SYSTEM_KNOWLEDGE_HIT_TEST.value: _("Hit-Test"),
|
||||
|
|
@ -383,6 +388,7 @@ Permission_Label = {
|
|||
Group.SYSTEM_RES_MODEL.value: _("Model"),
|
||||
Group.SYSTEM_RES_KNOWLEDGE.value: _("Knowledge"),
|
||||
Group.SYSTEM_RES_KNOWLEDGE_DOCUMENT.value: _("Document"),
|
||||
Group.SYSTEM_RES_KNOWLEDGE_WORKFLOW.value: _("Workflow"),
|
||||
Group.SYSTEM_RES_KNOWLEDGE_TAG.value: _("Tag"),
|
||||
Group.SYSTEM_RES_KNOWLEDGE_PROBLEM.value: _("Problem"),
|
||||
Group.SYSTEM_RES_KNOWLEDGE_HIT_TEST.value: _("Hit-Test"),
|
||||
|
|
@ -616,6 +622,16 @@ class PermissionConstants(Enum):
|
|||
resource_permission_group_list=[ResourcePermissionConst.KNOWLEDGE_MANGE],
|
||||
parent_group=[WorkspaceGroup.KNOWLEDGE, UserGroup.KNOWLEDGE]
|
||||
)
|
||||
KNOWLEDGE_WORKFLOW_READ = Permission(
|
||||
group=Group.KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN, RoleConstants.USER],
|
||||
resource_permission_group_list=[ResourcePermissionConst.KNOWLEDGE_VIEW],
|
||||
parent_group=[WorkspaceGroup.KNOWLEDGE, UserGroup.KNOWLEDGE]
|
||||
)
|
||||
KNOWLEDGE_WORKFLOW_EDIT = Permission(
|
||||
group=Group.KNOWLEDGE_WORKFLOW, operate=Operate.EDIT, role_list=[RoleConstants.ADMIN, RoleConstants.USER],
|
||||
resource_permission_group_list=[ResourcePermissionConst.KNOWLEDGE_MANGE],
|
||||
parent_group=[WorkspaceGroup.KNOWLEDGE, UserGroup.KNOWLEDGE]
|
||||
)
|
||||
KNOWLEDGE_DOCUMENT_READ = Permission(
|
||||
group=Group.KNOWLEDGE_DOCUMENT, operate=Operate.READ,
|
||||
role_list=[RoleConstants.ADMIN, RoleConstants.USER],
|
||||
|
|
@ -1209,6 +1225,14 @@ class PermissionConstants(Enum):
|
|||
group=Group.SYSTEM_KNOWLEDGE, operate=Operate.DELETE, role_list=[RoleConstants.ADMIN],
|
||||
parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE"
|
||||
)
|
||||
SHARED_KNOWLEDGE_WORKFLOW_READ = Permission(
|
||||
group=Group.SYSTEM_KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN],
|
||||
parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE"
|
||||
)
|
||||
SHARED_KNOWLEDGE_WORKFLOW_EDIT = Permission(
|
||||
group=Group.SYSTEM_KNOWLEDGE_WORKFLOW, operate=Operate.EDIT, role_list=[RoleConstants.ADMIN],
|
||||
parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE"
|
||||
)
|
||||
SHARED_KNOWLEDGE_DOCUMENT_READ = Permission(
|
||||
group=Group.SYSTEM_KNOWLEDGE_DOCUMENT, operate=Operate.READ, role_list=[RoleConstants.ADMIN],
|
||||
parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE"
|
||||
|
|
@@ -1437,6 +1461,14 @@ class PermissionConstants(Enum):
parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE"
)
# 工作流
RESOURCE_KNOWLEDGE_WORKFLOW_READ = Permission(
group=Group.SYSTEM_RES_KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN],
parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE"
)
RESOURCE_KNOWLEDGE_WORKFLOW_EDIT = Permission(
group=Group.SYSTEM_RES_KNOWLEDGE_WORKFLOW, operate=Operate.EDIT, role_list=[RoleConstants.ADMIN],
parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE"
)
RESOURCE_KNOWLEDGE_DOCUMENT_READ = Permission(
group=Group.SYSTEM_RES_KNOWLEDGE_DOCUMENT, operate=Operate.READ, role_list=[RoleConstants.ADMIN],
parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE"
|
||||
|
|
|
|||
|
|
@@ -112,5 +112,5 @@ class XlsxParseTableHandle(BaseParseTableHandle):

return md_tables
except Exception as e:
max_kb.error(f'excel split handle error: {e}')
maxkb_logger.error(f'excel split handle error: {e}')
return f'error: {e}'
|
||||
|
|
|
|||
|
|
@@ -114,7 +114,11 @@ def get_image_id_func():

title_font_list = [
[36, 100],
[30, 36]
[26, 36],
[24, 26],
[22, 24],
[18, 22],
[16, 18]
]

@@ -125,12 +129,12 @@ def get_title_level(paragraph: Paragraph):
if psn.startswith('Heading') or psn.startswith('TOC 标题') or psn.startswith('标题'):
return int(psn.replace("Heading ", '').replace('TOC 标题', '').replace('标题',
''))
if len(paragraph.runs) == 1:
if len(paragraph.runs) >= 1:
font_size = paragraph.runs[0].font.size
pt = font_size.pt
if pt >= 30:
if pt >= 16:
for _value, index in zip(title_font_list, range(len(title_font_list))):
if pt >= _value[0] and pt < _value[1]:
if pt >= _value[0] and pt < _value[1] and any([run.font.bold for run in paragraph.runs]):
return index + 1
except Exception as e:
pass
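
The heuristic above now treats any bold run whose font size falls into one of the `title_font_list` buckets as a heading, with the bucket index giving the level. A small standalone sketch of that mapping (simplified, no python-docx objects involved):

```python
# Simplified sketch of the font-size bucketing used by get_title_level above.
title_font_list = [[36, 100], [26, 36], [24, 26], [22, 24], [18, 22], [16, 18]]

def level_for(pt: float, is_bold: bool):
    if pt >= 16 and is_bold:
        for index, (low, high) in enumerate(title_font_list):
            if low <= pt < high:
                return index + 1  # 1 corresponds to the largest font bucket
    return None

assert level_for(28.0, True) == 2      # 28pt bold -> second bucket -> level 2
assert level_for(28.0, False) is None  # non-bold runs are no longer promoted
```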
|
||||
|
|
|
|||
|
|
@@ -2,6 +2,7 @@
import ast
import base64
import gzip
import io
import json
import os
import socket

@@ -76,10 +77,12 @@ class ToolExecutor:
f.write(f"SANDBOX_PYTHON_ALLOW_SUBPROCESS={allow_subprocess}\n")
os.system(f"chmod -R 550 {self.sandbox_path}")

def exec_code(self, code_str, keywords):
def exec_code(self, code_str, keywords, function_name=None):
_id = str(uuid.uuid7())
success = '{"code":200,"msg":"成功","data":exec_result}'
err = '{"code":500,"msg":str(e),"data":None}'
action_function = f'({function_name !a}, locals_v.get({function_name !a}))' if function_name else 'locals_v.popitem()'
result_path = f'{self.sandbox_path}/result/{_id}.result'
python_paths = CONFIG.get_sandbox_python_package_paths().split(',')
_exec_code = f"""
try:

@@ -92,7 +95,7 @@ try:
globals_v={'{}'}
os.environ.clear()
exec({dedent(code_str)!a}, globals_v, locals_v)
f_name, f = locals_v.popitem()
f_name, f = {action_function}
for local in locals_v:
globals_v[local] = locals_v[local]
exec_result=f(**keywords)

@@ -216,7 +219,10 @@ exec({dedent(code)!a})
else:
tool_config = {
'command': sys.executable,
'args': f'import base64,gzip; exec(gzip.decompress(base64.b64decode(\'{compressed_and_base64_encoded_code_str}\')).decode())',
'args': [
'-c',
f'import base64,gzip; exec(gzip.decompress(base64.b64decode(\'{compressed_and_base64_encoded_code_str}\')).decode())',
],
'transport': 'stdio',
}
return tool_config
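
Two related changes above: `exec_code` can now target a specific function in the submitted code via `function_name` (falling back to `locals_v.popitem()` as before), and the MCP `tool_config` passes the bootstrap through `['-c', ...]` so the interpreter receives it as two argv entries rather than one unsplit string. A small sketch of what the generated `action_function` expression amounts to (the sample locals and function names are made up):

```python
# Sketch: mirrors how the generated sandbox code picks the callable to run.
def pick_callable(locals_v: dict, function_name=None):
    if function_name:
        # equivalent of f'({function_name!a}, locals_v.get({function_name!a}))'
        return function_name, locals_v.get(function_name)
    # previous behaviour: whatever was defined last in the user code
    return locals_v.popitem()

locals_v = {'helper': lambda: None, 'get_form_list': lambda **kw: kw}
name, fn = pick_callable(dict(locals_v), 'get_form_list')
assert name == 'get_form_list' and fn(knowledge_id='demo') == {'knowledge_id': 'demo'}
```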
|
||||
|
|
|
|||
|
|
@ -0,0 +1,11 @@
|
|||
# coding=utf-8
|
||||
|
||||
from common.mixins.api_mixin import APIMixin
|
||||
|
||||
|
||||
class KnowledgeWorkflowApi(APIMixin):
|
||||
pass
|
||||
|
||||
|
||||
class KnowledgeWorkflowVersionApi(APIMixin):
|
||||
pass
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
# Generated by Django 5.2.4 on 2025-11-04 05:54
|
||||
|
||||
import django.db.models.deletion
|
||||
import uuid_utils.compat
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('knowledge', '0003_tag_documenttag'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='document',
|
||||
name='type',
|
||||
field=models.IntegerField(choices=[(0, '通用类型'), (1, 'web站点类型'), (2, '飞书类型'), (3, '语雀类型'), (4, '工作流类型')], db_index=True, default=0, verbose_name='类型'),
|
||||
),
|
||||
migrations.AlterField(
|
||||
model_name='knowledge',
|
||||
name='type',
|
||||
field=models.IntegerField(choices=[(0, '通用类型'), (1, 'web站点类型'), (2, '飞书类型'), (3, '语雀类型'), (4, '工作流类型')], db_index=True, default=0, verbose_name='类型'),
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='KnowledgeWorkflow',
|
||||
fields=[
|
||||
('create_time', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间')),
|
||||
('update_time', models.DateTimeField(auto_now=True, db_index=True, verbose_name='修改时间')),
|
||||
('id', models.UUIDField(default=uuid_utils.compat.uuid7, editable=False, primary_key=True, serialize=False, verbose_name='主键id')),
|
||||
('workspace_id', models.CharField(db_index=True, default='default', max_length=64, verbose_name='工作空间id')),
|
||||
('work_flow', models.JSONField(default=dict, verbose_name='工作流数据')),
|
||||
('is_publish', models.BooleanField(db_index=True, default=False, verbose_name='是否发布')),
|
||||
('publish_time', models.DateTimeField(blank=True, null=True, verbose_name='发布时间')),
|
||||
('knowledge', models.OneToOneField(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, related_name='workflow', to='knowledge.knowledge', verbose_name='知识库')),
|
||||
],
|
||||
options={
|
||||
'db_table': 'knowledge_workflow',
|
||||
},
|
||||
),
|
||||
migrations.CreateModel(
|
||||
name='KnowledgeWorkflowVersion',
|
||||
fields=[
|
||||
('create_time', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间')),
|
||||
('update_time', models.DateTimeField(auto_now=True, db_index=True, verbose_name='修改时间')),
|
||||
('id', models.UUIDField(default=uuid_utils.compat.uuid7, editable=False, primary_key=True, serialize=False, verbose_name='主键id')),
|
||||
('workspace_id', models.CharField(db_index=True, default='default', max_length=64, verbose_name='工作空间id')),
|
||||
('work_flow', models.JSONField(default=dict, verbose_name='工作流数据')),
|
||||
('publish_user_id', models.UUIDField(default=None, null=True, verbose_name='发布者id')),
|
||||
('publish_user_name', models.CharField(default='', max_length=128, verbose_name='发布者名称')),
|
||||
('knowledge', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='knowledge.knowledge', verbose_name='知识库')),
|
||||
('workflow', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='knowledge.knowledgeworkflow', verbose_name='工作流')),
|
||||
],
|
||||
options={
|
||||
'db_table': 'knowledge_workflow_version',
|
||||
'unique_together': {('knowledge',)},
|
||||
},
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,32 @@
|
|||
# Generated by Django 5.2.8 on 2025-11-19 06:06
|
||||
|
||||
import common.encoder.encoder
|
||||
import django.db.models.deletion
|
||||
import uuid_utils.compat
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('knowledge', '0004_alter_document_type_alter_knowledge_type_and_more'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.CreateModel(
|
||||
name='KnowledgeAction',
|
||||
fields=[
|
||||
('create_time', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间')),
|
||||
('update_time', models.DateTimeField(auto_now=True, db_index=True, verbose_name='修改时间')),
|
||||
('id', models.UUIDField(default=uuid_utils.compat.uuid7, editable=False, primary_key=True, serialize=False, verbose_name='主键id')),
|
||||
('state', models.CharField(choices=[('PENDING', 'Pending'), ('STARTED', 'Started'), ('SUCCESS', 'Success'), ('FAILURE', 'Failure'), ('REVOKE', 'Revoke'), ('REVOKED', 'Revoked')], default='STARTED', max_length=20, verbose_name='状态')),
|
||||
('details', models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='执行详情')),
|
||||
('run_time', models.FloatField(default=0, verbose_name='运行时长')),
|
||||
('meta', models.JSONField(default=dict, verbose_name='元数据')),
|
||||
('knowledge', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.DO_NOTHING, to='knowledge.knowledge', verbose_name='知识库')),
|
||||
],
|
||||
options={
|
||||
'db_table': 'knowledge_action',
|
||||
},
|
||||
),
|
||||
]
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
# Generated by Django 5.2.8 on 2025-11-24 07:09
|
||||
|
||||
import django.contrib.postgres.fields
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('knowledge', '0005_knowledgeaction'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AddField(
|
||||
model_name='paragraph',
|
||||
name='chunks',
|
||||
field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(), default=list, size=None, verbose_name='块'),
|
||||
),
|
||||
]
|
||||
|
|
@ -3,6 +3,7 @@ import zipfile
|
|||
from enum import Enum
|
||||
|
||||
import uuid_utils.compat as uuid
|
||||
from django.contrib.postgres.fields import ArrayField
|
||||
from django.contrib.postgres.search import SearchVectorField
|
||||
from django.db import models
|
||||
from django.db.models import QuerySet
|
||||
|
|
@ -23,6 +24,7 @@ class KnowledgeType(models.IntegerChoices):
|
|||
WEB = 1, 'web站点类型'
|
||||
LARK = 2, '飞书类型'
|
||||
YUQUE = 3, '语雀类型'
|
||||
WORKFLOW = 4, '工作流类型'
|
||||
|
||||
|
||||
class TaskType(Enum):
|
||||
|
|
@ -135,6 +137,40 @@ class Knowledge(AppModelMixin):
|
|||
db_table = "knowledge"
|
||||
|
||||
|
||||
class KnowledgeWorkflow(AppModelMixin):
|
||||
"""
|
||||
知识库工作流表
|
||||
"""
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid7, editable=False, verbose_name="主键id")
|
||||
knowledge = models.OneToOneField(Knowledge, on_delete=models.CASCADE, verbose_name="知识库",
|
||||
db_constraint=False, related_name='workflow')
|
||||
workspace_id = models.CharField(max_length=64, verbose_name="工作空间id", default="default", db_index=True)
|
||||
work_flow = models.JSONField(verbose_name="工作流数据", default=dict)
|
||||
is_publish = models.BooleanField(verbose_name="是否发布", default=False, db_index=True)
|
||||
publish_time = models.DateTimeField(verbose_name="发布时间", null=True, blank=True)
|
||||
|
||||
class Meta:
|
||||
db_table = "knowledge_workflow"
|
||||
|
||||
|
||||
class KnowledgeWorkflowVersion(AppModelMixin):
|
||||
"""
|
||||
知识库工作流版本表 - 记录工作流历史版本
|
||||
"""
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid7, editable=False, verbose_name="主键id")
|
||||
knowledge = models.ForeignKey(Knowledge, on_delete=models.CASCADE, verbose_name="知识库", db_constraint=False)
|
||||
workflow = models.ForeignKey(KnowledgeWorkflow, on_delete=models.CASCADE, verbose_name="工作流",
|
||||
db_constraint=False, related_name='versions')
|
||||
workspace_id = models.CharField(max_length=64, verbose_name="工作空间id", default="default", db_index=True)
|
||||
work_flow = models.JSONField(verbose_name="工作流数据", default=dict)
|
||||
publish_user_id = models.UUIDField(verbose_name="发布者id", max_length=128, default=None, null=True)
|
||||
publish_user_name = models.CharField(verbose_name="发布者名称", max_length=128, default="")
|
||||
|
||||
class Meta:
|
||||
db_table = "knowledge_workflow_version"
|
||||
unique_together = [['knowledge']] # 同一知识库的版本号唯一
|
||||
|
||||
|
||||
def get_default_status():
|
||||
return Status('').__str__()
|
||||
|
||||
|
|
@ -162,6 +198,7 @@ class Document(AppModelMixin):
|
|||
class Meta:
|
||||
db_table = "document"
|
||||
|
||||
|
||||
class Tag(AppModelMixin):
|
||||
"""
|
||||
标签表 - 存储标签的key-value定义
|
||||
|
|
@ -206,6 +243,7 @@ class Paragraph(AppModelMixin):
|
|||
hit_num = models.IntegerField(verbose_name="命中次数", default=0)
|
||||
is_active = models.BooleanField(default=True, db_index=True)
|
||||
position = models.IntegerField(verbose_name="段落顺序", default=0, db_index=True)
|
||||
chunks = ArrayField(verbose_name="块", base_field=models.CharField(), default=list)
|
||||
|
||||
class Meta:
|
||||
db_table = "paragraph"
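
For orientation, the models added above hang together as follows; a minimal ORM sketch (the id is a placeholder, and the query style is illustrative rather than the project's preferred `QuerySet(...)` form):

```python
# Sketch: navigating the new knowledge-workflow models defined in this diff.
from knowledge.models import Knowledge

knowledge = Knowledge.objects.get(id='<knowledge-id>')        # placeholder id
workflow = knowledge.workflow                                 # OneToOneField, related_name='workflow'
graph = workflow.work_flow if workflow.is_publish else None   # published workflow JSON
history = workflow.versions.order_by('-create_time')          # ForeignKey, related_name='versions'

# Paragraph.chunks is a plain ArrayField of strings; pre-split chunks stored here
# are later preferred over re-splitting the text (see the chunk_data hunk below).
```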
|
||||
|
|
|
|||
|
|
@ -0,0 +1,49 @@
|
|||
# coding=utf-8
|
||||
"""
|
||||
@project: MaxKB
|
||||
@Author:虎虎
|
||||
@file: knowledge_action.py
|
||||
@date:2025/11/18 17:59
|
||||
@desc:
|
||||
"""
|
||||
import uuid_utils.compat as uuid
|
||||
|
||||
from django.db import models
|
||||
|
||||
from common.encoder.encoder import SystemEncoder
|
||||
from common.mixins.app_model_mixin import AppModelMixin
|
||||
from knowledge.models import Knowledge
|
||||
|
||||
|
||||
class State(models.TextChoices):
|
||||
# 等待
|
||||
PENDING = 'PENDING'
|
||||
# 执行中
|
||||
STARTED = 'STARTED'
|
||||
# 成功
|
||||
SUCCESS = 'SUCCESS'
|
||||
# 失败
|
||||
FAILURE = 'FAILURE'
|
||||
# 取消任务
|
||||
REVOKE = 'REVOKE'
|
||||
# 取消成功
|
||||
REVOKED = 'REVOKED'
|
||||
|
||||
|
||||
class KnowledgeAction(AppModelMixin):
|
||||
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid7, editable=False, verbose_name="主键id")
|
||||
|
||||
knowledge = models.ForeignKey(Knowledge, on_delete=models.DO_NOTHING, verbose_name="知识库", db_constraint=False)
|
||||
|
||||
state = models.CharField(verbose_name='状态', max_length=20,
|
||||
choices=State.choices,
|
||||
default=State.STARTED)
|
||||
|
||||
details = models.JSONField(verbose_name="执行详情", default=dict, encoder=SystemEncoder)
|
||||
|
||||
run_time = models.FloatField(verbose_name="运行时长", default=0)
|
||||
|
||||
meta = models.JSONField(verbose_name="元数据", default=dict)
|
||||
|
||||
class Meta:
|
||||
db_table = "knowledge_action"
|
||||
|
|
@ -31,7 +31,7 @@ from common.utils.fork import Fork, ChildLink
|
|||
from common.utils.logger import maxkb_logger
|
||||
from common.utils.split_model import get_split_model
|
||||
from knowledge.models import Knowledge, KnowledgeScope, KnowledgeType, Document, Paragraph, Problem, \
|
||||
ProblemParagraphMapping, TaskType, State, SearchMode, KnowledgeFolder, File, Tag
|
||||
ProblemParagraphMapping, TaskType, State, SearchMode, KnowledgeFolder, File, Tag, KnowledgeWorkflow
|
||||
from knowledge.serializers.common import ProblemParagraphManage, drop_knowledge_index, \
|
||||
get_embedding_model_id_by_knowledge_id, MetaSerializer, \
|
||||
GenerateRelatedSerializer, get_embedding_model_by_knowledge_id, list_paragraph, write_image, zip_dir
|
||||
|
|
@ -342,8 +342,15 @@ class KnowledgeSerializer(serializers.Serializer):
|
|||
)
|
||||
)
|
||||
), with_search_one=True)
|
||||
workflow = {}
|
||||
if knowledge_dict.get('type') == 4:
|
||||
from knowledge.models import KnowledgeWorkflow
|
||||
k = QuerySet(KnowledgeWorkflow).filter(knowledge_id=knowledge_dict.get('id')).first()
|
||||
if k:
|
||||
workflow = k.work_flow
|
||||
return {
|
||||
**knowledge_dict,
|
||||
'work_flow': workflow,
|
||||
'meta': json.loads(knowledge_dict.get('meta', '{}')),
|
||||
'application_id_list': list(filter(
|
||||
lambda application_id: all_application_list.__contains__(application_id),
|
||||
|
|
@ -406,7 +413,15 @@ class KnowledgeSerializer(serializers.Serializer):
|
|||
application_id=application_id, knowledge_id=self.data.get('knowledge_id')
|
||||
) for application_id in application_id_list
|
||||
]) if len(application_id_list) > 0 else None
|
||||
|
||||
if instance.get("work_flow"):
|
||||
QuerySet(KnowledgeWorkflow).update_or_create(knowledge_id=self.data.get("knowledge_id"),
|
||||
create_defaults={'id': uuid.uuid7(),
|
||||
'knowledge_id': self.data.get("knowledge_id"),
|
||||
"workspace_id": self.data.get('workspace_id'),
|
||||
'work_flow': instance.get('work_flow', {}), },
|
||||
defaults={
|
||||
'work_flow': instance.get('work_flow')
|
||||
})
|
||||
knowledge.save()
|
||||
if select_one:
|
||||
return self.one()
|
||||
|
|
|
|||
|
|
@ -0,0 +1,187 @@
|
|||
# coding=utf-8
|
||||
import asyncio
|
||||
import json
|
||||
from typing import Dict
|
||||
|
||||
import uuid_utils.compat as uuid
|
||||
from django.db import transaction
|
||||
from django.db.models import QuerySet
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.common import Workflow, WorkflowMode
|
||||
from application.flow.i_step_node import KnowledgeWorkflowPostHandler
|
||||
from application.flow.knowledge_workflow_manage import KnowledgeWorkflowManage
|
||||
from application.flow.step_node import get_node
|
||||
from application.serializers.application import get_mcp_tools
|
||||
from common.exception.app_exception import AppApiException
|
||||
from common.utils.rsa_util import rsa_long_decrypt
|
||||
from common.utils.tool_code import ToolExecutor
|
||||
from knowledge.models import KnowledgeScope, Knowledge, KnowledgeType, KnowledgeWorkflow
|
||||
from knowledge.models.knowledge_action import KnowledgeAction, State
|
||||
from knowledge.serializers.knowledge import KnowledgeModelSerializer
|
||||
from maxkb.const import CONFIG
|
||||
from system_manage.models import AuthTargetType
|
||||
from system_manage.serializers.user_resource_permission import UserResourcePermissionSerializer
|
||||
from tools.models import Tool
|
||||
|
||||
tool_executor = ToolExecutor(CONFIG.get('SANDBOX'))
|
||||
|
||||
|
||||
class KnowledgeWorkflowModelSerializer(serializers.ModelSerializer):
|
||||
class Meta:
|
||||
model = KnowledgeWorkflow
|
||||
fields = '__all__'
|
||||
|
||||
|
||||
class KnowledgeWorkflowActionSerializer(serializers.Serializer):
|
||||
workspace_id = serializers.CharField(required=True, label=_('workspace id'))
|
||||
knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id'))
|
||||
|
||||
def action(self, instance: Dict, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
knowledge_workflow = QuerySet(KnowledgeWorkflow).filter(knowledge_id=self.data.get("knowledge_id")).first()
|
||||
knowledge_action_id = uuid.uuid7()
|
||||
KnowledgeAction(id=knowledge_action_id, knowledge_id=self.data.get("knowledge_id"), state=State.STARTED).save()
|
||||
work_flow_manage = KnowledgeWorkflowManage(
|
||||
Workflow.new_instance(knowledge_workflow.work_flow, WorkflowMode.KNOWLEDGE),
|
||||
{'knowledge_id': self.data.get("knowledge_id"), 'knowledge_action_id': knowledge_action_id, 'stream': True,
|
||||
'workspace_id': self.data.get("workspace_id"),
|
||||
**instance},
|
||||
KnowledgeWorkflowPostHandler(None, knowledge_action_id))
|
||||
work_flow_manage.run()
|
||||
return {'id': knowledge_action_id, 'knowledge_id': self.data.get("knowledge_id"), 'state': State.STARTED,
|
||||
'details': {}}
|
||||
|
||||
class Operate(serializers.Serializer):
|
||||
workspace_id = serializers.CharField(required=True, label=_('workspace id'))
|
||||
knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id'))
|
||||
id = serializers.UUIDField(required=True, label=_('knowledge action id'))
|
||||
|
||||
def one(self, is_valid=True):
|
||||
if is_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
knowledge_action_id = self.data.get("id")
|
||||
knowledge_action = QuerySet(KnowledgeAction).filter(id=knowledge_action_id).first()
|
||||
return {'id': knowledge_action_id, 'knowledge_id': knowledge_action.knowledge_id,
|
||||
'state': knowledge_action.state,
|
||||
'details': knowledge_action.details}
|
||||
|
||||
|
||||
class KnowledgeWorkflowSerializer(serializers.Serializer):
|
||||
class Datasource(serializers.Serializer):
|
||||
type = serializers.CharField(required=True, label=_('type'))
|
||||
id = serializers.CharField(required=True, label=_('type'))
|
||||
params = serializers.DictField(required=True, label="")
|
||||
function_name = serializers.CharField(required=True, label=_('function_name'))
|
||||
|
||||
def action(self):
|
||||
self.is_valid(raise_exception=True)
|
||||
if self.data.get('type') == 'local':
|
||||
node = get_node(self.data.get('id'), WorkflowMode.KNOWLEDGE)
|
||||
return node.__getattribute__(node, self.data.get("function_name"))(**self.data.get("params"))
|
||||
elif self.data.get('type') == 'tool':
|
||||
tool = QuerySet(Tool).filter(id=self.data.get("id")).first()
|
||||
init_params = json.loads(rsa_long_decrypt(tool.init_params))
|
||||
return tool_executor.exec_code(tool.code, {**init_params, **self.data.get('params')},
|
||||
self.data.get('function_name'))
|
||||
|
||||
class Create(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, label=_('user id'))
|
||||
workspace_id = serializers.CharField(required=True, label=_('workspace id'))
|
||||
scope = serializers.ChoiceField(
|
||||
required=False, label=_('scope'), default=KnowledgeScope.WORKSPACE, choices=KnowledgeScope.choices
|
||||
)
|
||||
|
||||
@transaction.atomic
|
||||
def save_workflow(self, instance: Dict):
|
||||
self.is_valid(raise_exception=True)
|
||||
|
||||
folder_id = instance.get('folder_id', self.data.get('workspace_id'))
|
||||
if QuerySet(Knowledge).filter(
|
||||
workspace_id=self.data.get('workspace_id'), folder_id=folder_id, name=instance.get('name')
|
||||
).exists():
|
||||
raise AppApiException(500, _('Knowledge base name duplicate!'))
|
||||
|
||||
knowledge_id = uuid.uuid7()
|
||||
knowledge = Knowledge(
|
||||
id=knowledge_id,
|
||||
name=instance.get('name'),
|
||||
desc=instance.get('desc'),
|
||||
user_id=self.data.get('user_id'),
|
||||
type=instance.get('type', KnowledgeType.WORKFLOW),
|
||||
scope=self.data.get('scope', KnowledgeScope.WORKSPACE),
|
||||
folder_id=folder_id,
|
||||
workspace_id=self.data.get('workspace_id'),
|
||||
embedding_model_id=instance.get('embedding_model_id'),
|
||||
meta={},
|
||||
)
|
||||
knowledge.save()
|
||||
# 自动资源给授权当前用户
|
||||
UserResourcePermissionSerializer(data={
|
||||
'workspace_id': self.data.get('workspace_id'),
|
||||
'user_id': self.data.get('user_id'),
|
||||
'auth_target_type': AuthTargetType.KNOWLEDGE.value
|
||||
}).auth_resource(str(knowledge_id))
|
||||
|
||||
knowledge_workflow = KnowledgeWorkflow(
|
||||
id=uuid.uuid7(),
|
||||
knowledge_id=knowledge_id,
|
||||
workspace_id=self.data.get('workspace_id'),
|
||||
work_flow=instance.get('work_flow', {}),
|
||||
)
|
||||
|
||||
knowledge_workflow.save()
|
||||
|
||||
return {**KnowledgeModelSerializer(knowledge).data, 'document_list': []}
|
||||
|
||||
class Operate(serializers.Serializer):
|
||||
user_id = serializers.UUIDField(required=True, label=_('user id'))
|
||||
workspace_id = serializers.CharField(required=True, label=_('workspace id'))
|
||||
knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id'))
|
||||
|
||||
def edit(self, instance: Dict):
|
||||
pass
|
||||
|
||||
def one(self):
|
||||
self.is_valid(raise_exception=True)
|
||||
workflow = QuerySet(KnowledgeWorkflow).filter(knowledge_id=self.data.get('knowledge_id')).first()
|
||||
return {**KnowledgeWorkflowModelSerializer(workflow).data}
|
||||
|
||||
class McpServersSerializer(serializers.Serializer):
|
||||
mcp_servers = serializers.JSONField(required=True)
|
||||
|
||||
class KnowledgeWorkflowMcpSerializer(serializers.Serializer):
|
||||
knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id'))
|
||||
user_id = serializers.UUIDField(required=True, label=_("User ID"))
|
||||
workspace_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, label=_("Workspace ID"))
|
||||
|
||||
def is_valid(self, *, raise_exception=False):
|
||||
super().is_valid(raise_exception=True)
|
||||
workspace_id = self.data.get('workspace_id')
|
||||
query_set = QuerySet(Knowledge).filter(id=self.data.get('knowledge_id'))
|
||||
if workspace_id:
|
||||
query_set = query_set.filter(workspace_id=workspace_id)
|
||||
if not query_set.exists():
|
||||
raise AppApiException(500, _('Knowledge id does not exist'))
|
||||
|
||||
def get_mcp_servers(self, instance, with_valid=True):
|
||||
if with_valid:
|
||||
self.is_valid(raise_exception=True)
|
||||
McpServersSerializer(data=instance).is_valid(raise_exception=True)
|
||||
servers = json.loads(instance.get('mcp_servers'))
|
||||
for server, config in servers.items():
|
||||
if config.get('transport') not in ['sse', 'streamable_http']:
|
||||
raise AppApiException(500, _('Only support transport=sse or transport=streamable_http'))
|
||||
tools = []
|
||||
for server in servers:
|
||||
tools += [
|
||||
{
|
||||
'server': server,
|
||||
'name': tool.name,
|
||||
'description': tool.description,
|
||||
'args_schema': tool.args_schema,
|
||||
}
|
||||
for tool in asyncio.run(get_mcp_tools({server: servers[server]}))]
|
||||
return tools
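
Putting this module together: `KnowledgeWorkflowActionSerializer.action` records a `KnowledgeAction` row, runs the published workflow through `KnowledgeWorkflowManage`, and the nested `Operate` serializer is what the polling endpoint reads back. A rough sketch of that flow, assuming a published `KnowledgeWorkflow` already exists for the knowledge base (ids are placeholders, error handling omitted):

```python
# Sketch only: trigger a knowledge-workflow run and poll its state via the
# serializers defined above.
trigger = KnowledgeWorkflowActionSerializer(
    data={'workspace_id': '<workspace-id>', 'knowledge_id': '<knowledge-id>'})
started = trigger.action({})  # extra keys in the dict are merged into the run params

poll = KnowledgeWorkflowActionSerializer.Operate(
    data={'workspace_id': '<workspace-id>', 'knowledge_id': '<knowledge-id>',
          'id': started['id']})
print(poll.one()['state'])  # PENDING / STARTED / SUCCESS / FAILURE / REVOKE / REVOKED
```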
|
||||
|
|
@@ -9,12 +9,14 @@ urlpatterns = [
path('workspace/knowledge/document/table_template/export', views.TableTemplate.as_view()),
path('workspace/<str:workspace_id>/knowledge', views.KnowledgeView.as_view()),
path('workspace/<str:workspace_id>/knowledge/base', views.KnowledgeBaseView.as_view()),
path('workspace/<str:workspace_id>/knowledge/workflow', views.KnowledgeWorkflowView.as_view()),
path('workspace/<str:workspace_id>/knowledge/web', views.KnowledgeWebView.as_view()),
path('workspace/<str:workspace_id>/knowledge/model', views.KnowledgeView.Model.as_view()),
path('workspace/<str:workspace_id>/knowledge/embedding_model', views.KnowledgeView.EmbeddingModel.as_view()),
path('workspace/<str:workspace_id>/knowledge/tags', views.KnowledgeView.Tags.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>', views.KnowledgeView.Operate.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/sync', views.KnowledgeView.SyncWeb.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/workflow', views.KnowledgeWorkflowView.Operate.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/generate_related', views.KnowledgeView.GenerateRelated.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/embedding', views.KnowledgeView.Embedding.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/hit_test', views.KnowledgeView.HitTest.as_view()),

@@ -67,5 +69,9 @@ urlpatterns = [
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/problem/<int:current_page>/<int:page_size>', views.ProblemView.Page.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/document/<int:current_page>/<int:page_size>', views.DocumentView.Page.as_view()),
path('workspace/<str:workspace_id>/knowledge/<int:current_page>/<int:page_size>', views.KnowledgeView.Page.as_view()),

path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/datasource/<str:type>/<str:id>/form_list', views.KnowledgeDatasourceFormListView.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/datasource/<str:type>/<str:id>/<str:function_name>', views.KnowledgeDatasourceView.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/action', views.KnowledgeWorkflowActionView.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/action/<str:knowledge_action_id>', views.KnowledgeWorkflowActionView.Operate.as_view()),
path('workspace/<str:workspace_id>/knowledge/<str:knowledge_id>/mcp_tools', views.McpServers.as_view()),
]
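
The `datasource` and `mcp_tools` routes above are thin wrappers over `KnowledgeWorkflowSerializer.Datasource` and `KnowledgeWorkflowMcpSerializer` from the serializer file earlier in this diff. A rough HTTP sketch of how a client might call them (the API prefix, token, ids and request bodies are placeholders):

```python
# Sketch only: exercising the new data-source endpoints with requests.
import requests

BASE = 'http://localhost:8080/api'      # assumed API prefix
HEADERS = {'Authorization': '<token>'}  # TokenAuth, placeholder token
ws, kid = '<workspace-id>', '<knowledge-id>'

# form definition of a data-source tool ('tool' vs 'local' selects the branch
# taken inside KnowledgeWorkflowSerializer.Datasource.action)
requests.post(f'{BASE}/workspace/{ws}/knowledge/{kid}/datasource/tool/<tool-id>/form_list',
              json={}, headers=HEADERS)

# invoke an arbitrary function exposed by the same data source
requests.post(f'{BASE}/workspace/{ws}/knowledge/{kid}/datasource/tool/<tool-id>/preview',
              json={'param': 'value'}, headers=HEADERS)  # function name and params are made up
```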
|
||||
|
|
|
|||
|
|
@@ -23,7 +23,7 @@ lock = threading.Lock()
def chunk_data(data: Dict):
if str(data.get('source_type')) == str(SourceType.PARAGRAPH.value):
text = data.get('text')
chunk_list = text_to_chunk(text)
chunk_list = data.get('chunks') if data.get('chunks') else text_to_chunk(text)
return [{**data, 'text': chunk} for chunk in chunk_list]
return [data]
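
With the `chunks` array now stored on `Paragraph`, `chunk_data` prefers pre-split chunks over re-running `text_to_chunk`. A tiny sketch of the two branches (the data dicts are illustrative):

```python
# Sketch: the two branches of the updated chunk_data().
with_precomputed = {'source_type': SourceType.PARAGRAPH.value,
                    'text': 'full paragraph text',
                    'chunks': ['chunk one', 'chunk two']}
plain = {'source_type': SourceType.PARAGRAPH.value, 'text': 'full paragraph text'}

chunk_data(with_precomputed)  # -> one embedding row per stored chunk
chunk_data(plain)             # -> rows produced by text_to_chunk(text), as before
```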
|
||||
|
||||
|
|
@ -63,7 +63,8 @@ class BaseVectorStore(ABC):
|
|||
BaseVectorStore.vector_exists = True
|
||||
return True
|
||||
|
||||
def save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str, source_id: str,
|
||||
def save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str,
|
||||
source_id: str,
|
||||
is_active: bool,
|
||||
embedding: Embeddings):
|
||||
"""
|
||||
|
|
@ -104,7 +105,8 @@ class BaseVectorStore(ABC):
|
|||
break
|
||||
|
||||
@abstractmethod
|
||||
def _save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str, source_id: str,
|
||||
def _save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str,
|
||||
source_id: str,
|
||||
is_active: bool,
|
||||
embedding: Embeddings):
|
||||
pass
|
||||
|
|
|
|||
|
|
@@ -3,3 +3,4 @@ from .knowledge import *
from .paragraph import *
from .problem import *
from .tag import *
from .knowledge_workflow import *
|
||||
|
|
|
|||
|
|
@ -0,0 +1,155 @@
|
|||
# coding=utf-8
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
from drf_spectacular.utils import extend_schema
|
||||
from rest_framework.request import Request
|
||||
from rest_framework.views import APIView
|
||||
|
||||
from application.api.application_api import SpeechToTextAPI
|
||||
from common.auth import TokenAuth
|
||||
from common.auth.authentication import has_permissions
|
||||
from common.constants.permission_constants import PermissionConstants, RoleConstants, ViewPermission, CompareConstants
|
||||
from common.log.log import log
|
||||
from common.result import result
|
||||
from knowledge.api.knowledge_workflow import KnowledgeWorkflowApi
|
||||
from knowledge.serializers.common import get_knowledge_operation_object
|
||||
from knowledge.serializers.knowledge_workflow import KnowledgeWorkflowSerializer, KnowledgeWorkflowActionSerializer, \
|
||||
KnowledgeWorkflowMcpSerializer
|
||||
|
||||
|
||||
class KnowledgeDatasourceFormListView(APIView):
|
||||
authentication_classes = [TokenAuth]
|
||||
|
||||
def post(self, request: Request, workspace_id: str, knowledge_id: str, type: str, id: str):
|
||||
return result.success(KnowledgeWorkflowSerializer.Datasource(
|
||||
data={'type': type, 'id': id, 'params': request.data, 'function_name': 'get_form_list'}).action())
|
||||
|
||||
|
||||
class KnowledgeDatasourceView(APIView):
|
||||
def post(self, request: Request, workspace_id: str, knowledge_id: str, type: str, id: str, function_name: str):
|
||||
return result.success(KnowledgeWorkflowSerializer.Datasource(
|
||||
data={'type': type, 'id': id, 'params': request.data, 'function_name': function_name}).action())
|
||||
|
||||
|
||||
class KnowledgeWorkflowActionView(APIView):
|
||||
authentication_classes = [TokenAuth]
|
||||
|
||||
def post(self, request: Request, workspace_id: str, knowledge_id: str):
|
||||
return result.success(KnowledgeWorkflowActionSerializer(
|
||||
data={'workspace_id': workspace_id, 'knowledge_id': knowledge_id}).action(request.data, True))
|
||||
|
||||
class Operate(APIView):
|
||||
authentication_classes = [TokenAuth]
|
||||
|
||||
def get(self, request, workspace_id: str, knowledge_id: str, knowledge_action_id: str):
|
||||
return result.success(KnowledgeWorkflowActionSerializer.Operate(
|
||||
data={'workspace_id': workspace_id, 'knowledge_id': knowledge_id, 'id': knowledge_action_id})
|
||||
.one())
|
||||
|
||||
|
||||
class KnowledgeWorkflowView(APIView):
|
||||
authentication_classes = [TokenAuth]
|
||||
|
||||
@extend_schema(
|
||||
methods=['POST'],
|
||||
description=_('Create knowledge workflow'),
|
||||
summary=_('Create knowledge workflow'),
|
||||
operation_id=_('Create knowledge workflow'), # type: ignore
|
||||
parameters=KnowledgeWorkflowApi.get_parameters(),
|
||||
responses=KnowledgeWorkflowApi.get_response(),
|
||||
tags=[_('Knowledge Base')] # type: ignore
|
||||
)
|
||||
@has_permissions(
|
||||
PermissionConstants.KNOWLEDGE_CREATE.get_workspace_permission(),
|
||||
RoleConstants.WORKSPACE_MANAGE.get_workspace_role(), RoleConstants.USER.get_workspace_role()
|
||||
)
|
||||
def post(self, request: Request, workspace_id: str):
|
||||
return result.success(KnowledgeWorkflowSerializer.Create(
|
||||
data={'user_id': request.user.id, 'workspace_id': workspace_id}
|
||||
).save_workflow(request.data))
|
||||
|
||||
class Operate(APIView):
|
||||
authentication_classes = [TokenAuth]
|
||||
|
||||
@extend_schema(
|
||||
methods=['PUT'],
|
||||
description=_('Edit knowledge workflow'),
|
||||
summary=_('Edit knowledge workflow'),
|
||||
operation_id=_('Edit knowledge workflow'), # type: ignore
|
||||
parameters=KnowledgeWorkflowApi.get_parameters(),
|
||||
request=KnowledgeWorkflowApi.get_request(),
|
||||
responses=KnowledgeWorkflowApi.get_response(),
|
||||
tags=[_('Knowledge Base')] # type: ignore
|
||||
)
|
||||
@has_permissions(
|
||||
PermissionConstants.KNOWLEDGE_EDIT.get_workspace_knowledge_permission(),
|
||||
PermissionConstants.KNOWLEDGE_EDIT.get_workspace_permission_workspace_manage_role(),
|
||||
RoleConstants.WORKSPACE_MANAGE.get_workspace_role(),
|
||||
ViewPermission(
|
||||
[RoleConstants.USER.get_workspace_role()],
|
||||
[PermissionConstants.KNOWLEDGE.get_workspace_knowledge_permission()],
|
||||
CompareConstants.AND
|
||||
)
|
||||
)
|
||||
@log(
|
||||
menu='Knowledge Base', operate="Modify knowledge workflow",
|
||||
get_operation_object=lambda r, keywords: get_knowledge_operation_object(keywords.get('knowledge_id')),
|
||||
)
|
||||
def put(self, request: Request, workspace_id: str, knowledge_id: str):
|
||||
return result.success(KnowledgeWorkflowSerializer.Operate(
|
||||
data={'user_id': request.user.id, 'workspace_id': workspace_id, 'knowledge_id': knowledge_id}
|
||||
).edit(request.data))
|
||||
|
||||
@extend_schema(
|
||||
methods=['GET'],
|
||||
description=_('Get knowledge workflow'),
|
||||
summary=_('Get knowledge workflow'),
|
||||
operation_id=_('Get knowledge workflow'), # type: ignore
|
||||
parameters=KnowledgeWorkflowApi.get_parameters(),
|
||||
responses=KnowledgeWorkflowApi.get_response(),
|
||||
tags=[_('Knowledge Base')] # type: ignore
|
||||
)
|
||||
@has_permissions(
|
||||
PermissionConstants.KNOWLEDGE_READ.get_workspace_knowledge_permission(),
|
||||
PermissionConstants.KNOWLEDGE_READ.get_workspace_permission_workspace_manage_role(),
|
||||
RoleConstants.WORKSPACE_MANAGE.get_workspace_role(),
|
||||
ViewPermission(
|
||||
[RoleConstants.USER.get_workspace_role()],
|
||||
[PermissionConstants.KNOWLEDGE.get_workspace_knowledge_permission()],
|
||||
CompareConstants.AND
|
||||
),
|
||||
)
|
||||
def get(self, request: Request, workspace_id: str, knowledge_id: str):
|
||||
return result.success(KnowledgeWorkflowSerializer.Operate(
|
||||
data={'user_id': request.user.id, 'workspace_id': workspace_id, 'knowledge_id': knowledge_id}
|
||||
).one())
|
||||
|
||||
|
||||
class KnowledgeWorkflowVersionView(APIView):
|
||||
pass
|
||||
|
||||
|
||||
class McpServers(APIView):
|
||||
authentication_classes = [TokenAuth]
|
||||
|
||||
@extend_schema(
|
||||
methods=['GET'],
|
||||
description=_("speech to text"),
|
||||
summary=_("speech to text"),
|
||||
operation_id=_("speech to text"), # type: ignore
|
||||
parameters=SpeechToTextAPI.get_parameters(),
|
||||
request=SpeechToTextAPI.get_request(),
|
||||
responses=SpeechToTextAPI.get_response(),
|
||||
tags=[_('Knowledge Base')] # type: ignore
|
||||
)
|
||||
@has_permissions(PermissionConstants.KNOWLEDGE_READ.get_workspace_application_permission(),
|
||||
PermissionConstants.KNOWLEDGE_READ.get_workspace_permission_workspace_manage_role(),
|
||||
ViewPermission([RoleConstants.USER.get_workspace_role()],
|
||||
[PermissionConstants.KNOWLEDGE.get_workspace_application_permission()],
|
||||
CompareConstants.AND),
|
||||
RoleConstants.WORKSPACE_MANAGE.get_workspace_role())
|
||||
def post(self, request: Request, workspace_id, knowledge_id: str):
|
||||
return result.success(KnowledgeWorkflowMcpSerializer(
|
||||
data={'mcp_servers': request.query_params.get('mcp_servers'), 'workspace_id': workspace_id,
|
||||
'user_id': request.user.id,
|
||||
'knowledge_id': knowledge_id}).get_mcp_servers(request.data))
|
||||
|
|
@@ -22,8 +22,8 @@ class TorchBlocker:
['torch']
if
i in name.lower()]) > 0:
print(f"Disable package is being imported: 【{name}】", file=sys.stderr)
pass
import types
return types.ModuleType(name)
else:
return self.original_import(name, *args, **kwargs)
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,18 @@
|
|||
# Generated by Django 5.2.8 on 2025-11-17 07:07
|
||||
|
||||
from django.db import migrations, models
|
||||
|
||||
|
||||
class Migration(migrations.Migration):
|
||||
|
||||
dependencies = [
|
||||
('tools', '0003_alter_tool_template_id'),
|
||||
]
|
||||
|
||||
operations = [
|
||||
migrations.AlterField(
|
||||
model_name='tool',
|
||||
name='tool_type',
|
||||
field=models.CharField(choices=[('INTERNAL', '内置'), ('CUSTOM', '自定义'), ('MCP', 'MCP工具'), ('DATA_SOURCE', '数据源')], db_index=True, default='CUSTOM', max_length=20, verbose_name='工具类型'),
|
||||
),
|
||||
]
|
||||
|
|
@ -32,6 +32,7 @@ class ToolType(models.TextChoices):
|
|||
INTERNAL = "INTERNAL", '内置'
|
||||
CUSTOM = "CUSTOM", "自定义"
|
||||
MCP = "MCP", "MCP工具"
|
||||
DATA_SOURCE = "DATA_SOURCE", "数据源"
|
||||
|
||||
|
||||
class Tool(AppModelMixin):
|
||||
|
|
|
|||
|
|
@ -607,6 +607,7 @@ class ToolSerializer(serializers.Serializer):
|
|||
workspace_id=self.data.get('workspace_id'),
|
||||
input_field_list=tool.get('input_field_list'),
|
||||
init_field_list=tool.get('init_field_list', []),
|
||||
tool_type=tool.get('tool_type'),
|
||||
folder_id=folder_id,
|
||||
scope=scope,
|
||||
is_active=False
|
||||
|
|
|
|||
|
|
@ -1,7 +1,7 @@
|
|||
import { Result } from '@/request/Result'
|
||||
import { get, post, del, put, exportFile, exportExcel } from '@/request/index'
|
||||
import { type Ref } from 'vue'
|
||||
import type { pageRequest } from '@/api/type/common'
|
||||
import type { Dict, pageRequest } from '@/api/type/common'
|
||||
import type { knowledgeData } from '@/api/type/knowledge'
|
||||
|
||||
import useStore from '@/stores'
|
||||
|
|
@ -196,6 +196,18 @@ const postKnowledge: (data: knowledgeData, loading?: Ref<boolean>) => Promise<Re
|
|||
return post(`${prefix.value}/base`, data, undefined, loading, 1000 * 60 * 5)
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建工作流知识库
|
||||
* @param data
|
||||
* @param loading
|
||||
* @returns
|
||||
*/
|
||||
const createWorkflowKnowledge: (data: any, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
data,
|
||||
loading,
|
||||
) => {
|
||||
return post(`${prefix.value}/workflow`, data, undefined, loading)
|
||||
}
|
||||
/**
|
||||
* 获取当前用户可使用的向量化模型列表 (没用到)
|
||||
* @param application_id
|
||||
|
|
@ -250,7 +262,7 @@ const postLarkKnowledge: (data: any, loading?: Ref<boolean>) => Promise<Result<A
|
|||
const putLarkKnowledge: (
|
||||
knowledge_id: string,
|
||||
data: any,
|
||||
loading?: Ref<boolean>
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, data, loading) => {
|
||||
return put(`${prefix.value}/lark/${knowledge_id}`, data, undefined, loading)
|
||||
}
|
||||
|
|
@ -262,47 +274,115 @@ const getAllTags: (params: any, loading?: Ref<boolean>) => Promise<Result<any>>
|
|||
return get(`${prefix.value}/tags`, params, loading)
|
||||
}
|
||||
|
||||
const getTags: (knowledge_id: string, params: any, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
knowledge_id,
|
||||
params,
|
||||
loading,
|
||||
) => {
|
||||
const getTags: (
|
||||
knowledge_id: string,
|
||||
params: any,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, params, loading) => {
|
||||
return get(`${prefix.value}/${knowledge_id}/tags`, params, loading)
|
||||
}
|
||||
|
||||
const postTags: (knowledge_id: string, tags: any, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
knowledge_id,
|
||||
tags,
|
||||
loading,
|
||||
) => {
|
||||
const postTags: (
|
||||
knowledge_id: string,
|
||||
tags: any,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, tags, loading) => {
|
||||
return post(`${prefix.value}/${knowledge_id}/tags`, tags, null, loading)
|
||||
}
|
||||
|
||||
const putTag: (knowledge_id: string, tag_id: string, tag: any, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
knowledge_id,
|
||||
tag_id,
|
||||
tag,
|
||||
loading,
|
||||
) => {
|
||||
const putTag: (
|
||||
knowledge_id: string,
|
||||
tag_id: string,
|
||||
tag: any,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, tag_id, tag, loading) => {
|
||||
return put(`${prefix.value}/${knowledge_id}/tags/${tag_id}`, tag, null, loading)
|
||||
}
|
||||
|
||||
const delTag: (knowledge_id: string, tag_id: string, type: string, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
knowledge_id,
|
||||
tag_id,
|
||||
type,
|
||||
loading,
|
||||
) => {
|
||||
const delTag: (
|
||||
knowledge_id: string,
|
||||
tag_id: string,
|
||||
type: string,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, tag_id, type, loading) => {
|
||||
return del(`${prefix.value}/${knowledge_id}/tags/${tag_id}/${type}`, null, loading)
|
||||
}
|
||||
|
||||
const delMulTag: (knowledge_id: string, tags: any, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
knowledge_id,
|
||||
tags,
|
||||
loading,
|
||||
) => {
|
||||
const delMulTag: (
|
||||
knowledge_id: string,
|
||||
tags: any,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, tags, loading) => {
|
||||
return put(`${prefix.value}/${knowledge_id}/tags/batch_delete`, tags, null, loading)
|
||||
}
|
||||
const getKnowledgeWorkflowFormList: (
|
||||
knowledge_id: string,
|
||||
type: 'local' | 'tool',
|
||||
id: string,
|
||||
node: any,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (
|
||||
knowledge_id: string,
|
||||
type: 'local' | 'tool',
|
||||
id: string,
|
||||
node,
|
||||
loading,
|
||||
) => {
|
||||
return post(
|
||||
`${prefix.value}/${knowledge_id}/datasource/${type}/${id}/form_list`,
|
||||
{ node },
|
||||
{},
|
||||
loading,
|
||||
)
|
||||
}
|
||||
const getKnowledgeWorkflowDatasourceDetails: (
|
||||
knowledge_id: string,
|
||||
type: 'local' | 'tool',
|
||||
id: string,
|
||||
params: any,
|
||||
function_name: string,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (
|
||||
knowledge_id: string,
|
||||
type: 'local' | 'tool',
|
||||
id: string,
|
||||
params,
|
||||
function_name,
|
||||
loading,
|
||||
) => {
|
||||
return post(
|
||||
`${prefix.value}/${knowledge_id}/datasource/${type}/${id}/${function_name}`,
|
||||
params,
|
||||
{},
|
||||
loading,
|
||||
)
|
||||
}
|
||||
const workflowAction: (
|
||||
knowledge_id: string,
|
||||
instance: Dict<any>,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id: string, instance, loading) => {
|
||||
return post(`${prefix.value}/${knowledge_id}/action`, instance, {}, loading)
|
||||
}
|
||||
const getWorkflowAction: (
|
||||
knowledge_id: string,
|
||||
knowledge_action_id: string,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id: string, knowledge_action_id, loading) => {
|
||||
return get(`${prefix.value}/${knowledge_id}/action/${knowledge_action_id}`, {}, loading)
|
||||
}
|
||||
|
||||
/**
|
||||
* mcp 节点
|
||||
*/
|
||||
const getMcpTools: (
|
||||
knowledge_id: string,
|
||||
mcp_servers: any,
|
||||
loading?: Ref<boolean>,
|
||||
) => Promise<Result<any>> = (knowledge_id, mcp_servers, loading) => {
|
||||
return post(`${prefix.value}/${knowledge_id}/mcp_tools`, { mcp_servers }, {}, loading)
|
||||
}
|
||||
|
||||
|
||||
export default {
|
||||
getKnowledgeList,
|
||||
|
|
@ -326,5 +406,11 @@ export default {
|
|||
postTags,
|
||||
putTag,
|
||||
delTag,
|
||||
delMulTag
|
||||
delMulTag,
|
||||
createWorkflowKnowledge,
|
||||
getKnowledgeWorkflowFormList,
|
||||
workflowAction,
|
||||
getWorkflowAction,
|
||||
getKnowledgeWorkflowDatasourceDetails,
|
||||
getMcpTools,
|
||||
}
|
||||
|
|
|
|||
|
|
@ -189,6 +189,19 @@ const postKnowledge: (data: knowledgeData, loading?: Ref<boolean>) => Promise<Re
|
|||
return post(`${prefix}/base`, data, undefined, loading, 1000 * 60 * 5)
|
||||
}
|
||||
|
||||
/**
|
||||
* 创建工作流知识库
|
||||
* @param data
|
||||
* @param loading
|
||||
* @returns
|
||||
*/
|
||||
const createWorkflowKnowledge: (data: any, loading?: Ref<boolean>) => Promise<Result<any>> = (
|
||||
data,
|
||||
loading,
|
||||
) => {
|
||||
return post(`${prefix}/workflow`, data, undefined, loading)
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取当前用户可使用的向量化模型列表(没用到)
|
||||
* @param application_id
|
||||
|
|
@ -313,6 +326,7 @@ export default {
|
|||
postKnowledge,
|
||||
getKnowledgeModel,
|
||||
postWebKnowledge,
|
||||
createWorkflowKnowledge,
|
||||
postLarkKnowledge,
|
||||
putLarkKnowledge,
|
||||
getAllTags,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,7 @@
|
|||
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M4 2.66671C4 1.93033 4.59695 1.33337 5.33333 1.33337H21.1477C21.3265 1.33337 21.4979 1.4052 21.6232 1.53272L27.8088 7.82592C27.9313 7.9506 28 8.11842 28 8.29324V29.3334C28 30.0698 27.403 30.6667 26.6667 30.6667H5.33333C4.59695 30.6667 4 30.0698 4 29.3334V2.66671Z" fill="#FF8800"/>
|
||||
<path d="M9.33325 11.3333C9.33325 10.9651 9.63173 10.6666 9.99992 10.6666H21.9999C22.3681 10.6666 22.6666 10.9651 22.6666 11.3333V12.6666C22.6666 13.0348 22.3681 13.3333 21.9999 13.3333H9.99992C9.63173 13.3333 9.33325 13.0348 9.33325 12.6666V11.3333Z" fill="white"/>
|
||||
<path d="M14.6666 16.6666C14.6666 16.2984 14.9651 16 15.3333 16H21.9999C22.3681 16 22.6666 16.2984 22.6666 16.6666V23.3333C22.6666 23.7015 22.3681 24 21.9999 24H15.3333C14.9651 24 14.6666 23.7015 14.6666 23.3333V16.6666Z" fill="white"/>
|
||||
<path d="M9.99992 16C9.63173 16 9.33325 16.2984 9.33325 16.6666V23.3333C9.33325 23.7015 9.63173 24 9.99992 24H11.3333C11.7014 24 11.9999 23.7015 11.9999 23.3333V16.6666C11.9999 16.2984 11.7014 16 11.3333 16H9.99992Z" fill="white"/>
|
||||
<path d="M21.3333 1.33337C21.4421 1.36494 21.5422 1.42405 21.6231 1.50637L27.8087 7.79957C27.8593 7.85109 27.9008 7.90998 27.932 7.97369H22.8468C22.0109 7.97369 21.3333 7.29607 21.3333 6.46017V1.33337Z" fill="#D97400"/>
|
||||
</svg>
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M3.18415 4.04678C3.99587 3.49862 4.93815 3.33337 5.56813 3.33337H12.5C12.0397 3.33337 11.6666 3.70647 11.6666 4.16671C11.6666 4.62694 12.0397 5.00004 12.5 5.00004H5.56813C5.1754 5.00004 4.58359 5.11283 4.1169 5.428C3.69621 5.71209 3.33329 6.18316 3.33329 7.06392C3.33329 7.94608 3.69781 8.42805 4.12306 8.72081C4.59223 9.04379 5.18345 9.16132 5.56813 9.16132H14.4977C15.1331 9.16132 16.0633 9.3331 16.86 9.89052C17.7011 10.4789 18.3333 11.4598 18.3333 12.9079C18.3333 14.3555 17.7016 15.3385 16.8623 15.9298C16.0665 16.4905 15.1361 16.6667 14.4977 16.6667H7.49996C7.9602 16.6667 8.33329 16.2936 8.33329 15.8334C8.33329 15.3731 7.9602 15 7.49996 15H14.4977C14.8819 15 15.4527 14.8841 15.9024 14.5673C16.3085 14.2812 16.6666 13.8014 16.6666 12.9079C16.6666 12.0149 16.3091 11.5391 15.9046 11.2562C15.4559 10.9423 14.8849 10.828 14.4977 10.828H5.56813C4.9301 10.828 3.98723 10.6507 3.17799 10.0936C2.32484 9.50629 1.66663 8.52289 1.66663 7.06392C1.66663 5.60354 2.32643 4.62601 3.18415 4.04678Z" fill="white"/>
|
||||
<path d="M7.49996 15C7.03972 15 6.66663 15.3731 6.66663 15.8334C6.66663 16.2936 7.03972 16.6667 7.49996 16.6667H2.49996C2.03972 16.6667 1.66663 16.2936 1.66663 15.8334C1.66663 15.3731 2.03972 15 2.49996 15H7.49996Z" fill="white"/>
|
||||
<path d="M12.5 5.00004C12.9602 5.00004 13.3333 4.62694 13.3333 4.16671C13.3333 3.70647 12.9602 3.33337 12.5 3.33337H17.4545C17.9147 3.33337 18.2878 3.70647 18.2878 4.16671C18.2878 4.62694 17.9147 5.00004 17.4545 5.00004H12.5Z" fill="white"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M12.5 3.33329C12.0398 3.33329 11.6667 3.70639 11.6667 4.16663C11.6667 4.62686 12.0398 4.99996 12.5 4.99996C12.9602 4.99996 13.3333 4.62686 13.3333 4.16663C13.3333 3.70639 12.9602 3.33329 12.5 3.33329ZM10 4.16663C10 2.78591 11.1193 1.66663 12.5 1.66663C13.8807 1.66663 15 2.78591 15 4.16663C15 5.54734 13.8807 6.66663 12.5 6.66663C11.1193 6.66663 10 5.54734 10 4.16663Z" fill="white"/>
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M7.5 15C7.03976 15 6.66667 15.3731 6.66667 15.8334C6.66667 16.2936 7.03976 16.6667 7.5 16.6667C7.96024 16.6667 8.33333 16.2936 8.33333 15.8334C8.33333 15.3731 7.96024 15 7.5 15ZM5 15.8334C5 14.4527 6.11929 13.3334 7.5 13.3334C8.88071 13.3334 10 14.4527 10 15.8334C10 17.2141 8.88071 18.3334 7.5 18.3334C6.11929 18.3334 5 17.2141 5 15.8334Z" fill="white"/>
|
||||
</svg>
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M2.08337 4.58331C2.08337 4.91162 2.28814 5.23671 2.68599 5.54002C3.08384 5.84334 3.66698 6.11893 4.40211 6.35108C5.13724 6.58323 6.00997 6.76738 6.97046 6.89301C7.93096 7.01865 8.96041 7.08331 10 7.08331C11.0397 7.08331 12.0691 7.01865 13.0296 6.89301C13.9901 6.76738 14.8628 6.58323 15.598 6.35108C16.3331 6.11893 16.9162 5.84334 17.3141 5.54002C17.7119 5.23671 17.9167 4.91162 17.9167 4.58331C17.9167 3.92027 17.0826 3.28439 15.598 2.81555C14.1133 2.34671 12.0997 2.08331 10 2.08331C7.90041 2.08331 5.88678 2.34671 4.40211 2.81555C2.91745 3.28439 2.08337 3.92027 2.08337 4.58331Z" fill="white"/>
|
||||
<path d="M10 13.75C6.52088 13.75 3.56671 13.0417 2.50421 12.0583C2.46911 12.0244 2.42481 12.0016 2.37683 11.9928C2.32885 11.9839 2.27932 11.9894 2.23443 12.0085C2.18954 12.0276 2.15127 12.0595 2.12441 12.1002C2.09754 12.141 2.08327 12.1887 2.08337 12.2375V15.4167C2.08337 16.7958 5.62921 17.9167 10 17.9167C14.3709 17.9167 17.9167 16.7958 17.9167 15.4167V12.2375C17.9167 12.0167 17.6584 11.9083 17.4959 12.0583C16.4334 13.0417 13.4792 13.75 10 13.75Z" fill="white"/>
|
||||
<path d="M10 8.33332C6.52088 8.33332 3.56671 7.62499 2.50421 6.64165C2.46911 6.60776 2.42481 6.58496 2.37683 6.57609C2.32885 6.56723 2.27932 6.57269 2.23443 6.59181C2.18954 6.61092 2.15127 6.64283 2.12441 6.68356C2.09754 6.72429 2.08327 6.77203 2.08337 6.82082V9.99998C2.08337 11.3792 5.62921 12.5 10 12.5C14.3709 12.5 17.9167 11.3792 17.9167 9.99998V6.82082C17.9167 6.59998 17.6584 6.49165 17.4959 6.64165C16.4334 7.62499 13.4792 8.33332 10 8.33332Z" fill="white"/>
|
||||
</svg>
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path d="M5.83325 7.08329C5.83325 6.85317 6.0198 6.66663 6.24992 6.66663H13.7499C13.98 6.66663 14.1666 6.85317 14.1666 7.08329V7.91663C14.1666 8.14674 13.98 8.33329 13.7499 8.33329H6.24992C6.0198 8.33329 5.83325 8.14674 5.83325 7.91663V7.08329Z" fill="white"/>
|
||||
<path d="M9.16659 10.4166C9.16659 10.1865 9.35313 9.99996 9.58325 9.99996H13.7499C13.98 9.99996 14.1666 10.1865 14.1666 10.4166V14.5833C14.1666 14.8134 13.98 15 13.7499 15H9.58325C9.35313 15 9.16659 14.8134 9.16659 14.5833V10.4166Z" fill="white"/>
|
||||
<path d="M6.24992 9.99996C6.0198 9.99996 5.83325 10.1865 5.83325 10.4166V14.5833C5.83325 14.8134 6.0198 15 6.24992 15H7.08325C7.31337 15 7.49992 14.8134 7.49992 14.5833V10.4166C7.49992 10.1865 7.31337 9.99996 7.08325 9.99996H6.24992Z" fill="white"/>
|
||||
<path d="M2.5 1.66671C2.5 1.20647 2.8731 0.833374 3.33333 0.833374H13.2173C13.3291 0.833374 13.4362 0.878265 13.5145 0.957964L17.3805 4.89122C17.4571 4.96914 17.5 5.07403 17.5 5.18329V18.3334C17.5 18.7936 17.1269 19.1667 16.6667 19.1667H3.33333C2.8731 19.1667 2.5 18.7936 2.5 18.3334V1.66671Z" fill="#FFC60A"/>
<path d="M10.3252 7.90699C10.1584 7.69848 9.84126 7.69848 9.67446 7.90699L7.20805 10.99C6.9898 11.2628 7.18404 11.667 7.53342 11.667H9.16649V13.3336H10.8332V11.667H12.4662C12.8156 11.667 13.0098 11.2628 12.7916 10.99L10.3252 7.90699Z" fill="white"/>
<path d="M9.16649 15.0003V14.167H10.8332V15.0003H9.16649Z" fill="white"/>
<path d="M13.3333 0.833374C13.4013 0.8531 13.4638 0.890044 13.5144 0.941494L17.3804 4.87474C17.4121 4.90695 17.438 4.94375 17.4575 4.98357H14.2792C13.7568 4.98357 13.3333 4.56006 13.3333 4.03762V0.833374Z" fill="#D9A808"/>
</svg>
@@ -0,0 +1,5 @@
<svg width="13" height="13" viewBox="0 0 13 13" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M1.04175 1.82292C1.04175 1.67909 1.15834 1.5625 1.30216 1.5625H11.198C11.3418 1.5625 11.4584 1.67909 11.4584 1.82292V2.34375C11.4584 2.48757 11.3418 2.60417 11.198 2.60417H1.30216C1.15834 2.60417 1.04175 2.48757 1.04175 2.34375V1.82292Z" fill="white"/>
<path d="M3.64591 5.98958C3.64591 5.84576 3.76251 5.72917 3.90633 5.72917H8.59383C8.73765 5.72917 8.85424 5.84576 8.85424 5.98958V6.51042C8.85424 6.65424 8.73765 6.77083 8.59383 6.77083H3.90633C3.76251 6.77083 3.64591 6.65424 3.64591 6.51042V5.98958Z" fill="white"/>
<path d="M1.04175 10.1562C1.04175 10.0124 1.15834 9.89583 1.30216 9.89583H11.198C11.3418 9.89583 11.4584 10.0124 11.4584 10.1562V10.6771C11.4584 10.8209 11.3418 10.9375 11.198 10.9375H1.30216C1.15834 10.9375 1.04175 10.8209 1.04175 10.6771V10.1562Z" fill="white"/>
</svg>
@@ -0,0 +1,5 @@
<svg width="13" height="13" viewBox="0 0 13 13" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M3.16961 11.9792H1.56258C1.42445 11.9792 1.29197 11.9243 1.1943 11.8267C1.09662 11.729 1.04175 11.5965 1.04175 11.4584V1.04171C1.04175 0.903574 1.09662 0.771098 1.1943 0.673423C1.29197 0.575748 1.42445 0.520874 1.56258 0.520874H9.89591C10.034 0.520874 10.1665 0.575748 10.2642 0.673423C10.3619 0.771098 10.4167 0.903574 10.4167 1.04171V4.73155C10.4168 4.86964 10.362 5.00208 10.2644 5.09978L3.5381 11.8266C3.48972 11.875 3.43227 11.9134 3.36904 11.9396C3.30581 11.9658 3.23805 11.9792 3.16961 11.9792ZM3.46177 6.17377C3.51061 6.2226 3.57685 6.25004 3.64591 6.25004H6.25008C6.31915 6.25004 6.38538 6.2226 6.43422 6.17377C6.48306 6.12493 6.5105 6.05869 6.5105 5.98962V5.46879C6.5105 5.39972 6.48306 5.33348 6.43422 5.28465C6.38538 5.23581 6.31915 5.20837 6.25008 5.20837H3.64591C3.57685 5.20837 3.51061 5.23581 3.46177 5.28465C3.41293 5.33348 3.3855 5.39972 3.3855 5.46879V5.98962C3.3855 6.05869 3.41293 6.12493 3.46177 6.17377ZM3.46177 3.5696C3.51061 3.61844 3.57685 3.64587 3.64591 3.64587H8.07299C8.10719 3.64587 8.14106 3.63914 8.17265 3.62605C8.20425 3.61296 8.23295 3.59378 8.25714 3.5696C8.28132 3.54542 8.3005 3.51671 8.31359 3.48511C8.32668 3.45352 8.33341 3.41966 8.33341 3.38546V2.86462C8.33341 2.83043 8.32668 2.79656 8.31359 2.76497C8.3005 2.73337 8.28132 2.70466 8.25714 2.68048C8.23295 2.6563 8.20425 2.63712 8.17265 2.62403C8.14106 2.61094 8.10719 2.60421 8.07299 2.60421H3.64591C3.57685 2.60421 3.51061 2.63164 3.46177 2.68048C3.41293 2.72932 3.3855 2.79556 3.3855 2.86462V3.38546C3.3855 3.45452 3.41293 3.52076 3.46177 3.5696Z" fill="white"/>
<path d="M10.5977 8.9985L9.12873 7.52981L5.90087 10.7782L5.72925 12.1701C5.72925 12.2392 5.75668 12.3054 5.80552 12.3542C5.85436 12.4031 5.9206 12.4305 5.98966 12.4305L7.38336 12.2087L10.5977 8.9985Z" fill="white"/>
<path d="M10.9755 6.41346C10.7724 6.21007 10.4586 6.19445 10.2742 6.3783L9.4956 7.15955L10.9664 8.63033L11.7458 7.8522L11.7705 7.82564C11.9289 7.63918 11.906 7.34387 11.7119 7.1496L10.9755 6.41346Z" fill="white"/>
</svg>
@@ -0,0 +1,6 @@
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
<path d="M0 10C0 5.28595 0 2.92893 1.46447 1.46447C2.92893 0 5.28595 0 10 0C14.714 0 17.0711 0 18.5355 1.46447C20 2.92893 20 5.28595 20 10C20 14.714 20 17.0711 18.5355 18.5355C17.0711 20 14.714 20 10 20C5.28595 20 2.92893 20 1.46447 18.5355C0 17.0711 0 14.714 0 10Z" fill="#3370FF"/>
<path d="M6.91961 15.7292H5.31258C5.17445 15.7292 5.04197 15.6743 4.9443 15.5767C4.84662 15.479 4.79175 15.3465 4.79175 15.2084V4.79171C4.79175 4.65357 4.84662 4.5211 4.9443 4.42342C5.04197 4.32575 5.17445 4.27087 5.31258 4.27087H13.6459C13.784 4.27087 13.9165 4.32575 14.0142 4.42342C14.1119 4.5211 14.1667 4.65357 14.1667 4.79171V8.48155C14.1668 8.61964 14.112 8.75208 14.0144 8.84978L7.2881 15.5766C7.23972 15.625 7.18227 15.6634 7.11904 15.6896C7.05582 15.7158 6.98805 15.7292 6.91961 15.7292ZM7.21177 9.92377C7.26061 9.9726 7.32685 10 7.39591 10H10.0001C10.0691 10 10.1354 9.9726 10.1842 9.92377C10.2331 9.87493 10.2605 9.80869 10.2605 9.73962V9.21879C10.2605 9.14972 10.2331 9.08349 10.1842 9.03465C10.1354 8.98581 10.0691 8.95837 10.0001 8.95837H7.39591C7.32685 8.95837 7.26061 8.98581 7.21177 9.03465C7.16293 9.08349 7.1355 9.14972 7.1355 9.21879V9.73962C7.1355 9.80869 7.16293 9.87493 7.21177 9.92377ZM7.21177 7.3196C7.26061 7.36844 7.32685 7.39587 7.39591 7.39587H11.823C11.8572 7.39587 11.8911 7.38914 11.9227 7.37605C11.9542 7.36296 11.983 7.34378 12.0071 7.3196C12.0313 7.29542 12.0505 7.26671 12.0636 7.23511C12.0767 7.20352 12.0834 7.16966 12.0834 7.13546V6.61462C12.0834 6.58043 12.0767 6.54656 12.0636 6.51497C12.0505 6.48337 12.0313 6.45466 12.0071 6.43048C11.983 6.4063 11.9542 6.38712 11.9227 6.37403C11.8911 6.36094 11.8572 6.35421 11.823 6.35421H7.39591C7.32685 6.35421 7.26061 6.38164 7.21177 6.43048C7.16293 6.47932 7.1355 6.54556 7.1355 6.61462V7.13546C7.1355 7.20452 7.16293 7.27076 7.21177 7.3196Z" fill="white"/>
<path d="M14.3477 12.7485L12.8787 11.2798L9.65087 14.5282L9.47925 15.9201C9.47925 15.9892 9.50668 16.0554 9.55552 16.1042C9.60436 16.1531 9.6706 16.1805 9.73966 16.1805L11.1334 15.9587L14.3477 12.7485Z" fill="white"/>
<path d="M14.7255 10.1635C14.5224 9.96007 14.2086 9.94445 14.0242 10.1283L13.2456 10.9096L14.7164 12.3803L15.4958 11.6022L15.5205 11.5756C15.6789 11.3892 15.656 11.0939 15.462 10.8996L14.7255 10.1635Z" fill="white"/>
</svg>
@@ -27,10 +27,15 @@
"
>{{ data?.message_tokens + data?.answer_tokens }} tokens</span
>
<span class="mr-16 color-secondary">{{ data?.run_time?.toFixed(2) || 0.0 }} s</span>
<span class="mr-16 color-secondary" v-if="data.status != 202"
>{{ data?.run_time?.toFixed(2) || 0.0 }} s</span
>
<el-icon class="color-success" :size="16" v-if="data.status === 200">
<CircleCheck />
</el-icon>
<el-icon class="is-loading" :size="16" v-else-if="data.status === 202">
<Loading />
</el-icon>
<el-icon class="color-danger" :size="16" v-else>
<CircleClose />
</el-icon>
@@ -224,7 +229,7 @@
</div>
<div class="card-never border-r-6 mt-8" v-if="data.type == WorkflowType.AiChat">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.aiChatNode.think') }}
{{ $t('views.workflow.nodes.aiChatNode.think') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ data.reasoning_content || '-' }}
@@ -666,7 +671,7 @@
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label') }}
{{ $t('views.workflow.nodes.imageGenerateNode.negative_prompt.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ data.negative_prompt || '-' }}
@@ -704,7 +709,7 @@
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label') }}
{{ $t('views.workflow.nodes.imageGenerateNode.negative_prompt.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ data.negative_prompt || '-' }}
@@ -743,7 +748,7 @@
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label') }}
{{ $t('views.workflow.nodes.imageGenerateNode.negative_prompt.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ data.negative_prompt || '-' }}
@@ -751,7 +756,7 @@
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.label') }}
{{ $t('views.workflow.nodes.imageToVideoGenerate.first_frame.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
<div v-if="typeof data.first_frame_url === 'string'">
@@ -780,7 +785,7 @@
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.label') }}
{{ $t('views.workflow.nodes.imageToVideoGenerate.last_frame.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
<div v-if="typeof data.last_frame_url === 'string'">
@@ -884,7 +889,7 @@
<template v-if="data.type === WorkflowType.VariableAggregationNode">
<div class="card-never border-r-6">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.variableAggregationNode.Strategy') }}
{{ $t('views.workflow.nodes.variableAggregationNode.Strategy') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ data.strategy }}
@@ -896,11 +901,12 @@
:key="groupI"
>
<h5 class="p-8-12">
{{ group.label+ ' '+ $t('common.param.inputParam') }}
{{ group.label + ' ' + $t('common.param.inputParam') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div v-for="(f, i) in group.variable_list" :key="i" class="mb-8">
<span class="color-secondary">{{ `${f.node_name}.${f.field}` }}:</span> {{ f.value }}
<span class="color-secondary">{{ `${f.node_name}.${f.field}` }}:</span>
{{ f.value }}
</div>
</div>
</div>
@@ -930,7 +936,7 @@
</div>
<div class="card-never border-r-6">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.mcpNode.toolParam') }}
{{ $t('views.workflow.nodes.mcpNode.toolParam') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div v-for="(value, name) in data.tool_params" :key="name" class="mb-8">
@@ -952,19 +958,19 @@
<!-- Loop node -->
<div class="card-never border-r-6" v-if="data.type === WorkflowType.LoopNode">
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.loopNode.loopSetting') }}
{{ $t('views.workflow.nodes.loopNode.loopSetting') }}
</h5>

<div class="p-8-12 border-t-dashed lighter">
<div class="mb-8">
<span class="color-secondary">
{{ $t('views.applicationWorkflow.nodes.loopNode.loopType.label') }}:</span
{{ $t('views.workflow.nodes.loopNode.loopType.label') }}:</span
>
{{ data.loop_type || '-' }}
</div>
<div>
<span class="color-secondary">
{{ $t('views.applicationWorkflow.nodes.loopNode.loopArray.label') }}:</span
{{ $t('views.workflow.nodes.loopNode.loopArray.label') }}:</span
>
{{
data.loop_type === 'NUMBER'
@@ -974,7 +980,7 @@
</div>
</div>
<h5 class="p-8-12">
{{ $t('views.applicationWorkflow.nodes.loopNode.loopDetail') }}
{{ $t('views.workflow.nodes.loopNode.loopDetail') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<template v-if="data.type === WorkflowType.LoopNode">
@@ -1004,14 +1010,14 @@
<div class="p-8-12 border-t-dashed lighter">
<div class="mb-8">
<span class="color-secondary">
{{ $t('views.applicationWorkflow.nodes.loopStartNode.loopItem') }}:</span
{{ $t('views.workflow.nodes.loopStartNode.loopItem') }}:</span
>

{{ data.current_item }}
</div>
<div class="mb-8">
<span class="color-secondary">
{{ $t('views.applicationWorkflow.nodes.loopStartNode.loopIndex') }}:</span
{{ $t('views.workflow.nodes.loopStartNode.loopIndex') }}:</span
>

{{ data.current_index }}
@@ -1029,7 +1035,7 @@
<div class="p-8-12 border-t-dashed lighter">
<div class="mb-8">
<span class="color-secondary">
{{ $t('views.applicationWorkflow.nodes.loopContinueNode.isContinue') }}:</span
{{ $t('views.workflow.nodes.loopContinueNode.isContinue') }}:</span
>

{{ data.is_continue }}
@@ -1047,7 +1053,7 @@
<div class="p-8-12 border-t-dashed lighter">
<div class="mb-8">
<span class="color-secondary">
{{ $t('views.applicationWorkflow.nodes.loopBreakNode.isBreak') }}:</span
{{ $t('views.workflow.nodes.loopBreakNode.isBreak') }}:</span
>

{{ data.is_break }}
@@ -1,75 +1,73 @@
<template>
<el-scrollbar>
<div class="execution-details p-8">
<div v-if="isWorkFlow(props.appType)">
<template v-for="(item, index) in arraySort(props.detail ?? [], 'index')" :key="index">
<ExecutionDetailCard :data="item"> </ExecutionDetailCard>
</template>
</div>

<template v-else>
<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.paragraphSource.question') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<span class="mb-8">user: {{ problem }}</span>
</div>
</div>
<div v-if="paddedProblem" class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.paragraphSource.questionPadded') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<span class="mb-8">user: {{ paddedProblem }}</span>
</div>
</div>
<div v-if="system" class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('views.application.form.roleSettings.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<span class="mb-8">{{ system }}</span>
</div>
</div>

<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.history') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div v-for="(msg, index) in historyRecord" :key="index">
<span>{{ msg.role }}: </span>
<span>{{ msg.content }}</span>
</div>
</div>
</div>

<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.currentChat') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div class="mb-8">{{ $t('chat.executionDetails.knowedMessage') }}:</div>
<div v-for="(msg, index) in currentChat" :key="index">
<span>{{ msg.content }}</span>
</div>
</div>
</div>

<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.answer') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div v-for="(msg, index) in AiResponse" :key="index">
<span>{{ msg.content }}</span>
</div>
</div>
</div>
<div class="execution-details">
<div v-if="isWorkFlow(props.appType)">
<template v-for="(item, index) in arraySort(props.detail ?? [], 'index')" :key="index">
<ExecutionDetailCard :data="item"> </ExecutionDetailCard>
</template>
</div>
</el-scrollbar>

<template v-else>
<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.paragraphSource.question') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<span class="mb-8">user: {{ problem }}</span>
</div>
</div>
<div v-if="paddedProblem" class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.paragraphSource.questionPadded') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<span class="mb-8">user: {{ paddedProblem }}</span>
</div>
</div>
<div v-if="system" class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('views.application.form.roleSettings.label') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<span class="mb-8">{{ system }}</span>
</div>
</div>

<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.history') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div v-for="(msg, index) in historyRecord" :key="index">
<span>{{ msg.role }}: </span>
<span>{{ msg.content }}</span>
</div>
</div>
</div>

<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.currentChat') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div class="mb-8">{{ $t('chat.executionDetails.knowedMessage') }}:</div>
<div v-for="(msg, index) in currentChat" :key="index">
<span>{{ msg.content }}</span>
</div>
</div>
</div>

<div class="card-never border-r-6 mb-12">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.answer') }}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<div v-for="(msg, index) in AiResponse" :key="index">
<span>{{ msg.content }}</span>
</div>
</div>
</div>
</template>
</div>
</template>
<script setup lang="ts">
import { ref, computed } from 'vue'
@@ -145,13 +143,9 @@ const AiResponse = computed(() => {
const messages = messageList.value?.filter((item: any) => item.role != 'system')
return messages.slice(messages.length - 1, messages.length)
})


</script>
<style lang="scss" scoped>
.execution-details {
max-height: calc(100vh - 260px);

.arrow-icon {
transition: 0.2s;
}