diff --git a/apps/application/flow/common.py b/apps/application/flow/common.py
index 2ba944875..65aaa399d 100644
--- a/apps/application/flow/common.py
+++ b/apps/application/flow/common.py
@@ -6,7 +6,7 @@
     @date:2024/12/11 17:57
     @desc:
 """
-
+from enum import Enum
 from typing import List, Dict
 
 from django.db.models import QuerySet
@@ -90,6 +90,16 @@ class EdgeNode:
         self.node = node
 
 
+class WorkflowMode(Enum):
+    APPLICATION = "application"
+
+    APPLICATION_LOOP = "application-loop"
+
+    KNOWLEDGE = "knowledge"
+
+    KNOWLEDGE_LOOP = "knowledge-loop"
+
+
 class Workflow:
     """
     节点列表
@@ -112,7 +122,10 @@ class Workflow:
     """
     next_node_map: Dict[str, List[EdgeNode]]
 
-    def __init__(self, nodes: List[Node], edges: List[Edge]):
+    workflow_mode: WorkflowMode
+
+    def __init__(self, nodes: List[Node], edges: List[Edge],
+                 workflow_mode: WorkflowMode = WorkflowMode.APPLICATION):
         self.nodes = nodes
         self.edges = edges
         self.node_map = {node.id: node for node in nodes}
@@ -125,6 +138,7 @@ class Workflow:
         self.next_node_map = {key: [EdgeNode(edge, self.node_map.get(edge.targetNodeId)) for edge in edges] for
                               key, edges in
                               group_by(edges, key=lambda edge: edge.sourceNodeId).items()}
+        self.workflow_mode = workflow_mode
 
     def get_node(self, node_id):
         """
@@ -167,13 +181,13 @@ class Workflow:
         return [en.node for en in self.next_node_map.get(node_id, [])]
 
     @staticmethod
-    def new_instance(flow_obj: Dict):
+    def new_instance(flow_obj: Dict, workflow_mode: WorkflowMode = WorkflowMode.APPLICATION):
         nodes = flow_obj.get('nodes')
         edges = flow_obj.get('edges')
         nodes = [Node(node.get('id'), node.get('type'), **node) for node in nodes]
         edges = [Edge(edge.get('id'), edge.get('type'), **edge) for edge in edges]
-        return Workflow(nodes, edges)
+        return Workflow(nodes, edges, workflow_mode)
 
     def get_start_node(self):
         return self.get_node('start-node')
 
@@ -190,10 +204,9 @@ class Workflow:
         self.is_valid_base_node()
         self.is_valid_work_flow()
 
-    @staticmethod
-    def is_valid_node_params(node: Node):
+    def is_valid_node_params(self, node: Node):
         from application.flow.step_node import get_node
-        get_node(node.type)(node, None, None)
+        get_node(node.type, self.workflow_mode)(node, None, None)
 
     def is_valid_node(self, node: Node):
         self.is_valid_node_params(node)
diff --git a/apps/application/flow/i_step_node.py b/apps/application/flow/i_step_node.py
index 23822b6d2..e963b74ae 100644
--- a/apps/application/flow/i_step_node.py
+++ b/apps/application/flow/i_step_node.py
@@ -21,6 +21,7 @@ from application.flow.common import Answer, NodeChunk
 from application.models import ApplicationChatUserStats
 from application.models import ChatRecord, ChatUserType
 from common.field.common import InstanceField
+from knowledge.models.knowledge_action import KnowledgeAction, State
 
 chat_cache = cache
 
@@ -78,7 +79,8 @@ class WorkFlowPostHandler:
                                  message_tokens=message_tokens, answer_tokens=answer_tokens,
                                  answer_text_list=answer_text_list,
-                                 run_time=time.time() - workflow.context['start_time'],
+                                 run_time=time.time() - workflow.context.get('start_time') if workflow.context.get(
+                                     'start_time') is not None else 0,
                                  index=0)
         self.chat_info.append_chat_record(chat_record)
 
@@ -97,6 +99,16 @@ class WorkFlowPostHandler:
         self.chat_info = None
 
 
+class KnowledgeWorkflowPostHandler(WorkFlowPostHandler):
+    def __init__(self, chat_info, knowledge_action_id):
+        super().__init__(chat_info)
+        self.knowledge_action_id = knowledge_action_id
+
+    def handler(self, workflow):
+        QuerySet(KnowledgeAction).filter(id=self.knowledge_action_id).update(
+            state=State.SUCCESS)
+
+
 class
NodeResult: def __init__(self, node_variable: Dict, workflow_variable: Dict, _write_context=write_context, _is_interrupt=is_interrupt): @@ -153,6 +165,14 @@ class FlowParamsSerializer(serializers.Serializer): debug = serializers.BooleanField(required=True, label="是否debug") +class KnowledgeFlowParamsSerializer(serializers.Serializer): + knowledge_id = serializers.UUIDField(required=True, label="知识库id") + workspace_id = serializers.CharField(required=True, label="工作空间id") + knowledge_action_id = serializers.UUIDField(required=True, label="知识库任务执行器id") + data_source = serializers.DictField(required=True, label="数据源") + knowledge_base = serializers.DictField(required=False, label="知识库设置") + + class INode: view_type = 'many_view' @@ -165,7 +185,8 @@ class INode: return None reasoning_content_enable = self.context.get('model_setting', {}).get('reasoning_content_enable', False) return [ - Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {}, + Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params.get('chat_record_id'), + {}, self.runtime_node_id, self.context.get('reasoning_content', '') if reasoning_content_enable else '')] def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None, @@ -222,13 +243,14 @@ class INode: pass def get_flow_params_serializer_class(self) -> Type[serializers.Serializer]: - return FlowParamsSerializer + return self.workflow_manage.get_params_serializer_class() def get_write_error_context(self, e): self.status = 500 self.answer_text = str(e) self.err_message = str(e) - self.context['run_time'] = time.time() - self.context['start_time'] + current_time = time.time() + self.context['run_time'] = current_time - (self.context.get('start_time') or current_time) def write_error_context(answer, status=200): pass diff --git a/apps/application/flow/knowledge_loop_workflow_manage.py b/apps/application/flow/knowledge_loop_workflow_manage.py new file mode 100644 index 000000000..fbda319c1 --- /dev/null +++ b/apps/application/flow/knowledge_loop_workflow_manage.py @@ -0,0 +1,15 @@ +# coding=utf-8 +""" + @project: maxkb + @Author:虎 + @file: workflow_manage.py + @date:2024/1/9 17:40 + @desc: +""" +from application.flow.i_step_node import KnowledgeFlowParamsSerializer +from application.flow.loop_workflow_manage import LoopWorkflowManage + + +class KnowledgeLoopWorkflowManage(LoopWorkflowManage): + def get_params_serializer_class(self): + return KnowledgeFlowParamsSerializer diff --git a/apps/application/flow/knowledge_workflow_manage.py b/apps/application/flow/knowledge_workflow_manage.py new file mode 100644 index 000000000..07a184df6 --- /dev/null +++ b/apps/application/flow/knowledge_workflow_manage.py @@ -0,0 +1,102 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎虎 + @file: Knowledge_workflow_manage.py + @date:2025/11/13 19:02 + @desc: +""" +import traceback +from concurrent.futures import ThreadPoolExecutor + +from django.db.models import QuerySet +from django.utils.translation import get_language + +from application.flow.common import Workflow +from application.flow.i_step_node import WorkFlowPostHandler, KnowledgeFlowParamsSerializer +from application.flow.workflow_manage import WorkflowManage +from common.handle.base_to_response import BaseToResponse +from common.handle.impl.response.system_to_response import SystemToResponse +from knowledge.models.knowledge_action import KnowledgeAction, State + +executor = ThreadPoolExecutor(max_workers=200) + + +class 
KnowledgeWorkflowManage(WorkflowManage): + + def __init__(self, flow: Workflow, + params, + work_flow_post_handler: WorkFlowPostHandler, + base_to_response: BaseToResponse = SystemToResponse(), + start_node_id=None, + start_node_data=None, chat_record=None, child_node=None): + super().__init__(flow, params, work_flow_post_handler, base_to_response, None, None, None, + None, + None, None, start_node_id, start_node_data, chat_record, child_node) + + def get_params_serializer_class(self): + return KnowledgeFlowParamsSerializer + + def get_start_node(self): + start_node_list = [node for node in self.flow.nodes if + self.params.get('data_source', {}).get('node_id') == node.id] + return start_node_list[0] + + def run(self): + executor.submit(self._run) + + def _run(self): + QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update( + state=State.STARTED) + language = get_language() + self.run_chain_async(self.start_node, None, language) + while self.is_run(): + pass + self.work_flow_post_handler.handler(self) + + @staticmethod + def get_node_details(current_node, node, index): + if current_node == node: + return { + 'name': node.node.properties.get('stepName'), + "index": index, + 'run_time': 0, + 'type': node.type, + 'status': 202, + 'err_message': "" + } + + return node.get_details(index) + + def run_chain(self, current_node, node_result_future=None): + QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update( + details=self.get_runtime_details(lambda node, index: self.get_node_details(current_node, node, index))) + if node_result_future is None: + node_result_future = self.run_node_future(current_node) + try: + result = self.hand_node_result(current_node, node_result_future) + return result + except Exception as e: + traceback.print_exc() + return None + + def hand_node_result(self, current_node, node_result_future): + try: + current_result = node_result_future.result() + result = current_result.write_context(current_node, self) + if result is not None: + # 阻塞获取结果 + list(result) + return current_result + except Exception as e: + traceback.print_exc() + self.status = 500 + current_node.get_write_error_context(e) + self.answer += str(e) + QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update( + details=self.get_runtime_details(), + state=State.FAILURE) + finally: + current_node.node_chunk.end() + QuerySet(KnowledgeAction).filter(id=self.params.get('knowledge_action_id')).update( + details=self.get_runtime_details()) diff --git a/apps/application/flow/loop_workflow_manage.py b/apps/application/flow/loop_workflow_manage.py index cba38d320..bf01e7606 100644 --- a/apps/application/flow/loop_workflow_manage.py +++ b/apps/application/flow/loop_workflow_manage.py @@ -105,10 +105,10 @@ class LoopWorkflowManage(WorkflowManage): get_node_params=lambda node: node.properties.get('node_data')): for node in self.flow.nodes: if node.id == node_id: - node_instance = get_node(node.type)(node, - self.params, self, up_node_id_list, - get_node_params, - salt=self.get_index()) + node_instance = get_node(node.type, self.flow.workflow_mode)(node, + self.params, self, up_node_id_list, + get_node_params, + salt=self.get_index()) return node_instance return None @@ -116,7 +116,7 @@ class LoopWorkflowManage(WorkflowManage): close_old_connections() language = get_language() self.run_chain_async(self.start_node, None, language) - return self.await_result() + return self.await_result(is_cleanup=False) def get_index(self): return 
self.loop_params.get('index') diff --git a/apps/application/flow/step_node/__init__.py b/apps/application/flow/step_node/__init__.py index ffb0c7828..523794868 100644 --- a/apps/application/flow/step_node/__init__.py +++ b/apps/application/flow/step_node/__init__.py @@ -9,6 +9,8 @@ from .ai_chat_step_node import * from .application_node import BaseApplicationNode from .condition_node import * +from .data_source_local_node.impl.base_data_source_local_node import BaseDataSourceLocalNode +from .data_source_web_node.impl.base_data_source_web_node import BaseDataSourceWebNode from .direct_reply_node import * from .document_extract_node import * from .form_node import * @@ -16,6 +18,7 @@ from .image_generate_step_node import * from .image_to_video_step_node import BaseImageToVideoNode from .image_understand_step_node import * from .intent_node import * +from .knowledge_write_node.impl.base_knowledge_write_node import BaseKnowledgeWriteNode from .loop_break_node import BaseLoopBreakNode from .loop_continue_node import BaseLoopContinueNode from .loop_node import * @@ -36,6 +39,7 @@ from .variable_aggregation_node.impl.base_variable_aggregation_node import BaseV from .variable_assign_node import BaseVariableAssignNode from .variable_splitting_node import BaseVariableSplittingNode from .video_understand_step_node import BaseVideoUnderstandNode +from .document_split_node import BaseDocumentSplitNode node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseSearchDocumentNode, BaseQuestionNode, BaseConditionNode, BaseReplyNode, @@ -46,11 +50,11 @@ node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseSearc BaseVideoUnderstandNode, BaseIntentNode, BaseLoopNode, BaseLoopStartStepNode, BaseLoopContinueNode, - BaseLoopBreakNode, BaseVariableSplittingNode, BaseParameterExtractionNode, BaseVariableAggregationNode] + BaseLoopBreakNode, BaseVariableSplittingNode, BaseParameterExtractionNode, BaseVariableAggregationNode, + BaseDataSourceLocalNode, BaseDataSourceWebNode, BaseKnowledgeWriteNode, BaseDocumentSplitNode] + +node_map = {n.type: {w: n for w in n.support} for n in node_list} -def get_node(node_type): - find_list = [node for node in node_list if node.type == node_type] - if len(find_list) > 0: - return find_list[0] - return None +def get_node(node_type, workflow_model): + return node_map.get(node_type).get(workflow_model) diff --git a/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py b/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py index 21e23f167..edb21890e 100644 --- a/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py +++ b/apps/application/flow/step_node/ai_chat_step_node/i_chat_node.py @@ -11,6 +11,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -34,7 +35,8 @@ class ChatNodeSerializer(serializers.Serializer): mcp_enable = serializers.BooleanField(required=False, label=_("Whether to enable MCP")) mcp_servers = serializers.JSONField(required=False, label=_("MCP Server")) mcp_tool_id = serializers.CharField(required=False, allow_blank=True, allow_null=True, label=_("MCP Tool ID")) - mcp_tool_ids = serializers.ListField(child=serializers.UUIDField(), required=False, allow_empty=True, label=_("MCP Tool IDs"), ) + mcp_tool_ids = serializers.ListField(child=serializers.UUIDField(), required=False, allow_empty=True, + label=_("MCP Tool 
IDs"), ) mcp_source = serializers.CharField(required=False, allow_blank=True, allow_null=True, label=_("MCP Source")) tool_enable = serializers.BooleanField(required=False, default=False, label=_("Whether to enable tools")) @@ -42,14 +44,22 @@ class ChatNodeSerializer(serializers.Serializer): label=_("Tool IDs"), ) mcp_output_enable = serializers.BooleanField(required=False, default=True, label=_("Whether to enable MCP output")) + class IChatNode(INode): type = 'ai-chat-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP, + WorkflowMode.KNOWLEDGE] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return ChatNodeSerializer def _run(self): - return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None}) + else: + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id, diff --git a/apps/application/flow/step_node/application_node/i_application_node.py b/apps/application/flow/step_node/application_node/i_application_node.py index 5a4ea6e51..b41151ee3 100644 --- a/apps/application/flow/step_node/application_node/i_application_node.py +++ b/apps/application/flow/step_node/application_node/i_application_node.py @@ -3,6 +3,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from django.utils.translation import gettext_lazy as _ @@ -25,6 +26,7 @@ class ApplicationNodeSerializer(serializers.Serializer): class IApplicationNode(INode): type = 'application-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return ApplicationNodeSerializer diff --git a/apps/application/flow/step_node/condition_node/i_condition_node.py b/apps/application/flow/step_node/condition_node/i_condition_node.py index 1bd541b4c..9dec6b0c6 100644 --- a/apps/application/flow/step_node/condition_node/i_condition_node.py +++ b/apps/application/flow/step_node/condition_node/i_condition_node.py @@ -11,6 +11,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode @@ -36,3 +37,5 @@ class IConditionNode(INode): return ConditionNodeParamsSerializer type = 'condition-node' + + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP] diff --git a/apps/application/flow/step_node/data_source_local_node/__init__.py b/apps/application/flow/step_node/data_source_local_node/__init__.py new file mode 100644 index 000000000..bbf804a70 --- /dev/null +++ b/apps/application/flow/step_node/data_source_local_node/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎虎 + @file: __init__.py.py + @date:2025/11/11 10:06 + @desc: +""" diff --git a/apps/application/flow/step_node/data_source_local_node/i_data_source_local_node.py 
b/apps/application/flow/step_node/data_source_local_node/i_data_source_local_node.py
new file mode 100644
index 000000000..e6b39f686
--- /dev/null
+++ b/apps/application/flow/step_node/data_source_local_node/i_data_source_local_node.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+"""
+    @project: MaxKB
+    @Author:虎虎
+    @file: i_data_source_local_node.py
+    @date:2025/11/11 10:06
+    @desc:
+"""
+from abc import abstractmethod
+from typing import Type
+
+from django.utils.translation import gettext_lazy as _
+from rest_framework import serializers
+
+from application.flow.common import WorkflowMode
+from application.flow.i_step_node import INode, NodeResult
+
+
+class DataSourceLocalNodeParamsSerializer(serializers.Serializer):
+    file_type_list = serializers.ListField(child=serializers.CharField(label=('')), label='')
+    file_size_limit = serializers.IntegerField(required=True, label=_("Upload file size"))
+    file_count_limit = serializers.IntegerField(required=True, label=_("Number of uploaded files"))
+
+
+class IDataSourceLocalNode(INode):
+    type = 'data-source-local-node'
+
+    @staticmethod
+    @abstractmethod
+    def get_form_list(node):
+        pass
+
+    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
+        return DataSourceLocalNodeParamsSerializer
+
+    def _run(self):
+        return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
+
+    def execute(self, file_type_list, file_size_limit, file_count_limit, **kwargs) -> NodeResult:
+        pass
+
+    support = [WorkflowMode.KNOWLEDGE]
diff --git a/apps/application/flow/step_node/data_source_local_node/impl/__init__.py b/apps/application/flow/step_node/data_source_local_node/impl/__init__.py
new file mode 100644
index 000000000..6f8301519
--- /dev/null
+++ b/apps/application/flow/step_node/data_source_local_node/impl/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+    @project: MaxKB
+    @Author:虎虎
+    @file: __init__.py.py
+    @date:2025/11/11 10:08
+    @desc:
+"""
diff --git a/apps/application/flow/step_node/data_source_local_node/impl/base_data_source_local_node.py b/apps/application/flow/step_node/data_source_local_node/impl/base_data_source_local_node.py
new file mode 100644
index 000000000..b43263501
--- /dev/null
+++ b/apps/application/flow/step_node/data_source_local_node/impl/base_data_source_local_node.py
@@ -0,0 +1,51 @@
+# coding=utf-8
+"""
+    @project: MaxKB
+    @Author:虎虎
+    @file: base_data_source_local_node.py
+    @date:2025/11/11 10:30
+    @desc:
+"""
+from application.flow.i_step_node import NodeResult
+from application.flow.step_node.data_source_local_node.i_data_source_local_node import IDataSourceLocalNode
+from common import forms
+from common.forms import BaseForm
+
+
+class BaseDataSourceLocalNodeForm(BaseForm):
+    api_key = forms.PasswordInputField('API Key', required=True)
+
+
+class BaseDataSourceLocalNode(IDataSourceLocalNode):
+    def save_context(self, details, workflow_manage):
+        pass
+
+    @staticmethod
+    def get_form_list(node):
+        node_data = node.get('properties').get('node_data')
+        return [{
+            'field': 'file_list',
+            'input_type': 'LocalFileUpload',
+            'attrs': {
+                'file_count_limit': node_data.get('file_count_limit') or 10,
+                'file_size_limit': node_data.get('file_size_limit') or 100,
+                'file_type_list': node_data.get('file_type_list'),
+            },
+            'label': '',
+        }]
+
+    def execute(self, file_type_list, file_size_limit, file_count_limit, **kwargs) -> NodeResult:
+        return NodeResult({'file_list': self.workflow_manage.params.get('data_source', {}).get('file_list')},
+                          self.workflow_manage.params.get('knowledge_base')
or {}) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'file_list': self.context.get('file_list'), + 'knowledge_base': self.workflow_params.get('knowledge_base'), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/data_source_web_node/__init__.py b/apps/application/flow/step_node/data_source_web_node/__init__.py new file mode 100644 index 000000000..461bab6fc --- /dev/null +++ b/apps/application/flow/step_node/data_source_web_node/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: __init__.py.py + @date:2025/11/12 13:43 + @desc: +""" diff --git a/apps/application/flow/step_node/data_source_web_node/i_data_source_web_node.py b/apps/application/flow/step_node/data_source_web_node/i_data_source_web_node.py new file mode 100644 index 000000000..ee5dc990b --- /dev/null +++ b/apps/application/flow/step_node/data_source_web_node/i_data_source_web_node.py @@ -0,0 +1,28 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: i_data_source_web_node.py + @date:2025/11/12 13:47 + @desc: +""" +from abc import abstractmethod + +from application.flow.common import WorkflowMode +from application.flow.i_step_node import INode, NodeResult + + +class IDataSourceWebNode(INode): + type = 'data-source-web-node' + support = [WorkflowMode.KNOWLEDGE] + + @staticmethod + @abstractmethod + def get_form_list(node): + pass + + def _run(self): + return self.execute(**self.flow_params_serializer.data) + + def execute(self, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/data_source_web_node/impl/__init__.py b/apps/application/flow/step_node/data_source_web_node/impl/__init__.py new file mode 100644 index 000000000..b7541b12d --- /dev/null +++ b/apps/application/flow/step_node/data_source_web_node/impl/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: __init__.py + @date:2025/11/12 13:44 + @desc: +""" diff --git a/apps/application/flow/step_node/data_source_web_node/impl/base_data_source_web_node.py b/apps/application/flow/step_node/data_source_web_node/impl/base_data_source_web_node.py new file mode 100644 index 000000000..5943b3c3e --- /dev/null +++ b/apps/application/flow/step_node/data_source_web_node/impl/base_data_source_web_node.py @@ -0,0 +1,86 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: base_data_source_web_node.py + @date:2025/11/12 13:47 + @desc: +""" +import traceback + +from django.utils.translation import gettext_lazy as _ + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.data_source_web_node.i_data_source_web_node import IDataSourceWebNode +from common import forms +from common.forms import BaseForm +from common.utils.fork import ForkManage, Fork, ChildLink +from common.utils.logger import maxkb_logger + + +class BaseDataSourceWebNodeForm(BaseForm): + source_url = forms.TextInputField('source url', required=True) + selector = forms.TextInputField('knowledge selector', required=False,default_value="body") + + +def get_collect_handler(): + results = [] + + def handler(child_link: ChildLink, response: Fork.Response): + if response.status == 200: + try: + document_name = child_link.tag.text if child_link.tag is not None and len( + child_link.tag.text.strip()) > 0 else child_link.url + results.append({ + "name": document_name.strip(), 
+ "content": response.content, + }) + except Exception as e: + maxkb_logger.error(f'{str(e)}:{traceback.format_exc()}') + + return handler,results + + +class BaseDataSourceWebNode(IDataSourceWebNode): + def save_context(self, details, workflow_manage): + pass + + @staticmethod + def get_form_list(node): + return BaseDataSourceWebNodeForm().to_form_list() + + def execute(self, **kwargs) -> NodeResult: + BaseDataSourceWebNodeForm().valid_form(self.workflow_params.get("data_source")) + + data_source = self.workflow_params.get("data_source") + + node_id = data_source.get("node_id") + source_url = data_source.get("source_url") + selector = data_source.get("selector") or "body" + + collect_handler, document_list = get_collect_handler() + + try: + ForkManage(source_url,selector.split(" ") if selector is not None else []).fork(3,set(),collect_handler) + + return NodeResult({'document_list': document_list}, + self.workflow_manage.params.get('knowledge_base') or {}) + + except Exception as e: + maxkb_logger.error(_('data source web node:{node_id} error{error}{traceback}').format( + knowledge_id=node_id, error=str(e), traceback=traceback.format_exc())) + + + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'input_params': {"source_url": self.context.get("source_url"),"selector": self.context.get('selector')}, + 'output_params': self.context.get('document_list'), + 'knowledge_base': self.workflow_params.get('knowledge_base'), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/direct_reply_node/i_reply_node.py b/apps/application/flow/step_node/direct_reply_node/i_reply_node.py index acb9262bd..c1646a054 100644 --- a/apps/application/flow/step_node/direct_reply_node/i_reply_node.py +++ b/apps/application/flow/step_node/direct_reply_node/i_reply_node.py @@ -10,6 +10,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from common.exception.app_exception import AppApiException @@ -38,12 +39,19 @@ class ReplyNodeParamsSerializer(serializers.Serializer): class IReplyNode(INode): type = 'reply-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP, + WorkflowMode.KNOWLEDGE] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return ReplyNodeParamsSerializer def _run(self): - return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + **{'stream': True}) + else: + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py b/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py index e907220ca..eca4856dd 100644 --- a/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py +++ b/apps/application/flow/step_node/document_extract_node/i_document_extract_node.py @@ -5,6 +5,7 @@ from typing import Type from 
django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -14,7 +15,8 @@ class DocumentExtractNodeSerializer(serializers.Serializer): class IDocumentExtractNode(INode): type = 'document-extract-node' - + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP, + WorkflowMode.KNOWLEDGE] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return DocumentExtractNodeSerializer @@ -23,5 +25,5 @@ class IDocumentExtractNode(INode): self.node_params_serializer.data.get('document_list')[1:]) return self.execute(document=res, **self.flow_params_serializer.data) - def execute(self, document, chat_id, **kwargs) -> NodeResult: + def execute(self, document, chat_id=None, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py b/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py index 80bab8688..b3004685a 100644 --- a/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py +++ b/apps/application/flow/step_node/document_extract_node/impl/base_document_extract_node.py @@ -1,39 +1,14 @@ # coding=utf-8 +import ast import io -import mimetypes -from django.core.files.uploadedfile import InMemoryUploadedFile +import uuid_utils.compat as uuid from django.db.models import QuerySet from application.flow.i_step_node import NodeResult from application.flow.step_node.document_extract_node.i_document_extract_node import IDocumentExtractNode from knowledge.models import File, FileSourceType from knowledge.serializers.document import split_handles, parse_table_handle_list, FileBufferHandle -from oss.serializers.file import FileSerializer - - -def bytes_to_uploaded_file(file_bytes, file_name="file.txt"): - content_type, _ = mimetypes.guess_type(file_name) - if content_type is None: - # 如果未能识别,设置为默认的二进制文件类型 - content_type = "application/octet-stream" - # 创建一个内存中的字节流对象 - file_stream = io.BytesIO(file_bytes) - - # 获取文件大小 - file_size = len(file_bytes) - - # 创建 InMemoryUploadedFile 对象 - uploaded_file = InMemoryUploadedFile( - file=file_stream, - field_name=None, - name=file_name, - content_type=content_type, - size=file_size, - charset=None, - ) - return uploaded_file - splitter = '\n`-----------------------------------`\n' @@ -42,35 +17,69 @@ class BaseDocumentExtractNode(IDocumentExtractNode): def save_context(self, details, workflow_manage): self.context['content'] = details.get('content') - def execute(self, document, chat_id, **kwargs): + def execute(self, document, chat_id=None, **kwargs): get_buffer = FileBufferHandle().get_buffer self.context['document_list'] = document content = [] if document is None or not isinstance(document, list): - return NodeResult({'content': ''}, {}) + return NodeResult({'content': '', 'document_list': []}, {}) - application = self.workflow_manage.work_flow_post_handler.chat_info.application + # 安全获取 application + application_id = None + if (self.workflow_manage and + self.workflow_manage.work_flow_post_handler and + self.workflow_manage.work_flow_post_handler.chat_info): + application_id = self.workflow_manage.work_flow_post_handler.chat_info.application.id + knowledge_id = self.workflow_params.get('knowledge_id') # doc文件中的图片保存 def save_image(image_list): for image in image_list: meta = { - 'debug': False if application.id else True, + 
'debug': False if (application_id or knowledge_id) else True, 'chat_id': chat_id, - 'application_id': str(application.id) if application.id else None, + 'application_id': str(application_id) if application_id else None, + 'knowledge_id': str(knowledge_id) if knowledge_id else None, 'file_id': str(image.id) } file_bytes = image.meta.pop('content') - f = bytes_to_uploaded_file(file_bytes, image.file_name) - FileSerializer(data={ - 'file': f, - 'meta': meta, - 'source_id': meta['application_id'], - 'source_type': FileSourceType.APPLICATION.value - }).upload() + new_file = File( + id=meta['file_id'], + file_name=image.file_name, + file_size=len(file_bytes), + source_type=FileSourceType.APPLICATION.value if meta[ + 'application_id'] else FileSourceType.KNOWLEDGE.value, + source_id=meta['application_id'] if meta['application_id'] else meta['knowledge_id'], + meta=meta + ) + new_file.save(file_bytes) + document_list = [] for doc in document: + if 'file_bytes' in doc: + file_bytes = doc['file_bytes'] + # 如果是字符串,转换为字节 + if isinstance(file_bytes, str): + file_bytes = ast.literal_eval(file_bytes) + doc['file_id'] = doc.get('file_id') or uuid.uuid7() + meta = { + 'debug': False if (application_id or knowledge_id) else True, + 'chat_id': chat_id, + 'application_id': str(application_id) if application_id else None, + 'knowledge_id': str(knowledge_id) if knowledge_id else None, + 'file_id': str(doc['file_id']) + } + new_file = File( + id=doc['file_id'], + file_name=doc['name'], + file_size=len(file_bytes), + source_type=FileSourceType.APPLICATION.value if meta[ + 'application_id'] else FileSourceType.KNOWLEDGE.value, + source_id=meta['application_id'] if meta['application_id'] else meta['knowledge_id'], + meta={} + ) + new_file.save(file_bytes) file = QuerySet(File).filter(id=doc['file_id']).first() buffer = io.BytesIO(file.get_bytes()) buffer.name = doc['name'] # this is the important line @@ -81,9 +90,10 @@ class BaseDocumentExtractNode(IDocumentExtractNode): buffer.seek(0) file_content = split_handle.get_content(buffer, save_image) content.append('### ' + doc['name'] + '\n' + file_content) + document_list.append({'id': file.id, 'name': doc['name'], 'content': file_content}) break - return NodeResult({'content': splitter.join(content)}, {}) + return NodeResult({'content': splitter.join(content), 'document_list': document_list}, {}) def get_details(self, index: int, **kwargs): content = self.context.get('content', '').split(splitter) diff --git a/apps/application/flow/step_node/document_split_node/__init__.py b/apps/application/flow/step_node/document_split_node/__init__.py new file mode 100644 index 000000000..ce8f10f3e --- /dev/null +++ b/apps/application/flow/step_node/document_split_node/__init__.py @@ -0,0 +1 @@ +from .impl import * \ No newline at end of file diff --git a/apps/application/flow/step_node/document_split_node/i_document_split_node.py b/apps/application/flow/step_node/document_split_node/i_document_split_node.py new file mode 100644 index 000000000..9a6327365 --- /dev/null +++ b/apps/application/flow/step_node/document_split_node/i_document_split_node.py @@ -0,0 +1,64 @@ +# coding=utf-8 + +from typing import Type + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.common import WorkflowMode +from application.flow.i_step_node import INode, NodeResult + + +class DocumentSplitNodeSerializer(serializers.Serializer): + document_list = serializers.ListField(required=False, label=_("document list")) + split_strategy = 
serializers.ChoiceField( + choices=['auto', 'custom', 'qa'], required=False, label=_("split strategy"), default='auto' + ) + paragraph_title_relate_problem_type = serializers.ChoiceField( + choices=['custom', 'referencing'], required=False, label=_("paragraph title relate problem type"), + default='custom' + ) + paragraph_title_relate_problem = serializers.BooleanField( + required=False, label=_("paragraph title relate problem"), default=False + ) + paragraph_title_relate_problem_reference = serializers.ListField( + required=False, label=_("paragraph title relate problem reference"), child=serializers.CharField(), default=[] + ) + document_name_relate_problem_type = serializers.ChoiceField( + choices=['custom', 'referencing'], required=False, label=_("document name relate problem type"), + default='custom' + ) + document_name_relate_problem = serializers.BooleanField( + required=False, label=_("document name relate problem"), default=False + ) + document_name_relate_problem_reference = serializers.ListField( + required=False, label=_("document name relate problem reference"), child=serializers.CharField(), default=[] + ) + limit = serializers.IntegerField(required=False, label=_("limit"), default=4096) + patterns = serializers.ListField( + required=False, label=_("patterns"), child=serializers.CharField(), default=[] + ) + with_filter = serializers.BooleanField( + required=False, label=_("with filter"), default=False + ) + + +class IDocumentSplitNode(INode): + type = 'document-split-node' + support = [ + WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP, WorkflowMode.KNOWLEDGE + ] + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return DocumentSplitNodeSerializer + + def _run(self): + # res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('file_list')[0], + # self.node_params_serializer.data.get('file_list')[1:]) + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, document_list, knowledge_id, split_strategy, paragraph_title_relate_problem_type, + paragraph_title_relate_problem, paragraph_title_relate_problem_reference, + document_name_relate_problem_type, document_name_relate_problem, + document_name_relate_problem_reference, limit, patterns, with_filter, **kwargs) -> NodeResult: + pass diff --git a/apps/application/flow/step_node/document_split_node/impl/__init__.py b/apps/application/flow/step_node/document_split_node/impl/__init__.py new file mode 100644 index 000000000..cc7dc7dda --- /dev/null +++ b/apps/application/flow/step_node/document_split_node/impl/__init__.py @@ -0,0 +1 @@ +from .base_document_split_node import BaseDocumentSplitNode diff --git a/apps/application/flow/step_node/document_split_node/impl/base_document_split_node.py b/apps/application/flow/step_node/document_split_node/impl/base_document_split_node.py new file mode 100644 index 000000000..ec4055ce0 --- /dev/null +++ b/apps/application/flow/step_node/document_split_node/impl/base_document_split_node.py @@ -0,0 +1,140 @@ +# coding=utf-8 +import io +import mimetypes +from typing import List + +from django.core.files.uploadedfile import InMemoryUploadedFile + +from application.flow.i_step_node import NodeResult +from application.flow.step_node.document_split_node.i_document_split_node import IDocumentSplitNode +from knowledge.serializers.document import default_split_handle, FileBufferHandle + + +def bytes_to_uploaded_file(file_bytes, file_name="file.txt"): + 
content_type, _ = mimetypes.guess_type(file_name) + if content_type is None: + # 如果未能识别,设置为默认的二进制文件类型 + content_type = "application/octet-stream" + # 创建一个内存中的字节流对象 + file_stream = io.BytesIO(file_bytes) + + # 获取文件大小 + file_size = len(file_bytes) + + # 创建 InMemoryUploadedFile 对象 + uploaded_file = InMemoryUploadedFile( + file=file_stream, + field_name=None, + name=file_name, + content_type=content_type, + size=file_size, + charset=None, + ) + return uploaded_file + + +class BaseDocumentSplitNode(IDocumentSplitNode): + def save_context(self, details, workflow_manage): + self.context['content'] = details.get('content') + + def get_reference_content(self, fields: List[str]): + return self.workflow_manage.get_reference_field(fields[0], fields[1:]) + + def execute(self, document_list, knowledge_id, split_strategy, paragraph_title_relate_problem_type, + paragraph_title_relate_problem, paragraph_title_relate_problem_reference, + document_name_relate_problem_type, document_name_relate_problem, + document_name_relate_problem_reference, limit, patterns, with_filter, **kwargs) -> NodeResult: + self.context['knowledge_id'] = knowledge_id + file_list = self.workflow_manage.get_reference_field(document_list[0], document_list[1:]) + paragraph_list = [] + + for doc in file_list: + get_buffer = FileBufferHandle().get_buffer + + file_mem = bytes_to_uploaded_file(doc['content'].encode('utf-8')) + result = default_split_handle.handle(file_mem, patterns, with_filter, limit, get_buffer, self._save_image) + # 统一处理结果为列表 + results = result if isinstance(result, list) else [result] + + for item in results: + self._process_split_result( + item, knowledge_id, doc.get('id'), doc.get('name'), + split_strategy, paragraph_title_relate_problem_type, + paragraph_title_relate_problem, paragraph_title_relate_problem_reference, + document_name_relate_problem_type, document_name_relate_problem, + document_name_relate_problem_reference + ) + + paragraph_list += results + + self.context['paragraph_list'] = paragraph_list + + return NodeResult({'paragraph_list': paragraph_list}, {}) + + def _save_image(self, image_list): + pass + + def _process_split_result( + self, item, knowledge_id, source_file_id, file_name, + split_strategy, paragraph_title_relate_problem_type, + paragraph_title_relate_problem, paragraph_title_relate_problem_reference, + document_name_relate_problem_type, document_name_relate_problem, + document_name_relate_problem_reference + ): + """处理文档分割结果""" + item['meta'] = { + 'knowledge_id': knowledge_id, + 'source_file_id': source_file_id, + 'source_url': file_name, + } + item['name'] = file_name + item['paragraphs'] = item.pop('content', []) + + for paragraph in item['paragraphs']: + paragraph['problem_list'] = self._generate_problem_list( + paragraph, file_name, + split_strategy, paragraph_title_relate_problem_type, + paragraph_title_relate_problem, paragraph_title_relate_problem_reference, + document_name_relate_problem_type, document_name_relate_problem, + document_name_relate_problem_reference + ) + paragraph['is_active'] = True + + def _generate_problem_list( + self, paragraph, document_name, split_strategy, paragraph_title_relate_problem_type, + paragraph_title_relate_problem, paragraph_title_relate_problem_reference, + document_name_relate_problem_type, document_name_relate_problem, + document_name_relate_problem_reference + ): + if paragraph_title_relate_problem_type == 'referencing': + paragraph_title_relate_problem = self.get_reference_content(paragraph_title_relate_problem_reference) + if 
document_name_relate_problem_type == 'referencing': + document_name_relate_problem = self.get_reference_content(document_name_relate_problem_reference) + + problem_list = [] + if split_strategy == 'auto': + if paragraph_title_relate_problem and paragraph.get('title'): + problem_list.append(paragraph.get('title')) + if document_name_relate_problem and document_name: + problem_list.append(document_name) + elif split_strategy == 'custom': + if paragraph_title_relate_problem: + problem_list.extend(paragraph_title_relate_problem) + if document_name_relate_problem: + problem_list.extend(document_name_relate_problem) + elif split_strategy == 'qa': + if document_name_relate_problem and document_name: + problem_list.append(document_name) + + return problem_list + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'status': self.status, + 'err_message': self.err_message, + 'paragraph_list': self.context.get('paragraph_list', []), + } diff --git a/apps/application/flow/step_node/form_node/i_form_node.py b/apps/application/flow/step_node/form_node/i_form_node.py index 552434dbf..a47a75ddc 100644 --- a/apps/application/flow/step_node/form_node/i_form_node.py +++ b/apps/application/flow/step_node/form_node/i_form_node.py @@ -10,6 +10,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from django.utils.translation import gettext_lazy as _ @@ -24,6 +25,7 @@ class FormNodeParamsSerializer(serializers.Serializer): class IFormNode(INode): type = 'form-node' view_type = 'single_view' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return FormNodeParamsSerializer diff --git a/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py b/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py index 5ea2afad2..a3528d5fb 100644 --- a/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py +++ b/apps/application/flow/step_node/image_generate_step_node/i_image_generate_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -31,14 +32,21 @@ class ImageGenerateNodeSerializer(serializers.Serializer): class IImageGenerateNode(INode): type = 'image-generate-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return ImageGenerateNodeSerializer def _run(self): - return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None}) + else: + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) - def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, 
history_chat_record, chat_id, + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, model_params_setting, chat_record_id, **kwargs) -> NodeResult: diff --git a/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py b/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py index bfa78fc9c..27419b056 100644 --- a/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py +++ b/apps/application/flow/step_node/image_generate_step_node/impl/base_image_generate_node.py @@ -5,6 +5,7 @@ from typing import List import requests from langchain_core.messages import BaseMessage, HumanMessage, AIMessage +from application.flow.common import WorkflowMode from application.flow.i_step_node import NodeResult from application.flow.step_node.image_generate_step_node.i_image_generate_node import IImageGenerateNode from common.utils.common import bytes_to_uploaded_file @@ -20,11 +21,10 @@ class BaseImageGenerateNode(IImageGenerateNode): if self.node_params.get('is_result', False): self.answer_text = details.get('answer') - def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, model_params_setting, chat_record_id, **kwargs) -> NodeResult: - application = self.workflow_manage.work_flow_post_handler.chat_info.application workspace_id = self.workflow_manage.get_body().get('workspace_id') tti_model = get_model_instance_by_model_workspace_id(model_id, workspace_id, **model_params_setting) @@ -44,17 +44,7 @@ class BaseImageGenerateNode(IImageGenerateNode): if isinstance(image_url, str) and image_url.startswith('http'): image_url = requests.get(image_url).content file = bytes_to_uploaded_file(image_url, file_name) - meta = { - 'debug': False if application.id else True, - 'chat_id': chat_id, - 'application_id': str(application.id) if application.id else None, - } - file_url = FileSerializer(data={ - 'file': file, - 'meta': meta, - 'source_id': meta['application_id'], - 'source_type': FileSourceType.APPLICATION.value - }).upload() + file_url = self.upload_file(file) file_urls.append(file_url) self.context['image_list'] = [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls] answer = ' '.join([f"![Image]({path})" for path in file_urls]) @@ -101,6 +91,42 @@ class BaseImageGenerateNode(IImageGenerateNode): question ] + def upload_file(self, file): + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.upload_knowledge_file(file) + return self.upload_application_file(file) + + def upload_knowledge_file(self, file): + knowledge_id = self.workflow_params.get('knowledge_id') + meta = { + 'debug': False, + 'knowledge_id': knowledge_id, + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': knowledge_id, + 'source_type': FileSourceType.KNOWLEDGE.value + }).upload() + return file_url + + def upload_application_file(self, file): + application = self.workflow_manage.work_flow_post_handler.chat_info.application + chat_id = self.workflow_params.get('chat_id') + meta = { + 'debug': False if application.id else True, + 'chat_id': chat_id, + 'application_id': str(application.id) if application.id else None, + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': 
meta['application_id'], + 'source_type': FileSourceType.APPLICATION.value + }).upload() + return file_url + @staticmethod def reset_message_list(message_list: List[BaseMessage], answer_text): result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for diff --git a/apps/application/flow/step_node/image_to_video_step_node/i_image_to_video_node.py b/apps/application/flow/step_node/image_to_video_step_node/i_image_to_video_node.py index 5c408f6d4..8e4aa9f0d 100644 --- a/apps/application/flow/step_node/image_to_video_step_node/i_image_to_video_node.py +++ b/apps/application/flow/step_node/image_to_video_step_node/i_image_to_video_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -34,7 +35,8 @@ class ImageToVideoNodeSerializer(serializers.Serializer): class IImageToVideoNode(INode): type = 'image-to-video-node' - + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return ImageToVideoNodeSerializer @@ -53,10 +55,15 @@ class IImageToVideoNode(INode): self.node_params_serializer.data.get('last_frame_url')[1:]) node_params_data = {k: v for k, v in self.node_params_serializer.data.items() if k not in ['first_frame_url', 'last_frame_url']} - return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url, + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url, **node_params_data, **self.flow_params_serializer.data, + **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None}) + else: + return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url, **node_params_data, **self.flow_params_serializer.data) - def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, model_params_setting, chat_record_id, first_frame_url, last_frame_url, diff --git a/apps/application/flow/step_node/image_to_video_step_node/impl/base_image_to_video_node.py b/apps/application/flow/step_node/image_to_video_step_node/impl/base_image_to_video_node.py index aa146cea2..88eb406b4 100644 --- a/apps/application/flow/step_node/image_to_video_step_node/impl/base_image_to_video_node.py +++ b/apps/application/flow/step_node/image_to_video_step_node/impl/base_image_to_video_node.py @@ -7,6 +7,7 @@ import requests from django.db.models import QuerySet from langchain_core.messages import BaseMessage, HumanMessage, AIMessage +from application.flow.common import WorkflowMode from application.flow.i_step_node import NodeResult from application.flow.step_node.image_to_video_step_node.i_image_to_video_node import IImageToVideoNode from common.utils.common import bytes_to_uploaded_file @@ -23,12 +24,11 @@ class BaseImageToVideoNode(IImageToVideoNode): if self.node_params.get('is_result', False): self.answer_text = details.get('answer') - def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + def execute(self, model_id, 
prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, model_params_setting, chat_record_id, first_frame_url, last_frame_url=None, **kwargs) -> NodeResult: - application = self.workflow_manage.work_flow_post_handler.chat_info.application workspace_id = self.workflow_manage.get_body().get('workspace_id') ttv_model = get_model_instance_by_model_workspace_id(model_id, workspace_id, **model_params_setting) @@ -54,17 +54,7 @@ class BaseImageToVideoNode(IImageToVideoNode): if isinstance(video_urls, str) and video_urls.startswith('http'): video_urls = requests.get(video_urls).content file = bytes_to_uploaded_file(video_urls, file_name) - meta = { - 'debug': False if application.id else True, - 'chat_id': chat_id, - 'application_id': str(application.id) if application.id else None, - } - file_url = FileSerializer(data={ - 'file': file, - 'meta': meta, - 'source_id': meta['application_id'], - 'source_type': FileSourceType.APPLICATION.value - }).upload() + file_url = self.upload_file(file) video_label = f'' video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}] return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list, @@ -88,6 +78,42 @@ class BaseImageToVideoNode(IImageToVideoNode): raise ValueError( gettext("Failed to obtain the image")) + def upload_file(self, file): + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.upload_knowledge_file(file) + return self.upload_application_file(file) + + def upload_knowledge_file(self, file): + knowledge_id = self.workflow_params.get('knowledge_id') + meta = { + 'debug': False, + 'knowledge_id': knowledge_id + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': knowledge_id, + 'source_type': FileSourceType.KNOWLEDGE.value + }).upload() + return file_url + + def upload_application_file(self, file): + application = self.workflow_manage.work_flow_post_handler.chat_info.application + chat_id = self.workflow_params.get('chat_id') + meta = { + 'debug': False if application.id else True, + 'chat_id': chat_id, + 'application_id': str(application.id) if application.id else None, + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': meta['application_id'], + 'source_type': FileSourceType.APPLICATION.value + }).upload() + return file_url + def generate_history_ai_message(self, chat_record): for val in chat_record.details.values(): if self.node.id == val['node_id'] and 'image_list' in val: diff --git a/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py b/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py index 1803fbea3..c7dd99ccb 100644 --- a/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py +++ b/apps/application/flow/step_node/image_understand_step_node/i_image_understand_node.py @@ -4,6 +4,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from django.utils.translation import gettext_lazy as _ @@ -30,6 +31,7 @@ class ImageUnderstandNodeSerializer(serializers.Serializer): class IImageUnderstandNode(INode): type = 'image-understand-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return 
ImageUnderstandNodeSerializer diff --git a/apps/application/flow/step_node/intent_node/i_intent_node.py b/apps/application/flow/step_node/intent_node/i_intent_node.py index 0c48be9b9..d609359d4 100644 --- a/apps/application/flow/step_node/intent_node/i_intent_node.py +++ b/apps/application/flow/step_node/intent_node/i_intent_node.py @@ -5,11 +5,11 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult class IntentBranchSerializer(serializers.Serializer): - id = serializers.CharField(required=True, label=_("Branch id")) content = serializers.CharField(required=True, label=_("content")) isOther = serializers.BooleanField(required=True, label=_("Branch Type")) @@ -24,8 +24,12 @@ class IntentNodeSerializer(serializers.Serializer): label=_("Model parameter settings")) branch = IntentBranchSerializer(many=True) + class IIntentNode(INode): type = 'intent-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] + def save_context(self, details, workflow_manage): pass @@ -37,10 +41,15 @@ class IIntentNode(INode): self.node_params_serializer.data.get('content_list')[0], self.node_params_serializer.data.get('content_list')[1:], ) - - return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, user_input=str(question)) - + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None, + 'user_input': str(question)}) + else: + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + user_input=str(question)) def execute(self, model_id, dialogue_number, history_chat_record, user_input, branch, model_params_setting=None, **kwargs) -> NodeResult: - pass \ No newline at end of file + pass diff --git a/apps/application/flow/step_node/knowledge_write_node/__init__.py b/apps/application/flow/step_node/knowledge_write_node/__init__.py new file mode 100644 index 000000000..ea50569d5 --- /dev/null +++ b/apps/application/flow/step_node/knowledge_write_node/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: __init__.py.py + @date:2025/11/13 11:17 + @desc: +""" \ No newline at end of file diff --git a/apps/application/flow/step_node/knowledge_write_node/i_knowledge_write_node.py b/apps/application/flow/step_node/knowledge_write_node/i_knowledge_write_node.py new file mode 100644 index 000000000..b3a27a1df --- /dev/null +++ b/apps/application/flow/step_node/knowledge_write_node/i_knowledge_write_node.py @@ -0,0 +1,43 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: i_knowledge_write_node.py + @date:2025/11/13 11:19 + @desc: +""" +from typing import Type + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.common import WorkflowMode +from application.flow.i_step_node import INode, NodeResult + + +class KnowledgeWriteNodeParamSerializer(serializers.Serializer): + document_list = serializers.ListField(required=True, child=serializers.CharField(required=True), allow_null=True, + label=_('document list')) + + +class IKnowledgeWriteNode(INode): + + def 
save_context(self, details, workflow_manage): + pass + + def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: + return KnowledgeWriteNodeParamSerializer + + def _run(self): + documents = self.workflow_manage.get_reference_field( + self.node_params_serializer.data.get('document_list')[0], + self.node_params_serializer.data.get('document_list')[1:], + ) + + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, documents=documents) + + def execute(self, documents, **kwargs) -> NodeResult: + pass + + type = 'knowledge-write-node' + support = [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP] diff --git a/apps/application/flow/step_node/knowledge_write_node/impl/__init__.py b/apps/application/flow/step_node/knowledge_write_node/impl/__init__.py new file mode 100644 index 000000000..077d74325 --- /dev/null +++ b/apps/application/flow/step_node/knowledge_write_node/impl/__init__.py @@ -0,0 +1,8 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: __init__.py.py + @date:2025/11/13 11:18 + @desc: +""" diff --git a/apps/application/flow/step_node/knowledge_write_node/impl/base_knowledge_write_node.py b/apps/application/flow/step_node/knowledge_write_node/impl/base_knowledge_write_node.py new file mode 100644 index 000000000..9fface6c4 --- /dev/null +++ b/apps/application/flow/step_node/knowledge_write_node/impl/base_knowledge_write_node.py @@ -0,0 +1,213 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:niu + @file: base_knowledge_write_node.py + @date:2025/11/13 11:19 + @desc: +""" +from functools import reduce +from typing import Dict, List +import uuid_utils.compat as uuid +from django.db.models import QuerySet +from django.db.models.aggregates import Max + +from rest_framework import serializers +from django.utils.translation import gettext_lazy as _ +from application.flow.i_step_node import NodeResult +from application.flow.step_node.knowledge_write_node.i_knowledge_write_node import IKnowledgeWriteNode +from common.chunk import text_to_chunk +from common.utils.common import bulk_create_in_batches +from knowledge.models import Document, KnowledgeType, Paragraph, File, FileSourceType, Problem, ProblemParagraphMapping +from knowledge.serializers.common import ProblemParagraphObject, ProblemParagraphManage + + +class ParagraphInstanceSerializer(serializers.Serializer): + content = serializers.CharField(required=True, label=_('content'), max_length=102400, min_length=1, allow_null=True, + allow_blank=True) + title = serializers.CharField(required=False, max_length=256, label=_('section title'), allow_null=True, + allow_blank=True) + problem_list = serializers.ListField(required=False, child=serializers.CharField(required=False, allow_blank=True)) + is_active = serializers.BooleanField(required=False, label=_('Is active')) + chunks = serializers.ListField(required=False, child=serializers.CharField(required=True)) + + +class KnowledgeWriteParamSerializer(serializers.Serializer): + name = serializers.CharField(required=True, label=_('document name'), max_length=128, min_length=1, + source=_('document name')) + meta = serializers.DictField(required=False) + paragraphs = ParagraphInstanceSerializer(required=False, many=True, allow_null=True) + + +def convert_uuid_to_str(obj): + if isinstance(obj, dict): + return {k: convert_uuid_to_str(v) for k, v in obj.items()} + elif isinstance(obj, list): + return [convert_uuid_to_str(i) for i in obj] + elif isinstance(obj, uuid.UUID): + return str(obj) + else: + return obj + +def 
link_file(source_file_id, document_id): + if source_file_id is None: + return + source_file = QuerySet(File).filter(id=source_file_id).first() + if source_file: + file_content = source_file.get_bytes() + + new_file = File( + id=uuid.uuid7(), + file_name=source_file.file_name, + file_size=source_file.file_size, + source_type=FileSourceType.DOCUMENT, + source_id=document_id, # 更新为当前知识库ID + meta=source_file.meta.copy() if source_file.meta else {} + ) + + # 保存文件内容和元数据 + new_file.save(file_content) + +def get_paragraph_problem_model(knowledge_id: str, document_id: str, instance: Dict): + paragraph = Paragraph( + id=uuid.uuid7(), + document_id=document_id, + content=instance.get("content"), + knowledge_id=knowledge_id, + title=instance.get("title") if 'title' in instance else '', + chunks = instance.get('chunks') if 'chunks' in instance else text_to_chunk(instance.get("content")), + ) + + problem_paragraph_object_list = [ProblemParagraphObject( + knowledge_id, document_id, str(paragraph.id), problem + ) for problem in (instance.get('problem_list') if 'problem_list' in instance else [])] + + return { + 'paragraph': paragraph, + 'problem_paragraph_object_list': problem_paragraph_object_list, + } + + +def get_paragraph_model(document_model, paragraph_list: List): + knowledge_id = document_model.knowledge_id + paragraph_model_dict_list = [ + get_paragraph_problem_model(knowledge_id, document_model.id, paragraph) + for paragraph in paragraph_list + ] + + paragraph_model_list = [] + problem_paragraph_object_list = [] + for paragraphs in paragraph_model_dict_list: + paragraph = paragraphs.get('paragraph') + for problem_model in paragraphs.get('problem_paragraph_object_list'): + problem_paragraph_object_list.append(problem_model) + paragraph_model_list.append(paragraph) + + return { + 'document': document_model, + 'paragraph_model_list': paragraph_model_list, + 'problem_paragraph_object_list': problem_paragraph_object_list, + } + + +def get_document_paragraph_model(knowledge_id: str, instance: Dict): + source_meta = {'source_file_id': instance.get("source_file_id")} if instance.get("source_file_id") else {} + meta = {**instance.get('meta'), **source_meta} if instance.get('meta') is not None else source_meta + meta = {**convert_uuid_to_str(meta), 'allow_download': True} + + document_model = Document( + **{ + 'knowledge_id': knowledge_id, + 'id': uuid.uuid7(), + 'name': instance.get('name'), + 'char_length': reduce( + lambda x, y: x + y, + [len(p.get('content')) for p in instance.get('paragraphs', [])], + 0), + 'meta': meta, + 'type': instance.get('type') if instance.get('type') is not None else KnowledgeType.WORKFLOW + } + ) + + return get_paragraph_model( + document_model, + instance.get('paragraphs') if 'paragraphs' in instance else [] + ) + + +class BaseKnowledgeWriteNode(IKnowledgeWriteNode): + + def save_context(self, details, workflow_manage): + pass + + def save(self, document_list): + serializer = KnowledgeWriteParamSerializer(data=document_list, many=True) + serializer.is_valid(raise_exception=True) + document_list = serializer.data + + knowledge_id = self.workflow_params.get("knowledge_id") + workspace_id = self.workflow_params.get("workspace_id") + + document_model_list = [] + paragraph_model_list = [] + problem_paragraph_object_list = [] + + for document in document_list: + document_paragraph_dict_model = get_document_paragraph_model( + knowledge_id, + document + ) + document_instance = document_paragraph_dict_model.get('document') + link_file(document.get("source_file_id"), 
document_instance.id) + document_model_list.append(document_instance) + for paragraph in document_paragraph_dict_model.get("paragraph_model_list"): + paragraph_model_list.append(paragraph) + for problem_paragraph_object in document_paragraph_dict_model.get("problem_paragraph_object_list"): + problem_paragraph_object_list.append(problem_paragraph_object) + + problem_model_list, problem_paragraph_mapping_list = ( + ProblemParagraphManage(problem_paragraph_object_list, knowledge_id).to_problem_model_list() + ) + + QuerySet(Document).bulk_create(document_model_list) if len(document_model_list) > 0 else None + + if len(paragraph_model_list) > 0: + for document in document_model_list: + max_position = Paragraph.objects.filter(document_id=document.id).aggregate( + max_position=Max('position') + )['max_position'] or 0 + sub_list = [p for p in paragraph_model_list if p.document_id == document.id] + for i, paragraph in enumerate(sub_list): + paragraph.position = max_position + i + 1 + QuerySet(Paragraph).bulk_create(sub_list if len(sub_list) > 0 else []) + + bulk_create_in_batches(Problem, problem_model_list, batch_size=1000) + + bulk_create_in_batches(ProblemParagraphMapping, problem_paragraph_mapping_list, batch_size=1000) + + return document_model_list, knowledge_id, workspace_id + + def execute(self, documents, **kwargs) -> NodeResult: + + document_model_list, knowledge_id, workspace_id = self.save(documents) + + write_content_list = [{ + "name": document.get("name"), + "paragraphs": [{ + "title": p.get("title"), + "content": p.get("content"), + } for p in document.get("paragraphs")[0:4]] + } for document in documents] + + return NodeResult({'write_content': write_content_list}, {}) + + def get_details(self, index: int, **kwargs): + return { + 'name': self.node.properties.get('stepName'), + "index": index, + 'run_time': self.context.get('run_time'), + 'type': self.node.type, + 'write_content': self.context.get("write_content"), + 'status': self.status, + 'err_message': self.err_message + } diff --git a/apps/application/flow/step_node/loop_break_node/i_loop_break_node.py b/apps/application/flow/step_node/loop_break_node/i_loop_break_node.py index d42d05a11..140ccf853 100644 --- a/apps/application/flow/step_node/loop_break_node/i_loop_break_node.py +++ b/apps/application/flow/step_node/loop_break_node/i_loop_break_node.py @@ -11,6 +11,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode from application.flow.i_step_node import NodeResult @@ -28,6 +29,7 @@ class LoopBreakNodeSerializer(serializers.Serializer): class ILoopBreakNode(INode): type = 'loop-break-node' + support = [WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return LoopBreakNodeSerializer diff --git a/apps/application/flow/step_node/loop_continue_node/i_loop_continue_node.py b/apps/application/flow/step_node/loop_continue_node/i_loop_continue_node.py index 07ee1252f..efb21437d 100644 --- a/apps/application/flow/step_node/loop_continue_node/i_loop_continue_node.py +++ b/apps/application/flow/step_node/loop_continue_node/i_loop_continue_node.py @@ -8,10 +8,11 @@ """ from typing import Type +from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, 
NodeResult -from django.utils.translation import gettext_lazy as _ class ConditionSerializer(serializers.Serializer): @@ -27,6 +28,7 @@ class LoopContinueNodeSerializer(serializers.Serializer): class ILoopContinueNode(INode): type = 'loop-continue-node' + support = [WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return LoopContinueNodeSerializer diff --git a/apps/application/flow/step_node/loop_node/i_loop_node.py b/apps/application/flow/step_node/loop_node/i_loop_node.py index 6b2176513..fb0fd9147 100644 --- a/apps/application/flow/step_node/loop_node/i_loop_node.py +++ b/apps/application/flow/step_node/loop_node/i_loop_node.py @@ -11,6 +11,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from common.exception.app_exception import AppApiException @@ -40,6 +41,7 @@ class ILoopNodeSerializer(serializers.Serializer): class ILoopNode(INode): type = 'loop-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.KNOWLEDGE] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return ILoopNodeSerializer @@ -52,5 +54,5 @@ class ILoopNode(INode): array[1:]) return self.execute(**{**self.node_params_serializer.data, "array": array}, **self.flow_params_serializer.data) - def execute(self, loop_type, array, number, loop_body, stream, **kwargs) -> NodeResult: + def execute(self, loop_type, array, number, loop_body, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/loop_node/impl/base_loop_node.py b/apps/application/flow/step_node/loop_node/impl/base_loop_node.py index 92083a6da..48ce59f11 100644 --- a/apps/application/flow/step_node/loop_node/impl/base_loop_node.py +++ b/apps/application/flow/step_node/loop_node/impl/base_loop_node.py @@ -11,7 +11,7 @@ from typing import Dict, List from django.utils.translation import gettext as _ -from application.flow.common import Answer +from application.flow.common import Answer, WorkflowMode from application.flow.i_step_node import NodeResult, WorkFlowPostHandler, INode from application.flow.step_node.loop_node.i_loop_node import ILoopNode from application.flow.tools import Reasoning @@ -197,6 +197,7 @@ def loop(workflow_manage_new_instance, node: INode, generate_loop): insert_or_replace(loop_node_data, index, instance.get_runtime_details()) insert_or_replace(loop_answer_data, index, get_answer_list(instance, child_node_node_dict, node.runtime_node_id)) + instance._cleanup() if break_outer: break node.context['is_interrupt_exec'] = is_interrupt_exec @@ -206,7 +207,7 @@ def loop(workflow_manage_new_instance, node: INode, generate_loop): node.context["item"] = current_index -def get_write_context(loop_type, array, number, loop_body, stream): +def get_write_context(loop_type, array, number, loop_body): def inner_write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow): if loop_type == 'ARRAY': return loop(node_variable['workflow_manage_new_instance'], node, generate_loop_array(array)) @@ -248,27 +249,31 @@ class BaseLoopNode(ILoopNode): def get_loop_context(self): return self.context - def execute(self, loop_type, array, number, loop_body, stream, **kwargs) -> NodeResult: + def execute(self, loop_type, array, number, loop_body, **kwargs) -> NodeResult: from application.flow.loop_workflow_manage import LoopWorkflowManage, 
Workflow + from application.flow.knowledge_loop_workflow_manage import KnowledgeLoopWorkflowManage def workflow_manage_new_instance(loop_data, global_data, start_node_id=None, start_node_data=None, chat_record=None, child_node=None): - workflow_manage = LoopWorkflowManage(Workflow.new_instance(loop_body), self.workflow_manage.params, - LoopWorkFlowPostHandler( - self.workflow_manage.work_flow_post_handler.chat_info), - self.workflow_manage, - loop_data, - self.get_loop_context, - base_to_response=LoopToResponse(), - start_node_id=start_node_id, - start_node_data=start_node_data, - chat_record=chat_record, - child_node=child_node - ) + workflow_mode = WorkflowMode.KNOWLEDGE_LOOP if WorkflowMode.KNOWLEDGE == self.workflow_manage.flow.workflow_mode else WorkflowMode.APPLICATION_LOOP + c = KnowledgeLoopWorkflowManage if workflow_mode == WorkflowMode.KNOWLEDGE_LOOP else LoopWorkflowManage + workflow_manage = c(Workflow.new_instance(loop_body, workflow_mode), + self.workflow_manage.params, + LoopWorkFlowPostHandler( + self.workflow_manage.work_flow_post_handler.chat_info), + self.workflow_manage, + loop_data, + self.get_loop_context, + base_to_response=LoopToResponse(), + start_node_id=start_node_id, + start_node_data=start_node_data, + chat_record=chat_record, + child_node=child_node + ) return workflow_manage return NodeResult({'workflow_manage_new_instance': workflow_manage_new_instance}, {}, - _write_context=get_write_context(loop_type, array, number, loop_body, stream), + _write_context=get_write_context(loop_type, array, number, loop_body), _is_interrupt=_is_interrupt_exec) def get_loop_context_data(self): diff --git a/apps/application/flow/step_node/loop_start_node/i_loop_start_node.py b/apps/application/flow/step_node/loop_start_node/i_loop_start_node.py index 21a059b76..3c50b33b2 100644 --- a/apps/application/flow/step_node/loop_start_node/i_loop_start_node.py +++ b/apps/application/flow/step_node/loop_start_node/i_loop_start_node.py @@ -6,15 +6,16 @@ @date:2024/6/3 16:54 @desc: """ - +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult class ILoopStarNode(INode): type = 'loop-start-node' + support = [WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE_LOOP] def _run(self): return self.execute(**self.flow_params_serializer.data) - def execute(self, **kwargs) -> NodeResult: + def execute(self, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/loop_start_node/impl/base_start_node.py b/apps/application/flow/step_node/loop_start_node/impl/base_start_node.py index 4f691f9e7..0a56527e0 100644 --- a/apps/application/flow/step_node/loop_start_node/impl/base_start_node.py +++ b/apps/application/flow/step_node/loop_start_node/impl/base_start_node.py @@ -10,6 +10,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import NodeResult from application.flow.step_node.loop_start_node.i_loop_start_node import ILoopStarNode @@ -31,7 +32,8 @@ class BaseLoopStartStepNode(ILoopStarNode): 'index': loop_params.get("index"), 'item': loop_params.get("item") } - self.workflow_manage.chat_context = self.workflow_manage.get_chat_info().get_chat_variable() + if WorkflowMode.APPLICATION_LOOP == self.workflow_manage.flow.workflow_mode: + self.workflow_manage.chat_context = self.workflow_manage.get_chat_info().get_chat_variable() return NodeResult(node_variable, {}) def get_details(self, index: int, **kwargs): diff --git 
a/apps/application/flow/step_node/mcp_node/i_mcp_node.py b/apps/application/flow/step_node/mcp_node/i_mcp_node.py index cdd3f7e6a..d0a7be133 100644 --- a/apps/application/flow/step_node/mcp_node/i_mcp_node.py +++ b/apps/application/flow/step_node/mcp_node/i_mcp_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -19,6 +20,8 @@ class McpNodeSerializer(serializers.Serializer): class IMcpNode(INode): type = 'mcp-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return McpNodeSerializer diff --git a/apps/application/flow/step_node/parameter_extraction_node/i_parameter_extraction_node.py b/apps/application/flow/step_node/parameter_extraction_node/i_parameter_extraction_node.py index 6ff670057..a0a9bc5cb 100644 --- a/apps/application/flow/step_node/parameter_extraction_node/i_parameter_extraction_node.py +++ b/apps/application/flow/step_node/parameter_extraction_node/i_parameter_extraction_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -23,6 +24,8 @@ class VariableSplittingNodeParamsSerializer(serializers.Serializer): class IParameterExtractionNode(INode): type = 'parameter-extraction-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return VariableSplittingNodeParamsSerializer diff --git a/apps/application/flow/step_node/question_node/i_question_node.py b/apps/application/flow/step_node/question_node/i_question_node.py index 74153bbfb..193999760 100644 --- a/apps/application/flow/step_node/question_node/i_question_node.py +++ b/apps/application/flow/step_node/question_node/i_question_node.py @@ -11,6 +11,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -31,6 +32,8 @@ class QuestionNodeSerializer(serializers.Serializer): class IQuestionNode(INode): type = 'question-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return QuestionNodeSerializer diff --git a/apps/application/flow/step_node/reranker_node/i_reranker_node.py b/apps/application/flow/step_node/reranker_node/i_reranker_node.py index d0164393d..0421c55b0 100644 --- a/apps/application/flow/step_node/reranker_node/i_reranker_node.py +++ b/apps/application/flow/step_node/reranker_node/i_reranker_node.py @@ -10,6 +10,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from django.utils.translation import gettext_lazy as _ @@ -41,6 +42,7 @@ class RerankerStepNodeSerializer(serializers.Serializer): class IRerankerNode(INode): type = 'reranker-node' + support = 
[WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return RerankerStepNodeSerializer @@ -57,6 +59,6 @@ class IRerankerNode(INode): reranker_list=reranker_list) - def execute(self, question, reranker_setting, reranker_list, reranker_model_id,show_knowledge, + def execute(self, question, reranker_setting, reranker_list, reranker_model_id, show_knowledge, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/search_document_node/i_search_document_node.py b/apps/application/flow/step_node/search_document_node/i_search_document_node.py index 65eb87d96..2d55ef64b 100644 --- a/apps/application/flow/step_node/search_document_node/i_search_document_node.py +++ b/apps/application/flow/step_node/search_document_node/i_search_document_node.py @@ -4,6 +4,7 @@ from typing import Type, List from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -42,6 +43,7 @@ class SearchDocumentStepNodeSerializer(serializers.Serializer): class ISearchDocumentStepNode(INode): type = 'search-document-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return SearchDocumentStepNodeSerializer diff --git a/apps/application/flow/step_node/search_knowledge_node/i_search_knowledge_node.py b/apps/application/flow/step_node/search_knowledge_node/i_search_knowledge_node.py index 7f9311e2d..17da82a4a 100644 --- a/apps/application/flow/step_node/search_knowledge_node/i_search_knowledge_node.py +++ b/apps/application/flow/step_node/search_knowledge_node/i_search_knowledge_node.py @@ -13,6 +13,7 @@ from django.core import validators from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from common.utils.common import flat_map @@ -67,6 +68,7 @@ def get_paragraph_list(chat_record, node_id): class ISearchKnowledgeStepNode(INode): type = 'search-knowledge-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return SearchDatasetStepNodeSerializer diff --git a/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py b/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py index 719e4201e..a071b46d1 100644 --- a/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py +++ b/apps/application/flow/step_node/speech_to_text_step_node/i_speech_to_text_node.py @@ -2,10 +2,11 @@ from typing import Type +from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult -from django.utils.translation import gettext_lazy as _ class SpeechToTextNodeSerializer(serializers.Serializer): @@ -22,6 +23,8 @@ class SpeechToTextNodeSerializer(serializers.Serializer): class ISpeechToTextNode(INode): type = 'speech-to-text-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return SpeechToTextNodeSerializer @@ 
-36,7 +39,7 @@ class ISpeechToTextNode(INode): return self.execute(audio=res, **self.node_params_serializer.data, **self.flow_params_serializer.data) - def execute(self, stt_model_id, chat_id, + def execute(self, stt_model_id, audio, model_params_setting=None, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py b/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py index 613599d0a..021f13170 100644 --- a/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py +++ b/apps/application/flow/step_node/speech_to_text_step_node/impl/base_speech_to_text_node.py @@ -20,7 +20,7 @@ class BaseSpeechToTextNode(ISpeechToTextNode): if self.node_params.get('is_result', False): self.answer_text = details.get('answer') - def execute(self, stt_model_id, chat_id, audio, model_params_setting=None, **kwargs) -> NodeResult: + def execute(self, stt_model_id, audio, model_params_setting=None, **kwargs) -> NodeResult: workspace_id = self.workflow_manage.get_body().get('workspace_id') stt_model = get_model_instance_by_model_workspace_id(stt_model_id, workspace_id, **model_params_setting) audio_list = audio diff --git a/apps/application/flow/step_node/start_node/i_start_node.py b/apps/application/flow/step_node/start_node/i_start_node.py index 41d73f218..40caf0199 100644 --- a/apps/application/flow/step_node/start_node/i_start_node.py +++ b/apps/application/flow/step_node/start_node/i_start_node.py @@ -6,12 +6,13 @@ @date:2024/6/3 16:54 @desc: """ - +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult class IStarNode(INode): type = 'start-node' + support = [WorkflowMode.APPLICATION] def _run(self): return self.execute(**self.flow_params_serializer.data) diff --git a/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py b/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py index 539c8dbb5..975a6e981 100644 --- a/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py +++ b/apps/application/flow/step_node/text_to_speech_step_node/i_text_to_speech_node.py @@ -4,6 +4,7 @@ from typing import Type from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from django.utils.translation import gettext_lazy as _ @@ -22,6 +23,8 @@ class TextToSpeechNodeSerializer(serializers.Serializer): class ITextToSpeechNode(INode): type = 'text-to-speech-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return TextToSpeechNodeSerializer @@ -31,7 +34,7 @@ class ITextToSpeechNode(INode): self.node_params_serializer.data.get('content_list')[1:]) return self.execute(content=content, **self.node_params_serializer.data, **self.flow_params_serializer.data) - def execute(self, tts_model_id, chat_id, + def execute(self, tts_model_id, content, model_params_setting=None, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py b/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py index e0373eb41..9adf8d627 100644 --- a/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py +++ 
b/apps/application/flow/step_node/text_to_speech_step_node/impl/base_text_to_speech_node.py @@ -4,6 +4,7 @@ import mimetypes from django.core.files.uploadedfile import InMemoryUploadedFile +from application.flow.common import WorkflowMode from application.flow.i_step_node import NodeResult from application.flow.step_node.text_to_speech_step_node.i_text_to_speech_node import ITextToSpeechNode from common.utils.common import _remove_empty_lines @@ -42,7 +43,7 @@ class BaseTextToSpeechNode(ITextToSpeechNode): if self.node_params.get('is_result', False): self.answer_text = details.get('answer') - def execute(self, tts_model_id, chat_id, + def execute(self, tts_model_id, content, model_params_setting=None, max_length=1024, **kwargs) -> NodeResult: # 分割文本为合理片段 @@ -77,25 +78,10 @@ class BaseTextToSpeechNode(ITextToSpeechNode): output_buffer = io.BytesIO() combined_audio.export(output_buffer, format="mp3") combined_bytes = output_buffer.getvalue() - - # 存储合并后的音频文件 file_name = 'combined_audio.mp3' file = bytes_to_uploaded_file(combined_bytes, file_name) - - application = self.workflow_manage.work_flow_post_handler.chat_info.application - meta = { - 'debug': False if application.id else True, - 'chat_id': chat_id, - 'application_id': str(application.id) if application.id else None, - } - - file_url = FileSerializer(data={ - 'file': file, - 'meta': meta, - 'source_id': meta['application_id'], - 'source_type': FileSourceType.APPLICATION.value - }).upload() - + # 存储合并后的音频文件 + file_url = self.upload_file(file) # 生成音频标签 audio_label = f'' file_id = file_url.split('/')[-1] @@ -111,6 +97,42 @@ class BaseTextToSpeechNode(ITextToSpeechNode): 'result': audio_list }, {}) + def upload_file(self, file): + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.upload_knowledge_file(file) + return self.upload_application_file(file) + + def upload_knowledge_file(self, file): + knowledge_id = self.workflow_params.get('knowledge_id') + meta = { + 'debug': False, + 'knowledge_id': knowledge_id, + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': knowledge_id, + 'source_type': FileSourceType.KNOWLEDGE.value + }).upload() + return file_url + + def upload_application_file(self, file): + application = self.workflow_manage.work_flow_post_handler.chat_info.application + chat_id = self.workflow_params.get('chat_id') + meta = { + 'debug': False if application.id else True, + 'chat_id': chat_id, + 'application_id': str(application.id) if application.id else None, + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': meta['application_id'], + 'source_type': FileSourceType.APPLICATION.value + }).upload() + return file_url + def get_details(self, index: int, **kwargs): return { 'name': self.node.properties.get('stepName'), diff --git a/apps/application/flow/step_node/text_to_video_step_node/i_text_to_video_node.py b/apps/application/flow/step_node/text_to_video_step_node/i_text_to_video_node.py index c91d70f59..e596f00d1 100644 --- a/apps/application/flow/step_node/text_to_video_step_node/i_text_to_video_node.py +++ b/apps/application/flow/step_node/text_to_video_step_node/i_text_to_video_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -31,14 +32,21 @@ class 
TextToVideoNodeSerializer(serializers.Serializer): class ITextToVideoNode(INode): type = 'text-to-video-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return TextToVideoNodeSerializer def _run(self): - return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data, + **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None}) + else: + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) - def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, model_params_setting, chat_record_id, **kwargs) -> NodeResult: diff --git a/apps/application/flow/step_node/text_to_video_step_node/impl/base_text_to_video_node.py b/apps/application/flow/step_node/text_to_video_step_node/impl/base_text_to_video_node.py index 9d1dba37d..a225911a9 100644 --- a/apps/application/flow/step_node/text_to_video_step_node/impl/base_text_to_video_node.py +++ b/apps/application/flow/step_node/text_to_video_step_node/impl/base_text_to_video_node.py @@ -5,6 +5,7 @@ from typing import List import requests from langchain_core.messages import BaseMessage, HumanMessage, AIMessage +from application.flow.common import WorkflowMode from application.flow.i_step_node import NodeResult from application.flow.step_node.text_to_video_step_node.i_text_to_video_node import ITextToVideoNode from common.utils.common import bytes_to_uploaded_file @@ -20,11 +21,10 @@ class BaseTextToVideoNode(ITextToVideoNode): if self.node_params.get('is_result', False): self.answer_text = details.get('answer') - def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id, + def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, model_params_setting, chat_record_id, **kwargs) -> NodeResult: - application = self.workflow_manage.work_flow_post_handler.chat_info.application workspace_id = self.workflow_manage.get_body().get('workspace_id') ttv_model = get_model_instance_by_model_workspace_id(model_id, workspace_id, **model_params_setting) @@ -44,6 +44,36 @@ class BaseTextToVideoNode(ITextToVideoNode): if isinstance(video_urls, str) and video_urls.startswith('http'): video_urls = requests.get(video_urls).content file = bytes_to_uploaded_file(video_urls, file_name) + file_url = self.upload_file(file) + video_label = f'' + video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}] + return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list, + 'video': video_list, + 'history_message': history_message, 'question': question}, {}) + + def upload_file(self, file): + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.upload_knowledge_file(file) + return self.upload_application_file(file) + + def upload_knowledge_file(self, file): + knowledge_id = self.workflow_params.get('knowledge_id') + meta = { + 
'debug': False, + 'knowledge_id': knowledge_id + } + file_url = FileSerializer(data={ + 'file': file, + 'meta': meta, + 'source_id': knowledge_id, + 'source_type': FileSourceType.KNOWLEDGE.value + }).upload() + return file_url + + def upload_application_file(self, file): + application = self.workflow_manage.work_flow_post_handler.chat_info.application + chat_id = self.workflow_params.get('chat_id') meta = { 'debug': False if application.id else True, 'chat_id': chat_id, @@ -55,11 +85,7 @@ class BaseTextToVideoNode(ITextToVideoNode): 'source_id': meta['application_id'], 'source_type': FileSourceType.APPLICATION.value }).upload() - video_label = f'' - video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}] - return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list, - 'video': video_list, - 'history_message': history_message, 'question': question}, {}) + return file_url def generate_history_ai_message(self, chat_record): for val in chat_record.details.values(): diff --git a/apps/application/flow/step_node/tool_lib_node/i_tool_lib_node.py b/apps/application/flow/step_node/tool_lib_node/i_tool_lib_node.py index 43a93cc3f..e1d9335ce 100644 --- a/apps/application/flow/step_node/tool_lib_node/i_tool_lib_node.py +++ b/apps/application/flow/step_node/tool_lib_node/i_tool_lib_node.py @@ -13,6 +13,7 @@ from django.db.models import QuerySet from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from common.field.common import ObjectField from tools.models.tool import Tool @@ -40,6 +41,8 @@ class FunctionLibNodeParamsSerializer(serializers.Serializer): class IToolLibNode(INode): type = 'tool-lib-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return FunctionLibNodeParamsSerializer diff --git a/apps/application/flow/step_node/tool_lib_node/impl/base_tool_lib_node.py b/apps/application/flow/step_node/tool_lib_node/impl/base_tool_lib_node.py index 15b4ab3ca..66dc921b8 100644 --- a/apps/application/flow/step_node/tool_lib_node/impl/base_tool_lib_node.py +++ b/apps/application/flow/step_node/tool_lib_node/impl/base_tool_lib_node.py @@ -126,6 +126,16 @@ def valid_function(tool_lib, workspace_id): if not tool_lib.is_active: raise Exception(_("Tool is not active")) +def _filter_file_bytes(data): + """递归过滤掉所有层级的 file_bytes""" + if isinstance(data, dict): + return {k: _filter_file_bytes(v) for k, v in data.items() if k != 'file_bytes'} + elif isinstance(data, list): + return [_filter_file_bytes(item) for item in data] + else: + return data + + class BaseToolLibNodeNode(IToolLibNode): def save_context(self, details, workflow_manage): @@ -138,7 +148,7 @@ class BaseToolLibNodeNode(IToolLibNode): tool_lib = QuerySet(Tool).filter(id=tool_lib_id).first() valid_function(tool_lib, workspace_id) params = { - field.get('name'): convert_value( + field.get('name'): convert_value( field.get('name'), field.get('value'), field.get('type'), field.get('is_required'), field.get('source'), self @@ -157,14 +167,20 @@ class BaseToolLibNodeNode(IToolLibNode): all_params = init_params_default_value | json.loads(rsa_long_decrypt(tool_lib.init_params)) | params else: all_params = init_params_default_value | params + if self.node.properties.get('kind') == 
'data-source': + all_params = {**all_params, **self.workflow_params.get('data_source')} result = function_executor.exec_code(tool_lib.code, all_params) - return NodeResult({'result': result}, {}, _write_context=write_context) + return NodeResult({'result': result}, + (self.workflow_manage.params.get('knowledge_base') or {}) if self.node.properties.get( + 'kind') == 'data-source' else {}, _write_context=write_context) def get_details(self, index: int, **kwargs): + result = _filter_file_bytes(self.context.get('result')) + return { 'name': self.node.properties.get('stepName'), "index": index, - "result": self.context.get('result'), + "result": result, "params": self.context.get('params'), 'run_time': self.context.get('run_time'), 'type': self.node.type, diff --git a/apps/application/flow/step_node/tool_node/i_tool_node.py b/apps/application/flow/step_node/tool_node/i_tool_node.py index 2180efb77..6ee077ed7 100644 --- a/apps/application/flow/step_node/tool_node/i_tool_node.py +++ b/apps/application/flow/step_node/tool_node/i_tool_node.py @@ -10,15 +10,15 @@ import re from typing import Type from django.core import validators +from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from rest_framework.utils.formatting import lazy_format +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult from common.exception.app_exception import AppApiException from common.field.common import ObjectField -from django.utils.translation import gettext_lazy as _ -from rest_framework.utils.formatting import lazy_format - class InputField(serializers.Serializer): name = serializers.CharField(required=True, label=_('Variable Name')) @@ -53,6 +53,8 @@ class FunctionNodeParamsSerializer(serializers.Serializer): class IToolNode(INode): type = 'tool-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return FunctionNodeParamsSerializer diff --git a/apps/application/flow/step_node/variable_aggregation_node/i_variable_aggregation_node.py b/apps/application/flow/step_node/variable_aggregation_node/i_variable_aggregation_node.py index d5a2f332a..4878f0c4c 100644 --- a/apps/application/flow/step_node/variable_aggregation_node/i_variable_aggregation_node.py +++ b/apps/application/flow/step_node/variable_aggregation_node/i_variable_aggregation_node.py @@ -5,11 +5,10 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult - - class VariableListSerializer(serializers.Serializer): v_id = serializers.CharField(required=True, label=_("Variable id")) variable = serializers.ListField(required=True, label=_("Variable")) @@ -29,15 +28,13 @@ class VariableAggregationNodeSerializer(serializers.Serializer): class IVariableAggregation(INode): type = 'variable-aggregation-node' - + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return VariableAggregationNodeSerializer def _run(self): - return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) + return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) - def 
execute(self,strategy,group_list,**kwargs) -> NodeResult: + def execute(self, strategy, group_list, **kwargs) -> NodeResult: pass - - diff --git a/apps/application/flow/step_node/variable_aggregation_node/impl/base_variable_aggregation_node.py b/apps/application/flow/step_node/variable_aggregation_node/impl/base_variable_aggregation_node.py index c65dc44dc..46b21153f 100644 --- a/apps/application/flow/step_node/variable_aggregation_node/impl/base_variable_aggregation_node.py +++ b/apps/application/flow/step_node/variable_aggregation_node/impl/base_variable_aggregation_node.py @@ -9,6 +9,15 @@ from application.flow.i_step_node import NodeResult from application.flow.step_node.variable_aggregation_node.i_variable_aggregation_node import IVariableAggregation +def _filter_file_bytes(data): + """递归过滤掉所有层级的 file_bytes""" + if isinstance(data, dict): + return {k: _filter_file_bytes(v) for k, v in data.items() if k != 'file_bytes'} + elif isinstance(data, list): + return [_filter_file_bytes(item) for item in data] + else: + return data + class BaseVariableAggregationNode(IVariableAggregation): @@ -63,14 +72,16 @@ class BaseVariableAggregationNode(IVariableAggregation): {'result': result, 'strategy': strategy, 'group_list': self.reset_group_list(group_list), **result}, {}) def get_details(self, index: int, **kwargs): + result = _filter_file_bytes(self.context.get('result')) + group_list = _filter_file_bytes(self.context.get('group_list')) return { 'name': self.node.properties.get('stepName'), "index": index, 'run_time': self.context.get('run_time'), 'type': self.node.type, - 'result': self.context.get('result'), + 'result': result, 'strategy': self.context.get('strategy'), - 'group_list': self.context.get('group_list'), + 'group_list': group_list, 'status': self.status, 'err_message': self.err_message } diff --git a/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py b/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py index 1eb21266c..b65d8c812 100644 --- a/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py +++ b/apps/application/flow/step_node/variable_assign_node/i_variable_assign_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -15,6 +16,8 @@ class VariableAssignNodeParamsSerializer(serializers.Serializer): class IVariableAssignNode(INode): type = 'variable-assign-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return VariableAssignNodeParamsSerializer @@ -22,5 +25,5 @@ class IVariableAssignNode(INode): def _run(self): return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data) - def execute(self, variable_list, stream, **kwargs) -> NodeResult: + def execute(self, variable_list, **kwargs) -> NodeResult: pass diff --git a/apps/application/flow/step_node/variable_assign_node/impl/base_variable_assign_node.py b/apps/application/flow/step_node/variable_assign_node/impl/base_variable_assign_node.py index 2bef6d6a5..a13dc048c 100644 --- a/apps/application/flow/step_node/variable_assign_node/impl/base_variable_assign_node.py +++ b/apps/application/flow/step_node/variable_assign_node/impl/base_variable_assign_node.py @@ -58,7 +58,7 
@@ class BaseVariableAssignNode(IVariableAssignNode): result['output_value'] = reference return result - def execute(self, variable_list, stream, **kwargs) -> NodeResult: + def execute(self, variable_list, **kwargs) -> NodeResult: # result_list = [] is_chat = False diff --git a/apps/application/flow/step_node/variable_splitting_node/i_variable_splitting_node.py b/apps/application/flow/step_node/variable_splitting_node/i_variable_splitting_node.py index 52cff8eb2..78983ca0a 100644 --- a/apps/application/flow/step_node/variable_splitting_node/i_variable_splitting_node.py +++ b/apps/application/flow/step_node/variable_splitting_node/i_variable_splitting_node.py @@ -5,6 +5,7 @@ from typing import Type from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult @@ -18,6 +19,8 @@ class VariableSplittingNodeParamsSerializer(serializers.Serializer): class IVariableSplittingNode(INode): type = 'variable-splitting-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return VariableSplittingNodeParamsSerializer diff --git a/apps/application/flow/step_node/video_understand_step_node/i_video_understand_node.py b/apps/application/flow/step_node/video_understand_step_node/i_video_understand_node.py index 3266a8e0b..0b362415d 100644 --- a/apps/application/flow/step_node/video_understand_step_node/i_video_understand_node.py +++ b/apps/application/flow/step_node/video_understand_step_node/i_video_understand_node.py @@ -2,12 +2,12 @@ from typing import Type +from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from application.flow.common import WorkflowMode from application.flow.i_step_node import INode, NodeResult -from django.utils.translation import gettext_lazy as _ - class VideoUnderstandNodeSerializer(serializers.Serializer): model_id = serializers.CharField(required=True, label=_("Model id")) @@ -30,6 +30,8 @@ class VideoUnderstandNodeSerializer(serializers.Serializer): class IVideoUnderstandNode(INode): type = 'video-understand-node' + support = [WorkflowMode.APPLICATION, WorkflowMode.APPLICATION_LOOP, WorkflowMode.KNOWLEDGE, + WorkflowMode.KNOWLEDGE_LOOP] def get_node_params_serializer_class(self) -> Type[serializers.Serializer]: return VideoUnderstandNodeSerializer @@ -37,9 +39,15 @@ class IVideoUnderstandNode(INode): def _run(self): res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('video_list')[0], self.node_params_serializer.data.get('video_list')[1:]) - return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data) - def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id, + if [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP].__contains__( + self.workflow_manage.flow.workflow_mode): + return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data, + **{'history_chat_record': [], 'stream': True, 'chat_id': None, 'chat_record_id': None}) + else: + return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data) + + def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, model_params_setting, 
chat_record_id, video, diff --git a/apps/application/flow/step_node/video_understand_step_node/impl/base_video_understand_node.py b/apps/application/flow/step_node/video_understand_step_node/impl/base_video_understand_node.py index 9a478e6c9..a1fda2e6c 100644 --- a/apps/application/flow/step_node/video_understand_step_node/impl/base_video_understand_node.py +++ b/apps/application/flow/step_node/video_understand_step_node/impl/base_video_understand_node.py @@ -70,7 +70,7 @@ class BaseVideoUnderstandNode(IVideoUnderstandNode): if self.node_params.get('is_result', False): self.answer_text = details.get('answer') - def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id, + def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, model_params_setting, chat_record_id, video, diff --git a/apps/application/flow/workflow_manage.py b/apps/application/flow/workflow_manage.py index 2eeeaad3a..5ccde0ae8 100644 --- a/apps/application/flow/workflow_manage.py +++ b/apps/application/flow/workflow_manage.py @@ -21,7 +21,7 @@ from rest_framework import status from application.flow import tools from application.flow.common import Workflow -from application.flow.i_step_node import INode, WorkFlowPostHandler, NodeResult +from application.flow.i_step_node import INode, WorkFlowPostHandler, NodeResult, FlowParamsSerializer from application.flow.step_node import get_node from common.handle.base_to_response import BaseToResponse from common.handle.impl.response.system_to_response import SystemToResponse @@ -316,7 +316,7 @@ class WorkflowManage: except Exception as e: return True - def await_result(self): + def await_result(self, is_cleanup=True): try: while self.is_run(): while True: @@ -339,12 +339,13 @@ class WorkflowManage: answer_tokens = sum([row.get('answer_tokens') for row in details.values() if 'answer_tokens' in row and row.get('answer_tokens') is not None]) self.work_flow_post_handler.handler(self) - yield self.base_to_response.to_stream_chunk_response(self.params['chat_id'], - self.params['chat_record_id'], + yield self.base_to_response.to_stream_chunk_response(self.params.get('chat_id'), + self.params.get('chat_record_id'), '', [], '', True, message_tokens, answer_tokens, {}) - self._cleanup() + if is_cleanup: + self._cleanup() def run_chain_async(self, current_node, node_result_future, language='zh'): future = executor.submit(self.run_chain_manage, current_node, node_result_future, language) @@ -354,7 +355,7 @@ class WorkflowManage: translation.activate(language) if current_node is None: start_node = self.get_start_node() - current_node = get_node(start_node.type)(start_node, self.params, self) + current_node = get_node(start_node.type, self.flow.workflow_mode)(start_node, self.params, self) self.node_chunk_manage.add_node_chunk(current_node.node_chunk) # 添加节点 self.append_node(current_node) @@ -440,8 +441,8 @@ class WorkflowManage: node_type = r.get("node_type") view_type = r.get('view_type') reasoning_content = r.get('reasoning_content') - chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'], - self.params['chat_record_id'], + chunk = self.base_to_response.to_stream_chunk_response(self.params.get('chat_id'), + self.params.get('chat_record_id'), current_node.id, current_node.up_node_id_list, content, False, 0, 0, @@ -455,8 +456,8 @@ class WorkflowManage: 'node_status': "SUCCESS"}) current_node.node_chunk.add_chunk(chunk) chunk = (self.base_to_response - 
.to_stream_chunk_response(self.params['chat_id'], - self.params['chat_record_id'], + .to_stream_chunk_response(self.params.get('chat_id'), + self.params.get('chat_record_id'), current_node.id, current_node.up_node_id_list, '', False, 0, 0, {'node_is_end': True, @@ -473,9 +474,10 @@ class WorkflowManage: return current_result except Exception as e: # 添加节点 + maxkb_logger.error(f'Exception: {e}', exc_info=True) - chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'], - self.params['chat_record_id'], + chunk = self.base_to_response.to_stream_chunk_response(self.params.get('chat_id'), + self.params.get('chat_record_id'), current_node.id, current_node.up_node_id_list, 'Exception:' + str(e), False, 0, 0, @@ -543,7 +545,7 @@ class WorkflowManage: return self._has_next_node(self.get_start_node() if self.current_node is None else self.current_node, node_result) - def get_runtime_details(self): + def get_runtime_details(self, get_details=lambda n, index: n.get_details(index)): details_result = {} for index in range(len(self.node_context)): node = self.node_context[index] @@ -552,7 +554,7 @@ class WorkflowManage: if details is not None and self.start_node.runtime_node_id != node.runtime_node_id: details_result[node.runtime_node_id] = details continue - details = node.get_details(index) + details = get_details(node, index) details['node_id'] = node.id details['up_node_id_list'] = node.up_node_id_list details['runtime_node_id'] = node.runtime_node_id @@ -736,8 +738,9 @@ class WorkflowManage: get_node_params=lambda node: node.properties.get('node_data')): for node in self.flow.nodes: if node.id == node_id: - node_instance = get_node(node.type)(node, - self.params, self, up_node_id_list, get_node_params) + node_instance = get_node(node.type, self.flow.workflow_mode)(node, + self.params, self, up_node_id_list, + get_node_params) return node_instance return None @@ -750,3 +753,6 @@ class WorkflowManage: def get_node_reference(self, reference_address: Dict): node = self.get_node_by_id(reference_address.get('node_id')) return node.context[reference_address.get('node_field')] + + def get_params_serializer_class(self): + return FlowParamsSerializer
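The step_node registry behind `get_node` is not part of this diff, so the mode filtering that `get_node(node.type, self.flow.workflow_mode)` relies on in the workflow_manage.py changes above is not visible here. A minimal sketch of what such a lookup could look like, assuming a registry list of INode subclasses that carry the `type` and `support` attributes introduced throughout this change (the class names, registry shape and error handling below are illustrative, not the project's actual implementation):

    from application.flow.common import WorkflowMode

    class FakeStartNode:  # stand-in for a real INode subclass
        type = 'start-node'
        support = [WorkflowMode.APPLICATION]

    class FakeKnowledgeWriteNode:  # stand-in for BaseKnowledgeWriteNode
        type = 'knowledge-write-node'
        support = [WorkflowMode.KNOWLEDGE, WorkflowMode.KNOWLEDGE_LOOP]

    node_list = [FakeStartNode, FakeKnowledgeWriteNode]

    def get_node(node_type, workflow_mode=WorkflowMode.APPLICATION):
        # Return the first registered node class whose type matches and whose
        # support list allows the requested workflow mode.
        for node_class in node_list:
            if node_class.type == node_type and workflow_mode in node_class.support:
                return node_class
        raise ValueError(f'{node_type} is not available in workflow mode {workflow_mode}')

Under such a scheme a knowledge workflow would resolve `knowledge-write-node` but reject `start-node`, which is consistent with the per-node `support` lists added in this diff.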
_("Document"), + Group.SYSTEM_KNOWLEDGE_WORKFLOW.value: _("Workflow"), Group.SYSTEM_KNOWLEDGE_TAG.value: _("Tag"), Group.SYSTEM_KNOWLEDGE_PROBLEM.value: _("Problem"), Group.SYSTEM_KNOWLEDGE_HIT_TEST.value: _("Hit-Test"), @@ -383,6 +388,7 @@ Permission_Label = { Group.SYSTEM_RES_MODEL.value: _("Model"), Group.SYSTEM_RES_KNOWLEDGE.value: _("Knowledge"), Group.SYSTEM_RES_KNOWLEDGE_DOCUMENT.value: _("Document"), + Group.SYSTEM_RES_KNOWLEDGE_WORKFLOW.value: _("Workflow"), Group.SYSTEM_RES_KNOWLEDGE_TAG.value: _("Tag"), Group.SYSTEM_RES_KNOWLEDGE_PROBLEM.value: _("Problem"), Group.SYSTEM_RES_KNOWLEDGE_HIT_TEST.value: _("Hit-Test"), @@ -616,6 +622,16 @@ class PermissionConstants(Enum): resource_permission_group_list=[ResourcePermissionConst.KNOWLEDGE_MANGE], parent_group=[WorkspaceGroup.KNOWLEDGE, UserGroup.KNOWLEDGE] ) + KNOWLEDGE_WORKFLOW_READ = Permission( + group=Group.KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN, RoleConstants.USER], + resource_permission_group_list=[ResourcePermissionConst.KNOWLEDGE_VIEW], + parent_group=[WorkspaceGroup.KNOWLEDGE, UserGroup.KNOWLEDGE] + ) + KNOWLEDGE_WORKFLOW_EDIT = Permission( + group=Group.KNOWLEDGE_WORKFLOW, operate=Operate.EDIT, role_list=[RoleConstants.ADMIN, RoleConstants.USER], + resource_permission_group_list=[ResourcePermissionConst.KNOWLEDGE_MANGE], + parent_group=[WorkspaceGroup.KNOWLEDGE, UserGroup.KNOWLEDGE] + ) KNOWLEDGE_DOCUMENT_READ = Permission( group=Group.KNOWLEDGE_DOCUMENT, operate=Operate.READ, role_list=[RoleConstants.ADMIN, RoleConstants.USER], @@ -1209,6 +1225,14 @@ class PermissionConstants(Enum): group=Group.SYSTEM_KNOWLEDGE, operate=Operate.DELETE, role_list=[RoleConstants.ADMIN], parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE" ) + SHARED_KNOWLEDGE_WORKFLOW_READ = Permission( + group=Group.SYSTEM_KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN], + parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE" + ) + SHARED_KNOWLEDGE_WORKFLOW_EDIT = Permission( + group=Group.SYSTEM_KNOWLEDGE_WORKFLOW, operate=Operate.EDIT, role_list=[RoleConstants.ADMIN], + parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE" + ) SHARED_KNOWLEDGE_DOCUMENT_READ = Permission( group=Group.SYSTEM_KNOWLEDGE_DOCUMENT, operate=Operate.READ, role_list=[RoleConstants.ADMIN], parent_group=[SystemGroup.SHARED_KNOWLEDGE], is_ee=settings.edition == "EE" @@ -1437,6 +1461,14 @@ class PermissionConstants(Enum): parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE" ) # 文档 + RESOURCE_KNOWLEDGE_WORKFLOW_READ = Permission( + group=Group.SYSTEM_RES_KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN], + parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE" + ) + RESOURCE_KNOWLEDGE_WORKFLOW_EDIT = Permission( + group=Group.SYSTEM_RES_KNOWLEDGE_WORKFLOW, operate=Operate.READ, role_list=[RoleConstants.ADMIN], + parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE" + ) RESOURCE_KNOWLEDGE_DOCUMENT_READ = Permission( group=Group.SYSTEM_RES_KNOWLEDGE_DOCUMENT, operate=Operate.READ, role_list=[RoleConstants.ADMIN], parent_group=[SystemGroup.RESOURCE_KNOWLEDGE], is_ee=settings.edition == "EE" diff --git a/apps/common/handle/impl/table/xlsx_parse_table_handle.py b/apps/common/handle/impl/table/xlsx_parse_table_handle.py index 0a16fa25e..867be2c55 100644 --- a/apps/common/handle/impl/table/xlsx_parse_table_handle.py +++ 
b/apps/common/handle/impl/table/xlsx_parse_table_handle.py @@ -112,5 +112,5 @@ class XlsxParseTableHandle(BaseParseTableHandle): return md_tables except Exception as e: - max_kb.error(f'excel split handle error: {e}') + maxkb_logger.error(f'excel split handle error: {e}') return f'error: {e}' diff --git a/apps/common/handle/impl/text/doc_split_handle.py b/apps/common/handle/impl/text/doc_split_handle.py index 33f234767..8648e2040 100644 --- a/apps/common/handle/impl/text/doc_split_handle.py +++ b/apps/common/handle/impl/text/doc_split_handle.py @@ -114,7 +114,11 @@ def get_image_id_func(): title_font_list = [ [36, 100], - [30, 36] + [26, 36], + [24, 26], + [22, 24], + [18, 22], + [16, 18] ] @@ -125,12 +129,12 @@ def get_title_level(paragraph: Paragraph): if psn.startswith('Heading') or psn.startswith('TOC 标题') or psn.startswith('标题'): return int(psn.replace("Heading ", '').replace('TOC 标题', '').replace('标题', '')) - if len(paragraph.runs) == 1: + if len(paragraph.runs) >= 1: font_size = paragraph.runs[0].font.size pt = font_size.pt - if pt >= 30: + if pt >= 16: for _value, index in zip(title_font_list, range(len(title_font_list))): - if pt >= _value[0] and pt < _value[1]: + if pt >= _value[0] and pt < _value[1] and any([run.font.bold for run in paragraph.runs]): return index + 1 except Exception as e: pass diff --git a/apps/common/utils/tool_code.py b/apps/common/utils/tool_code.py index 3058861b9..67d47df37 100644 --- a/apps/common/utils/tool_code.py +++ b/apps/common/utils/tool_code.py @@ -2,6 +2,7 @@ import ast import base64 import gzip +import io import json import os import socket @@ -76,10 +77,12 @@ class ToolExecutor: f.write(f"SANDBOX_PYTHON_ALLOW_SUBPROCESS={allow_subprocess}\n") os.system(f"chmod -R 550 {self.sandbox_path}") - def exec_code(self, code_str, keywords): + def exec_code(self, code_str, keywords, function_name=None): _id = str(uuid.uuid7()) success = '{"code":200,"msg":"成功","data":exec_result}' err = '{"code":500,"msg":str(e),"data":None}' + action_function = f'({function_name !a}, locals_v.get({function_name !a}))' if function_name else 'locals_v.popitem()' + result_path = f'{self.sandbox_path}/result/{_id}.result' python_paths = CONFIG.get_sandbox_python_package_paths().split(',') _exec_code = f""" try: @@ -92,7 +95,7 @@ try: globals_v={'{}'} os.environ.clear() exec({dedent(code_str)!a}, globals_v, locals_v) - f_name, f = locals_v.popitem() + f_name, f = {action_function} for local in locals_v: globals_v[local] = locals_v[local] exec_result=f(**keywords) @@ -216,7 +219,10 @@ exec({dedent(code)!a}) else: tool_config = { 'command': sys.executable, - 'args': f'import base64,gzip; exec(gzip.decompress(base64.b64decode(\'{compressed_and_base64_encoded_code_str}\')).decode())', + 'args': [ + '-c', + f'import base64,gzip; exec(gzip.decompress(base64.b64decode(\'{compressed_and_base64_encoded_code_str}\')).decode())', + ], 'transport': 'stdio', } return tool_config diff --git a/apps/knowledge/api/knowledge_workflow.py b/apps/knowledge/api/knowledge_workflow.py new file mode 100644 index 000000000..38296bb89 --- /dev/null +++ b/apps/knowledge/api/knowledge_workflow.py @@ -0,0 +1,11 @@ +# coding=utf-8 + +from common.mixins.api_mixin import APIMixin + + +class KnowledgeWorkflowApi(APIMixin): + pass + + +class KnowledgeWorkflowVersionApi(APIMixin): + pass diff --git a/apps/knowledge/migrations/0004_alter_document_type_alter_knowledge_type_and_more.py b/apps/knowledge/migrations/0004_alter_document_type_alter_knowledge_type_and_more.py new file mode 100644 index 
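# Illustrative sketch, not part of the patch: the new optional `function_name`
# argument of ToolExecutor.exec_code selects a named function from the tool
# code instead of relying on locals_v.popitem(), which only works when the code
# defines a single function. The data-source tool code and keyword values
# below are placeholders.
from common.utils.tool_code import ToolExecutor
from maxkb.const import CONFIG

data_source_code = """
def get_form_list(knowledge_id):
    return [{'field': 'url', 'label': 'URL', 'required': True}]

def pull_documents(knowledge_id, url):
    return {'knowledge_id': knowledge_id, 'url': url}
"""

executor = ToolExecutor(CONFIG.get('SANDBOX'))
# Picks `pull_documents` explicitly; omitting function_name keeps the previous
# behaviour of popping the most recently defined name from the sandbox locals.
result = executor.exec_code(
    data_source_code,
    {'knowledge_id': 'k-123', 'url': 'https://example.com'},
    function_name='pull_documents',
)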
000000000..374aa4e38 --- /dev/null +++ b/apps/knowledge/migrations/0004_alter_document_type_alter_knowledge_type_and_more.py @@ -0,0 +1,59 @@ +# Generated by Django 5.2.4 on 2025-11-04 05:54 + +import django.db.models.deletion +import uuid_utils.compat +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('knowledge', '0003_tag_documenttag'), + ] + + operations = [ + migrations.AlterField( + model_name='document', + name='type', + field=models.IntegerField(choices=[(0, '通用类型'), (1, 'web站点类型'), (2, '飞书类型'), (3, '语雀类型'), (4, '工作流类型')], db_index=True, default=0, verbose_name='类型'), + ), + migrations.AlterField( + model_name='knowledge', + name='type', + field=models.IntegerField(choices=[(0, '通用类型'), (1, 'web站点类型'), (2, '飞书类型'), (3, '语雀类型'), (4, '工作流类型')], db_index=True, default=0, verbose_name='类型'), + ), + migrations.CreateModel( + name='KnowledgeWorkflow', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, db_index=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid_utils.compat.uuid7, editable=False, primary_key=True, serialize=False, verbose_name='主键id')), + ('workspace_id', models.CharField(db_index=True, default='default', max_length=64, verbose_name='工作空间id')), + ('work_flow', models.JSONField(default=dict, verbose_name='工作流数据')), + ('is_publish', models.BooleanField(db_index=True, default=False, verbose_name='是否发布')), + ('publish_time', models.DateTimeField(blank=True, null=True, verbose_name='发布时间')), + ('knowledge', models.OneToOneField(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, related_name='workflow', to='knowledge.knowledge', verbose_name='知识库')), + ], + options={ + 'db_table': 'knowledge_workflow', + }, + ), + migrations.CreateModel( + name='KnowledgeWorkflowVersion', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, db_index=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid_utils.compat.uuid7, editable=False, primary_key=True, serialize=False, verbose_name='主键id')), + ('workspace_id', models.CharField(db_index=True, default='default', max_length=64, verbose_name='工作空间id')), + ('work_flow', models.JSONField(default=dict, verbose_name='工作流数据')), + ('publish_user_id', models.UUIDField(default=None, null=True, verbose_name='发布者id')), + ('publish_user_name', models.CharField(default='', max_length=128, verbose_name='发布者名称')), + ('knowledge', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, to='knowledge.knowledge', verbose_name='知识库')), + ('workflow', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.CASCADE, related_name='versions', to='knowledge.knowledgeworkflow', verbose_name='工作流')), + ], + options={ + 'db_table': 'knowledge_workflow_version', + 'unique_together': {('knowledge',)}, + }, + ), + ] diff --git a/apps/knowledge/migrations/0005_knowledgeaction.py b/apps/knowledge/migrations/0005_knowledgeaction.py new file mode 100644 index 000000000..3633a8d23 --- /dev/null +++ b/apps/knowledge/migrations/0005_knowledgeaction.py @@ -0,0 +1,32 @@ +# Generated by Django 5.2.8 on 2025-11-19 06:06 + +import common.encoder.encoder +import django.db.models.deletion +import uuid_utils.compat +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + 
('knowledge', '0004_alter_document_type_alter_knowledge_type_and_more'), + ] + + operations = [ + migrations.CreateModel( + name='KnowledgeAction', + fields=[ + ('create_time', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='创建时间')), + ('update_time', models.DateTimeField(auto_now=True, db_index=True, verbose_name='修改时间')), + ('id', models.UUIDField(default=uuid_utils.compat.uuid7, editable=False, primary_key=True, serialize=False, verbose_name='主键id')), + ('state', models.CharField(choices=[('PENDING', 'Pending'), ('STARTED', 'Started'), ('SUCCESS', 'Success'), ('FAILURE', 'Failure'), ('REVOKE', 'Revoke'), ('REVOKED', 'Revoked')], default='STARTED', max_length=20, verbose_name='状态')), + ('details', models.JSONField(default=dict, encoder=common.encoder.encoder.SystemEncoder, verbose_name='执行详情')), + ('run_time', models.FloatField(default=0, verbose_name='运行时长')), + ('meta', models.JSONField(default=dict, verbose_name='元数据')), + ('knowledge', models.ForeignKey(db_constraint=False, on_delete=django.db.models.deletion.DO_NOTHING, to='knowledge.knowledge', verbose_name='知识库')), + ], + options={ + 'db_table': 'knowledge_action', + }, + ), + ] diff --git a/apps/knowledge/migrations/0006_paragraph_chunks.py b/apps/knowledge/migrations/0006_paragraph_chunks.py new file mode 100644 index 000000000..10131e488 --- /dev/null +++ b/apps/knowledge/migrations/0006_paragraph_chunks.py @@ -0,0 +1,19 @@ +# Generated by Django 5.2.8 on 2025-11-24 07:09 + +import django.contrib.postgres.fields +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('knowledge', '0005_knowledgeaction'), + ] + + operations = [ + migrations.AddField( + model_name='paragraph', + name='chunks', + field=django.contrib.postgres.fields.ArrayField(base_field=models.CharField(), default=list, size=None, verbose_name='块'), + ), + ] diff --git a/apps/knowledge/models/knowledge.py b/apps/knowledge/models/knowledge.py index dca940b48..ef72f4647 100644 --- a/apps/knowledge/models/knowledge.py +++ b/apps/knowledge/models/knowledge.py @@ -3,6 +3,7 @@ import zipfile from enum import Enum import uuid_utils.compat as uuid +from django.contrib.postgres.fields import ArrayField from django.contrib.postgres.search import SearchVectorField from django.db import models from django.db.models import QuerySet @@ -23,6 +24,7 @@ class KnowledgeType(models.IntegerChoices): WEB = 1, 'web站点类型' LARK = 2, '飞书类型' YUQUE = 3, '语雀类型' + WORKFLOW = 4, '工作流类型' class TaskType(Enum): @@ -135,6 +137,40 @@ class Knowledge(AppModelMixin): db_table = "knowledge" +class KnowledgeWorkflow(AppModelMixin): + """ + 知识库工作流表 + """ + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid7, editable=False, verbose_name="主键id") + knowledge = models.OneToOneField(Knowledge, on_delete=models.CASCADE, verbose_name="知识库", + db_constraint=False, related_name='workflow') + workspace_id = models.CharField(max_length=64, verbose_name="工作空间id", default="default", db_index=True) + work_flow = models.JSONField(verbose_name="工作流数据", default=dict) + is_publish = models.BooleanField(verbose_name="是否发布", default=False, db_index=True) + publish_time = models.DateTimeField(verbose_name="发布时间", null=True, blank=True) + + class Meta: + db_table = "knowledge_workflow" + + +class KnowledgeWorkflowVersion(AppModelMixin): + """ + 知识库工作流版本表 - 记录工作流历史版本 + """ + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid7, editable=False, verbose_name="主键id") + knowledge = models.ForeignKey(Knowledge, 
on_delete=models.CASCADE, verbose_name="知识库", db_constraint=False) + workflow = models.ForeignKey(KnowledgeWorkflow, on_delete=models.CASCADE, verbose_name="工作流", + db_constraint=False, related_name='versions') + workspace_id = models.CharField(max_length=64, verbose_name="工作空间id", default="default", db_index=True) + work_flow = models.JSONField(verbose_name="工作流数据", default=dict) + publish_user_id = models.UUIDField(verbose_name="发布者id", max_length=128, default=None, null=True) + publish_user_name = models.CharField(verbose_name="发布者名称", max_length=128, default="") + + class Meta: + db_table = "knowledge_workflow_version" + unique_together = [['knowledge']] # 同一知识库的版本号唯一 + + def get_default_status(): return Status('').__str__() @@ -162,6 +198,7 @@ class Document(AppModelMixin): class Meta: db_table = "document" + class Tag(AppModelMixin): """ 标签表 - 存储标签的key-value定义 @@ -206,6 +243,7 @@ class Paragraph(AppModelMixin): hit_num = models.IntegerField(verbose_name="命中次数", default=0) is_active = models.BooleanField(default=True, db_index=True) position = models.IntegerField(verbose_name="段落顺序", default=0, db_index=True) + chunks = ArrayField(verbose_name="块", base_field=models.CharField(), default=list) class Meta: db_table = "paragraph" diff --git a/apps/knowledge/models/knowledge_action.py b/apps/knowledge/models/knowledge_action.py new file mode 100644 index 000000000..0825415b3 --- /dev/null +++ b/apps/knowledge/models/knowledge_action.py @@ -0,0 +1,49 @@ +# coding=utf-8 +""" + @project: MaxKB + @Author:虎虎 + @file: knowledge_action.py + @date:2025/11/18 17:59 + @desc: +""" +import uuid_utils.compat as uuid + +from django.db import models + +from common.encoder.encoder import SystemEncoder +from common.mixins.app_model_mixin import AppModelMixin +from knowledge.models import Knowledge + + +class State(models.TextChoices): + # 等待 + PENDING = 'PENDING' + # 执行中 + STARTED = 'STARTED' + # 成功 + SUCCESS = 'SUCCESS' + # 失败 + FAILURE = 'FAILURE' + # 取消任务 + REVOKE = 'REVOKE' + # 取消成功 + REVOKED = 'REVOKED' + + +class KnowledgeAction(AppModelMixin): + id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid7, editable=False, verbose_name="主键id") + + knowledge = models.ForeignKey(Knowledge, on_delete=models.DO_NOTHING, verbose_name="知识库", db_constraint=False) + + state = models.CharField(verbose_name='状态', max_length=20, + choices=State.choices, + default=State.STARTED) + + details = models.JSONField(verbose_name="执行详情", default=dict, encoder=SystemEncoder) + + run_time = models.FloatField(verbose_name="运行时长", default=0) + + meta = models.JSONField(verbose_name="元数据", default=dict) + + class Meta: + db_table = "knowledge_action" diff --git a/apps/knowledge/serializers/knowledge.py b/apps/knowledge/serializers/knowledge.py index 9daeb7dad..420823db6 100644 --- a/apps/knowledge/serializers/knowledge.py +++ b/apps/knowledge/serializers/knowledge.py @@ -31,7 +31,7 @@ from common.utils.fork import Fork, ChildLink from common.utils.logger import maxkb_logger from common.utils.split_model import get_split_model from knowledge.models import Knowledge, KnowledgeScope, KnowledgeType, Document, Paragraph, Problem, \ - ProblemParagraphMapping, TaskType, State, SearchMode, KnowledgeFolder, File, Tag + ProblemParagraphMapping, TaskType, State, SearchMode, KnowledgeFolder, File, Tag, KnowledgeWorkflow from knowledge.serializers.common import ProblemParagraphManage, drop_knowledge_index, \ get_embedding_model_id_by_knowledge_id, MetaSerializer, \ GenerateRelatedSerializer, 
get_embedding_model_by_knowledge_id, list_paragraph, write_image, zip_dir @@ -342,8 +342,15 @@ class KnowledgeSerializer(serializers.Serializer): ) ) ), with_search_one=True) + workflow = {} + if knowledge_dict.get('type') == 4: + from knowledge.models import KnowledgeWorkflow + k = QuerySet(KnowledgeWorkflow).filter(knowledge_id=knowledge_dict.get('id')).first() + if k: + workflow = k.work_flow return { **knowledge_dict, + 'work_flow': workflow, 'meta': json.loads(knowledge_dict.get('meta', '{}')), 'application_id_list': list(filter( lambda application_id: all_application_list.__contains__(application_id), @@ -406,7 +413,15 @@ class KnowledgeSerializer(serializers.Serializer): application_id=application_id, knowledge_id=self.data.get('knowledge_id') ) for application_id in application_id_list ]) if len(application_id_list) > 0 else None - + if instance.get("work_flow"): + QuerySet(KnowledgeWorkflow).update_or_create(knowledge_id=self.data.get("knowledge_id"), + create_defaults={'id': uuid.uuid7(), + 'knowledge_id': self.data.get("knowledge_id"), + "workspace_id": self.data.get('workspace_id'), + 'work_flow': instance.get('work_flow', {}), }, + defaults={ + 'work_flow': instance.get('work_flow') + }) knowledge.save() if select_one: return self.one() diff --git a/apps/knowledge/serializers/knowledge_workflow.py b/apps/knowledge/serializers/knowledge_workflow.py new file mode 100644 index 000000000..9e6b7ee2f --- /dev/null +++ b/apps/knowledge/serializers/knowledge_workflow.py @@ -0,0 +1,187 @@ +# coding=utf-8 +import asyncio +import json +from typing import Dict + +import uuid_utils.compat as uuid +from django.db import transaction +from django.db.models import QuerySet +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from application.flow.common import Workflow, WorkflowMode +from application.flow.i_step_node import KnowledgeWorkflowPostHandler +from application.flow.knowledge_workflow_manage import KnowledgeWorkflowManage +from application.flow.step_node import get_node +from application.serializers.application import get_mcp_tools +from common.exception.app_exception import AppApiException +from common.utils.rsa_util import rsa_long_decrypt +from common.utils.tool_code import ToolExecutor +from knowledge.models import KnowledgeScope, Knowledge, KnowledgeType, KnowledgeWorkflow +from knowledge.models.knowledge_action import KnowledgeAction, State +from knowledge.serializers.knowledge import KnowledgeModelSerializer +from maxkb.const import CONFIG +from system_manage.models import AuthTargetType +from system_manage.serializers.user_resource_permission import UserResourcePermissionSerializer +from tools.models import Tool + +tool_executor = ToolExecutor(CONFIG.get('SANDBOX')) + + +class KnowledgeWorkflowModelSerializer(serializers.ModelSerializer): + class Meta: + model = KnowledgeWorkflow + fields = '__all__' + + +class KnowledgeWorkflowActionSerializer(serializers.Serializer): + workspace_id = serializers.CharField(required=True, label=_('workspace id')) + knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id')) + + def action(self, instance: Dict, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + knowledge_workflow = QuerySet(KnowledgeWorkflow).filter(knowledge_id=self.data.get("knowledge_id")).first() + knowledge_action_id = uuid.uuid7() + KnowledgeAction(id=knowledge_action_id, knowledge_id=self.data.get("knowledge_id"), state=State.STARTED).save() + work_flow_manage = 
KnowledgeWorkflowManage( + Workflow.new_instance(knowledge_workflow.work_flow, WorkflowMode.KNOWLEDGE), + {'knowledge_id': self.data.get("knowledge_id"), 'knowledge_action_id': knowledge_action_id, 'stream': True, + 'workspace_id': self.data.get("workspace_id"), + **instance}, + KnowledgeWorkflowPostHandler(None, knowledge_action_id)) + work_flow_manage.run() + return {'id': knowledge_action_id, 'knowledge_id': self.data.get("knowledge_id"), 'state': State.STARTED, + 'details': {}} + + class Operate(serializers.Serializer): + workspace_id = serializers.CharField(required=True, label=_('workspace id')) + knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id')) + id = serializers.UUIDField(required=True, label=_('knowledge action id')) + + def one(self, is_valid=True): + if is_valid: + self.is_valid(raise_exception=True) + knowledge_action_id = self.data.get("id") + knowledge_action = QuerySet(KnowledgeAction).filter(id=knowledge_action_id).first() + return {'id': knowledge_action_id, 'knowledge_id': knowledge_action.knowledge_id, + 'state': knowledge_action.state, + 'details': knowledge_action.details} + + +class KnowledgeWorkflowSerializer(serializers.Serializer): + class Datasource(serializers.Serializer): + type = serializers.CharField(required=True, label=_('type')) + id = serializers.CharField(required=True, label=_('type')) + params = serializers.DictField(required=True, label="") + function_name = serializers.CharField(required=True, label=_('function_name')) + + def action(self): + self.is_valid(raise_exception=True) + if self.data.get('type') == 'local': + node = get_node(self.data.get('id'), WorkflowMode.KNOWLEDGE) + return node.__getattribute__(node, self.data.get("function_name"))(**self.data.get("params")) + elif self.data.get('type') == 'tool': + tool = QuerySet(Tool).filter(id=self.data.get("id")).first() + init_params = json.loads(rsa_long_decrypt(tool.init_params)) + return tool_executor.exec_code(tool.code, {**init_params, **self.data.get('params')}, + self.data.get('function_name')) + + class Create(serializers.Serializer): + user_id = serializers.UUIDField(required=True, label=_('user id')) + workspace_id = serializers.CharField(required=True, label=_('workspace id')) + scope = serializers.ChoiceField( + required=False, label=_('scope'), default=KnowledgeScope.WORKSPACE, choices=KnowledgeScope.choices + ) + + @transaction.atomic + def save_workflow(self, instance: Dict): + self.is_valid(raise_exception=True) + + folder_id = instance.get('folder_id', self.data.get('workspace_id')) + if QuerySet(Knowledge).filter( + workspace_id=self.data.get('workspace_id'), folder_id=folder_id, name=instance.get('name') + ).exists(): + raise AppApiException(500, _('Knowledge base name duplicate!')) + + knowledge_id = uuid.uuid7() + knowledge = Knowledge( + id=knowledge_id, + name=instance.get('name'), + desc=instance.get('desc'), + user_id=self.data.get('user_id'), + type=instance.get('type', KnowledgeType.WORKFLOW), + scope=self.data.get('scope', KnowledgeScope.WORKSPACE), + folder_id=folder_id, + workspace_id=self.data.get('workspace_id'), + embedding_model_id=instance.get('embedding_model_id'), + meta={}, + ) + knowledge.save() + # 自动资源给授权当前用户 + UserResourcePermissionSerializer(data={ + 'workspace_id': self.data.get('workspace_id'), + 'user_id': self.data.get('user_id'), + 'auth_target_type': AuthTargetType.KNOWLEDGE.value + }).auth_resource(str(knowledge_id)) + + knowledge_workflow = KnowledgeWorkflow( + id=uuid.uuid7(), + knowledge_id=knowledge_id, + 
workspace_id=self.data.get('workspace_id'), + work_flow=instance.get('work_flow', {}), + ) + + knowledge_workflow.save() + + return {**KnowledgeModelSerializer(knowledge).data, 'document_list': []} + + class Operate(serializers.Serializer): + user_id = serializers.UUIDField(required=True, label=_('user id')) + workspace_id = serializers.CharField(required=True, label=_('workspace id')) + knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id')) + + def edit(self, instance: Dict): + pass + + def one(self): + self.is_valid(raise_exception=True) + workflow = QuerySet(KnowledgeWorkflow).filter(knowledge_id=self.data.get('knowledge_id')).first() + return {**KnowledgeWorkflowModelSerializer(workflow).data} + +class McpServersSerializer(serializers.Serializer): + mcp_servers = serializers.JSONField(required=True) + +class KnowledgeWorkflowMcpSerializer(serializers.Serializer): + knowledge_id = serializers.UUIDField(required=True, label=_('knowledge id')) + user_id = serializers.UUIDField(required=True, label=_("User ID")) + workspace_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, label=_("Workspace ID")) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + workspace_id = self.data.get('workspace_id') + query_set = QuerySet(Knowledge).filter(id=self.data.get('knowledge_id')) + if workspace_id: + query_set = query_set.filter(workspace_id=workspace_id) + if not query_set.exists(): + raise AppApiException(500, _('Knowledge id does not exist')) + + def get_mcp_servers(self, instance, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + McpServersSerializer(data=instance).is_valid(raise_exception=True) + servers = json.loads(instance.get('mcp_servers')) + for server, config in servers.items(): + if config.get('transport') not in ['sse', 'streamable_http']: + raise AppApiException(500, _('Only support transport=sse or transport=streamable_http')) + tools = [] + for server in servers: + tools += [ + { + 'server': server, + 'name': tool.name, + 'description': tool.description, + 'args_schema': tool.args_schema, + } + for tool in asyncio.run(get_mcp_tools({server: servers[server]}))] + return tools \ No newline at end of file diff --git a/apps/knowledge/urls.py b/apps/knowledge/urls.py index 3b41be1e9..738ad05fe 100644 --- a/apps/knowledge/urls.py +++ b/apps/knowledge/urls.py @@ -9,12 +9,14 @@ urlpatterns = [ path('workspace/knowledge/document/table_template/export', views.TableTemplate.as_view()), path('workspace//knowledge', views.KnowledgeView.as_view()), path('workspace//knowledge/base', views.KnowledgeBaseView.as_view()), + path('workspace//knowledge/workflow', views.KnowledgeWorkflowView.as_view()), path('workspace//knowledge/web', views.KnowledgeWebView.as_view()), path('workspace//knowledge/model', views.KnowledgeView.Model.as_view()), path('workspace//knowledge/embedding_model', views.KnowledgeView.EmbeddingModel.as_view()), path('workspace//knowledge/tags', views.KnowledgeView.Tags.as_view()), path('workspace//knowledge/', views.KnowledgeView.Operate.as_view()), path('workspace//knowledge//sync', views.KnowledgeView.SyncWeb.as_view()), + path('workspace//knowledge//workfolw', views.KnowledgeWorkflowView.Operate.as_view()), path('workspace//knowledge//generate_related', views.KnowledgeView.GenerateRelated.as_view()), path('workspace//knowledge//embedding', views.KnowledgeView.Embedding.as_view()), path('workspace//knowledge//hit_test', views.KnowledgeView.HitTest.as_view()), @@ -67,5 
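# Illustrative sketch, not part of the patch: starting a knowledge-workflow run
# and polling its state through the serializers added above, mirroring what
# KnowledgeWorkflowActionView does. It assumes an existing workflow-type
# knowledge base whose KnowledgeWorkflow has been saved; the ids and the
# data_source payload are placeholders whose shape depends on the workflow's
# data-source node.
from knowledge.serializers.knowledge_workflow import KnowledgeWorkflowActionSerializer

started = KnowledgeWorkflowActionSerializer(
    data={'workspace_id': 'default',
          'knowledge_id': '0192d7e2-0000-7000-8000-000000000000'}  # placeholder
).action({'data_source': {}}, with_valid=True)

# The run is tracked by a KnowledgeAction row created with state=STARTED; its
# state and details are updated as the workflow completes.
current = KnowledgeWorkflowActionSerializer.Operate(
    data={'workspace_id': 'default',
          'knowledge_id': started['knowledge_id'],
          'id': started['id']}
).one()
print(current['state'], current['details'])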
+69,9 @@ urlpatterns = [ path('workspace//knowledge//problem//', views.ProblemView.Page.as_view()), path('workspace//knowledge//document//', views.DocumentView.Page.as_view()), path('workspace//knowledge//', views.KnowledgeView.Page.as_view()), - + path('workspace//knowledge//datasource///form_list', views.KnowledgeDatasourceFormListView.as_view()), + path('workspace//knowledge//datasource///', views.KnowledgeDatasourceView.as_view()), + path('workspace//knowledge//action', views.KnowledgeWorkflowActionView.as_view()), + path('workspace//knowledge//action/', views.KnowledgeWorkflowActionView.Operate.as_view()), + path('workspace//knowledge//mcp_tools', views.McpServers.as_view()), ] diff --git a/apps/knowledge/vector/base_vector.py b/apps/knowledge/vector/base_vector.py index 2a7731fce..57171189c 100644 --- a/apps/knowledge/vector/base_vector.py +++ b/apps/knowledge/vector/base_vector.py @@ -23,7 +23,7 @@ lock = threading.Lock() def chunk_data(data: Dict): if str(data.get('source_type')) == str(SourceType.PARAGRAPH.value): text = data.get('text') - chunk_list = text_to_chunk(text) + chunk_list = data.get('chunks') if data.get('chunks') else text_to_chunk(text) return [{**data, 'text': chunk} for chunk in chunk_list] return [data] @@ -63,7 +63,8 @@ class BaseVectorStore(ABC): BaseVectorStore.vector_exists = True return True - def save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str, source_id: str, + def save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str, + source_id: str, is_active: bool, embedding: Embeddings): """ @@ -104,7 +105,8 @@ class BaseVectorStore(ABC): break @abstractmethod - def _save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str, source_id: str, + def _save(self, text, source_type: SourceType, knowledge_id: str, document_id: str, paragraph_id: str, + source_id: str, is_active: bool, embedding: Embeddings): pass diff --git a/apps/knowledge/views/__init__.py b/apps/knowledge/views/__init__.py index ed401ad8a..98a57a228 100644 --- a/apps/knowledge/views/__init__.py +++ b/apps/knowledge/views/__init__.py @@ -3,3 +3,4 @@ from .knowledge import * from .paragraph import * from .problem import * from .tag import * +from .knowledge_workflow import * diff --git a/apps/knowledge/views/knowledge_workflow.py b/apps/knowledge/views/knowledge_workflow.py new file mode 100644 index 000000000..20f2c17ea --- /dev/null +++ b/apps/knowledge/views/knowledge_workflow.py @@ -0,0 +1,155 @@ +# coding=utf-8 + +from django.utils.translation import gettext_lazy as _ +from drf_spectacular.utils import extend_schema +from rest_framework.request import Request +from rest_framework.views import APIView + +from application.api.application_api import SpeechToTextAPI +from common.auth import TokenAuth +from common.auth.authentication import has_permissions +from common.constants.permission_constants import PermissionConstants, RoleConstants, ViewPermission, CompareConstants +from common.log.log import log +from common.result import result +from knowledge.api.knowledge_workflow import KnowledgeWorkflowApi +from knowledge.serializers.common import get_knowledge_operation_object +from knowledge.serializers.knowledge_workflow import KnowledgeWorkflowSerializer, KnowledgeWorkflowActionSerializer, \ + KnowledgeWorkflowMcpSerializer + + +class KnowledgeDatasourceFormListView(APIView): + authentication_classes = [TokenAuth] + + def post(self, request: Request, workspace_id: str, 
knowledge_id: str, type: str, id: str): + return result.success(KnowledgeWorkflowSerializer.Datasource( + data={'type': type, 'id': id, 'params': request.data, 'function_name': 'get_form_list'}).action()) + + +class KnowledgeDatasourceView(APIView): + def post(self, request: Request, workspace_id: str, knowledge_id: str, type: str, id: str, function_name: str): + return result.success(KnowledgeWorkflowSerializer.Datasource( + data={'type': type, 'id': id, 'params': request.data, 'function_name': function_name}).action()) + + +class KnowledgeWorkflowActionView(APIView): + authentication_classes = [TokenAuth] + + def post(self, request: Request, workspace_id: str, knowledge_id: str): + return result.success(KnowledgeWorkflowActionSerializer( + data={'workspace_id': workspace_id, 'knowledge_id': knowledge_id}).action(request.data, True)) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + def get(self, request, workspace_id: str, knowledge_id: str, knowledge_action_id: str): + return result.success(KnowledgeWorkflowActionSerializer.Operate( + data={'workspace_id': workspace_id, 'knowledge_id': knowledge_id, 'id': knowledge_action_id}) + .one()) + + +class KnowledgeWorkflowView(APIView): + authentication_classes = [TokenAuth] + + @extend_schema( + methods=['POST'], + description=_('Create knowledge workflow'), + summary=_('Create knowledge workflow'), + operation_id=_('Create knowledge workflow'), # type: ignore + parameters=KnowledgeWorkflowApi.get_parameters(), + responses=KnowledgeWorkflowApi.get_response(), + tags=[_('Knowledge Base')] # type: ignore + ) + @has_permissions( + PermissionConstants.KNOWLEDGE_CREATE.get_workspace_permission(), + RoleConstants.WORKSPACE_MANAGE.get_workspace_role(), RoleConstants.USER.get_workspace_role() + ) + def post(self, request: Request, workspace_id: str): + return result.success(KnowledgeWorkflowSerializer.Create( + data={'user_id': request.user.id, 'workspace_id': workspace_id} + ).save_workflow(request.data)) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @extend_schema( + methods=['PUT'], + description=_('Edit knowledge workflow'), + summary=_('Edit knowledge workflow'), + operation_id=_('Edit knowledge workflow'), # type: ignore + parameters=KnowledgeWorkflowApi.get_parameters(), + request=KnowledgeWorkflowApi.get_request(), + responses=KnowledgeWorkflowApi.get_response(), + tags=[_('Knowledge Base')] # type: ignore + ) + @has_permissions( + PermissionConstants.KNOWLEDGE_EDIT.get_workspace_knowledge_permission(), + PermissionConstants.KNOWLEDGE_EDIT.get_workspace_permission_workspace_manage_role(), + RoleConstants.WORKSPACE_MANAGE.get_workspace_role(), + ViewPermission( + [RoleConstants.USER.get_workspace_role()], + [PermissionConstants.KNOWLEDGE.get_workspace_knowledge_permission()], + CompareConstants.AND + ) + ) + @log( + menu='Knowledge Base', operate="Modify knowledge workflow", + get_operation_object=lambda r, keywords: get_knowledge_operation_object(keywords.get('knowledge_id')), + ) + def put(self, request: Request, workspace_id: str, knowledge_id: str): + return result.success(KnowledgeWorkflowSerializer.Operate( + data={'user_id': request.user.id, 'workspace_id': workspace_id, 'knowledge_id': knowledge_id} + ).edit(request.data)) + + @extend_schema( + methods=['GET'], + description=_('Get knowledge workflow'), + summary=_('Get knowledge workflow'), + operation_id=_('Get knowledge workflow'), # type: ignore + parameters=KnowledgeWorkflowApi.get_parameters(), + 
responses=KnowledgeWorkflowApi.get_response(), + tags=[_('Knowledge Base')] # type: ignore + ) + @has_permissions( + PermissionConstants.KNOWLEDGE_READ.get_workspace_knowledge_permission(), + PermissionConstants.KNOWLEDGE_READ.get_workspace_permission_workspace_manage_role(), + RoleConstants.WORKSPACE_MANAGE.get_workspace_role(), + ViewPermission( + [RoleConstants.USER.get_workspace_role()], + [PermissionConstants.KNOWLEDGE.get_workspace_knowledge_permission()], + CompareConstants.AND + ), + ) + def get(self, request: Request, workspace_id: str, knowledge_id: str): + return result.success(KnowledgeWorkflowSerializer.Operate( + data={'user_id': request.user.id, 'workspace_id': workspace_id, 'knowledge_id': knowledge_id} + ).one()) + + +class KnowledgeWorkflowVersionView(APIView): + pass + + +class McpServers(APIView): + authentication_classes = [TokenAuth] + + @extend_schema( + methods=['GET'], + description=_("speech to text"), + summary=_("speech to text"), + operation_id=_("speech to text"), # type: ignore + parameters=SpeechToTextAPI.get_parameters(), + request=SpeechToTextAPI.get_request(), + responses=SpeechToTextAPI.get_response(), + tags=[_('Knowledge Base')] # type: ignore + ) + @has_permissions(PermissionConstants.KNOWLEDGE_READ.get_workspace_application_permission(), + PermissionConstants.KNOWLEDGE_READ.get_workspace_permission_workspace_manage_role(), + ViewPermission([RoleConstants.USER.get_workspace_role()], + [PermissionConstants.KNOWLEDGE.get_workspace_application_permission()], + CompareConstants.AND), + RoleConstants.WORKSPACE_MANAGE.get_workspace_role()) + def post(self, request: Request, workspace_id, knowledge_id: str): + return result.success(KnowledgeWorkflowMcpSerializer( + data={'mcp_servers': request.query_params.get('mcp_servers'), 'workspace_id': workspace_id, + 'user_id': request.user.id, + 'knowledge_id': knowledge_id}).get_mcp_servers(request.data)) diff --git a/apps/maxkb/wsgi/web.py b/apps/maxkb/wsgi/web.py index d1fa687d8..6180f08b6 100644 --- a/apps/maxkb/wsgi/web.py +++ b/apps/maxkb/wsgi/web.py @@ -22,8 +22,8 @@ class TorchBlocker: ['torch'] if i in name.lower()]) > 0: - print(f"Disable package is being imported: 【{name}】", file=sys.stderr) - pass + import types + return types.ModuleType(name) else: return self.original_import(name, *args, **kwargs) diff --git a/apps/tools/migrations/0004_alter_tool_tool_type.py b/apps/tools/migrations/0004_alter_tool_tool_type.py new file mode 100644 index 000000000..e561675b0 --- /dev/null +++ b/apps/tools/migrations/0004_alter_tool_tool_type.py @@ -0,0 +1,18 @@ +# Generated by Django 5.2.8 on 2025-11-17 07:07 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + dependencies = [ + ('tools', '0003_alter_tool_template_id'), + ] + + operations = [ + migrations.AlterField( + model_name='tool', + name='tool_type', + field=models.CharField(choices=[('INTERNAL', '内置'), ('CUSTOM', '自定义'), ('MCP', 'MCP工具'), ('DATA_SOURCE', '数据源')], db_index=True, default='CUSTOM', max_length=20, verbose_name='工具类型'), + ), + ] diff --git a/apps/tools/models/tool.py b/apps/tools/models/tool.py index e58eb14ef..712f2029e 100644 --- a/apps/tools/models/tool.py +++ b/apps/tools/models/tool.py @@ -32,6 +32,7 @@ class ToolType(models.TextChoices): INTERNAL = "INTERNAL", '内置' CUSTOM = "CUSTOM", "自定义" MCP = "MCP", "MCP工具" + DATA_SOURCE = "DATA_SOURCE", "数据源" class Tool(AppModelMixin): diff --git a/apps/tools/serializers/tool.py b/apps/tools/serializers/tool.py index a2e380585..b7be45512 100644 --- 
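# Illustrative sketch, not part of the patch: listing the tools exposed by a
# knowledge base's MCP servers the way the new mcp_tools endpoint does. The
# ids are placeholders for an existing knowledge base and user, and the server
# entry assumes a reachable MCP endpoint; only its transport key is validated
# here, and anything other than sse or streamable_http raises
# AppApiException(500).
import json

from knowledge.serializers.knowledge_workflow import KnowledgeWorkflowMcpSerializer

mcp_servers = json.dumps({
    'search': {'url': 'https://mcp.example.com/sse', 'transport': 'sse'}
})
tools = KnowledgeWorkflowMcpSerializer(data={
    'knowledge_id': '0192d7e2-0000-7000-8000-000000000000',  # placeholder
    'user_id': '0192d7e2-0000-7000-8000-000000000001',       # placeholder
    'workspace_id': 'default',
}).get_mcp_servers({'mcp_servers': mcp_servers})
# Each entry carries the server key plus the tool's name, description and
# args_schema.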
a/apps/tools/serializers/tool.py +++ b/apps/tools/serializers/tool.py @@ -607,6 +607,7 @@ class ToolSerializer(serializers.Serializer): workspace_id=self.data.get('workspace_id'), input_field_list=tool.get('input_field_list'), init_field_list=tool.get('init_field_list', []), + tool_type=tool.get('tool_type'), folder_id=folder_id, scope=scope, is_active=False diff --git a/ui/src/api/knowledge/knowledge.ts b/ui/src/api/knowledge/knowledge.ts index 5e0e79ecc..ed345e002 100644 --- a/ui/src/api/knowledge/knowledge.ts +++ b/ui/src/api/knowledge/knowledge.ts @@ -1,7 +1,7 @@ import { Result } from '@/request/Result' import { get, post, del, put, exportFile, exportExcel } from '@/request/index' import { type Ref } from 'vue' -import type { pageRequest } from '@/api/type/common' +import type { Dict, pageRequest } from '@/api/type/common' import type { knowledgeData } from '@/api/type/knowledge' import useStore from '@/stores' @@ -196,6 +196,18 @@ const postKnowledge: (data: knowledgeData, loading?: Ref) => Promise) => Promise> = ( + data, + loading, +) => { + return post(`${prefix.value}/workflow`, data, undefined, loading) +} /** * 获取当前用户可使用的向量化模型列表 (没用到) * @param application_id @@ -250,7 +262,7 @@ const postLarkKnowledge: (data: any, loading?: Ref) => Promise + loading?: Ref, ) => Promise> = (knowledge_id, data, loading) => { return put(`${prefix.value}/lark/${knowledge_id}`, data, undefined, loading) } @@ -262,47 +274,115 @@ const getAllTags: (params: any, loading?: Ref) => Promise> return get(`${prefix.value}/tags`, params, loading) } -const getTags: (knowledge_id: string, params: any, loading?: Ref) => Promise> = ( - knowledge_id, - params, - loading, -) => { +const getTags: ( + knowledge_id: string, + params: any, + loading?: Ref, +) => Promise> = (knowledge_id, params, loading) => { return get(`${prefix.value}/${knowledge_id}/tags`, params, loading) } -const postTags: (knowledge_id: string, tags: any, loading?: Ref) => Promise> = ( - knowledge_id, - tags, - loading, -) => { +const postTags: ( + knowledge_id: string, + tags: any, + loading?: Ref, +) => Promise> = (knowledge_id, tags, loading) => { return post(`${prefix.value}/${knowledge_id}/tags`, tags, null, loading) } -const putTag: (knowledge_id: string, tag_id: string, tag: any, loading?: Ref) => Promise> = ( - knowledge_id, - tag_id, - tag, - loading, -) => { +const putTag: ( + knowledge_id: string, + tag_id: string, + tag: any, + loading?: Ref, +) => Promise> = (knowledge_id, tag_id, tag, loading) => { return put(`${prefix.value}/${knowledge_id}/tags/${tag_id}`, tag, null, loading) } -const delTag: (knowledge_id: string, tag_id: string, type: string, loading?: Ref) => Promise> = ( - knowledge_id, - tag_id, - type, - loading, -) => { +const delTag: ( + knowledge_id: string, + tag_id: string, + type: string, + loading?: Ref, +) => Promise> = (knowledge_id, tag_id, type, loading) => { return del(`${prefix.value}/${knowledge_id}/tags/${tag_id}/${type}`, null, loading) } -const delMulTag: (knowledge_id: string, tags: any, loading?: Ref) => Promise> = ( - knowledge_id, - tags, - loading, -) => { +const delMulTag: ( + knowledge_id: string, + tags: any, + loading?: Ref, +) => Promise> = (knowledge_id, tags, loading) => { return put(`${prefix.value}/${knowledge_id}/tags/batch_delete`, tags, null, loading) } +const getKnowledgeWorkflowFormList: ( + knowledge_id: string, + type: 'loacl' | 'tool', + id: string, + node: any, + loading?: Ref, +) => Promise> = ( + knowledge_id: string, + type: 'loacl' | 'tool', + id: string, + node, + loading, +) => { 
+ return post( + `${prefix.value}/${knowledge_id}/datasource/${type}/${id}/form_list`, + { node }, + {}, + loading, + ) +} +const getKnowledgeWorkflowDatasourceDetails: ( + knowledge_id: string, + type: 'loacl' | 'tool', + id: string, + params: any, + function_name: string, + loading?: Ref, +) => Promise> = ( + knowledge_id: string, + type: 'loacl' | 'tool', + id: string, + params, + function_name, + loading, +) => { + return post( + `${prefix.value}/${knowledge_id}/datasource/${type}/${id}/${function_name}`, + params, + {}, + loading, + ) +} +const workflowAction: ( + knowledge_id: string, + instance: Dict, + loading?: Ref, +) => Promise> = (knowledge_id: string, instance, loading) => { + return post(`${prefix.value}/${knowledge_id}/action`, instance, {}, loading) +} +const getWorkflowAction: ( + knowledge_id: string, + knowledge_action_id: string, + loading?: Ref, +) => Promise> = (knowledge_id: string, knowledge_action_id, loading) => { + return get(`${prefix.value}/${knowledge_id}/action/${knowledge_action_id}`, {}, loading) +} + +/** + * mcp 节点 + */ +const getMcpTools: ( + knowledge_id: string, + mcp_servers: any, + loading?: Ref, +) => Promise> = (knowledge_id, mcp_servers, loading) => { + return post(`${prefix.value}/${knowledge_id}/mcp_tools`, { mcp_servers }, {}, loading) +} + export default { getKnowledgeList, @@ -326,5 +406,11 @@ export default { postTags, putTag, delTag, - delMulTag + delMulTag, + createWorkflowKnowledge, + getKnowledgeWorkflowFormList, + workflowAction, + getWorkflowAction, + getKnowledgeWorkflowDatasourceDetails, + getMcpTools, } diff --git a/ui/src/api/system-shared/knowledge.ts b/ui/src/api/system-shared/knowledge.ts index 2f901f6ce..fe6a4e211 100644 --- a/ui/src/api/system-shared/knowledge.ts +++ b/ui/src/api/system-shared/knowledge.ts @@ -189,6 +189,19 @@ const postKnowledge: (data: knowledgeData, loading?: Ref) => Promise) => Promise> = ( + data, + loading, +) => { + return post(`${prefix}/workflow`, data, undefined, loading) +} + /** * 获取当前用户可使用的向量化模型列表(没用到) * @param application_id @@ -313,6 +326,7 @@ export default { postKnowledge, getKnowledgeModel, postWebKnowledge, + createWorkflowKnowledge, postLarkKnowledge, putLarkKnowledge, getAllTags, diff --git a/ui/src/assets/knowledge/icon_basic_template.svg b/ui/src/assets/knowledge/icon_basic_template.svg new file mode 100644 index 000000000..9ed91c3d8 --- /dev/null +++ b/ui/src/assets/knowledge/icon_basic_template.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/ui/src/assets/knowledge/logo_workflow.svg b/ui/src/assets/knowledge/logo_workflow.svg new file mode 100644 index 000000000..7de1fb422 --- /dev/null +++ b/ui/src/assets/knowledge/logo_workflow.svg @@ -0,0 +1,7 @@ + + + + + + + diff --git a/ui/src/assets/tool/icon_datasource.svg b/ui/src/assets/tool/icon_datasource.svg new file mode 100644 index 000000000..88862f01d --- /dev/null +++ b/ui/src/assets/tool/icon_datasource.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/workflow/icon_mcp.svg b/ui/src/assets/tool/icon_mcp.svg similarity index 100% rename from ui/src/assets/workflow/icon_mcp.svg rename to ui/src/assets/tool/icon_mcp.svg diff --git a/ui/src/assets/workflow/icon_tool.svg b/ui/src/assets/tool/icon_tool.svg similarity index 100% rename from ui/src/assets/workflow/icon_tool.svg rename to ui/src/assets/tool/icon_tool.svg diff --git a/ui/src/assets/workflow/icon_tool_custom.svg b/ui/src/assets/tool/icon_tool_custom.svg similarity index 100% rename from ui/src/assets/workflow/icon_tool_custom.svg rename to 
ui/src/assets/tool/icon_tool_custom.svg diff --git a/ui/src/assets/icon_tool_shop.svg b/ui/src/assets/tool/icon_tool_shop.svg similarity index 100% rename from ui/src/assets/icon_tool_shop.svg rename to ui/src/assets/tool/icon_tool_shop.svg diff --git a/ui/src/assets/workflow/icon_data-source-local.svg b/ui/src/assets/workflow/icon_data-source-local.svg new file mode 100644 index 000000000..9ec61b967 --- /dev/null +++ b/ui/src/assets/workflow/icon_data-source-local.svg @@ -0,0 +1,9 @@ + + + + + + + + + diff --git a/ui/src/assets/workflow/icon_document-split.svg b/ui/src/assets/workflow/icon_document-split.svg new file mode 100644 index 000000000..86fa913e1 --- /dev/null +++ b/ui/src/assets/workflow/icon_document-split.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/workflow/icon_knowledge-write.svg b/ui/src/assets/workflow/icon_knowledge-write.svg new file mode 100644 index 000000000..da1dea616 --- /dev/null +++ b/ui/src/assets/workflow/icon_knowledge-write.svg @@ -0,0 +1,5 @@ + + + + + diff --git a/ui/src/assets/workflow/icon_knowledge_write.svg b/ui/src/assets/workflow/icon_knowledge_write.svg new file mode 100644 index 000000000..755e027c1 --- /dev/null +++ b/ui/src/assets/workflow/icon_knowledge_write.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/ui/src/components/ai-chat/component/knowledge-source-component/ExecutionDetailCard.vue b/ui/src/components/ai-chat/component/knowledge-source-component/ExecutionDetailCard.vue index 2b6e4af57..e96e687d8 100644 --- a/ui/src/components/ai-chat/component/knowledge-source-component/ExecutionDetailCard.vue +++ b/ui/src/components/ai-chat/component/knowledge-source-component/ExecutionDetailCard.vue @@ -27,10 +27,15 @@ " >{{ data?.message_tokens + data?.answer_tokens }} tokens - {{ data?.run_time?.toFixed(2) || 0.0 }} s + {{ data?.run_time?.toFixed(2) || 0.0 }} s + + + @@ -224,7 +229,7 @@
- {{ $t('views.applicationWorkflow.nodes.aiChatNode.think') }} + {{ $t('views.workflow.nodes.aiChatNode.think') }}
{{ data.reasoning_content || '-' }} @@ -666,7 +671,7 @@
- {{ $t('views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label') }} + {{ $t('views.workflow.nodes.imageGenerateNode.negative_prompt.label') }}
{{ data.negative_prompt || '-' }} @@ -704,7 +709,7 @@
- {{ $t('views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label') }} + {{ $t('views.workflow.nodes.imageGenerateNode.negative_prompt.label') }}
{{ data.negative_prompt || '-' }} @@ -743,7 +748,7 @@
- {{ $t('views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label') }} + {{ $t('views.workflow.nodes.imageGenerateNode.negative_prompt.label') }}
{{ data.negative_prompt || '-' }} @@ -751,7 +756,7 @@
- {{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.label') }} + {{ $t('views.workflow.nodes.imageToVideoGenerate.first_frame.label') }}
@@ -780,7 +785,7 @@
- {{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.label') }} + {{ $t('views.workflow.nodes.imageToVideoGenerate.last_frame.label') }}
@@ -884,7 +889,7 @@ + diff --git a/ui/src/components/dynamics-form/items/upload/LocalFileUpload.vue b/ui/src/components/dynamics-form/items/upload/LocalFileUpload.vue new file mode 100644 index 000000000..767a97178 --- /dev/null +++ b/ui/src/components/dynamics-form/items/upload/LocalFileUpload.vue @@ -0,0 +1,152 @@ + + + diff --git a/ui/src/components/dynamics-form/type.ts b/ui/src/components/dynamics-form/type.ts index 88a3af373..03e98a051 100644 --- a/ui/src/components/dynamics-form/type.ts +++ b/ui/src/components/dynamics-form/type.ts @@ -173,5 +173,6 @@ interface FormField { children?: Array required_asterisk?: boolean + [propName: string]: any } export type { FormField } diff --git a/ui/src/components/markdown/ReasoningRander.vue b/ui/src/components/markdown/ReasoningRander.vue index 128fa51db..d51966634 100644 --- a/ui/src/components/markdown/ReasoningRander.vue +++ b/ui/src/components/markdown/ReasoningRander.vue @@ -1,7 +1,7 @@ + + diff --git a/ui/src/views/model/component/CreateModelDialog.vue b/ui/src/views/model/component/CreateModelDialog.vue index 4f28c2487..1c25b1d21 100644 --- a/ui/src/views/model/component/CreateModelDialog.vue +++ b/ui/src/views/model/component/CreateModelDialog.vue @@ -239,7 +239,6 @@ import type { FormField } from '@/components/dynamics-form/type' import DynamicsForm from '@/components/dynamics-form/index.vue' import type { FormRules } from 'element-plus' import { MsgError, MsgSuccess, MsgWarning } from '@/utils/message' -import { PermissionType, PermissionDesc } from '@/enums/model' import { input_type_list } from '@/components/dynamics-form/constructor/data' import AddParamDrawer from '@/views/model/component/AddParamDrawer.vue' import { t } from '@/locales' diff --git a/ui/src/views/problem/index.vue b/ui/src/views/problem/index.vue index 4fca4c31f..b9ad53946 100644 --- a/ui/src/views/problem/index.vue +++ b/ui/src/views/problem/index.vue @@ -31,7 +31,7 @@ - {{ $t('views.application.status.published') }} + {{ $t('common.status.published') }}
- {{ $t('views.application.status.unpublished') }} + {{ $t('common.status.unpublished') }}
@@ -462,11 +462,11 @@ const statusVisible = ref(false) const statusArr = ref([]) const statusOptions = ref([ { - label: t('views.application.status.published'), + label: t('common.status.published'), value: true, }, { - label: t('views.application.status.unpublished'), + label: t('common.status.unpublished'), value: false, }, ]) diff --git a/ui/src/views/system-resource-management/ToolResourceIndex.vue b/ui/src/views/system-resource-management/ToolResourceIndex.vue index 2983d41d2..cadc8ada5 100644 --- a/ui/src/views/system-resource-management/ToolResourceIndex.vue +++ b/ui/src/views/system-resource-management/ToolResourceIndex.vue @@ -82,6 +82,7 @@ diff --git a/ui/src/views/tool/DataSourceToolFormDrawer.vue b/ui/src/views/tool/DataSourceToolFormDrawer.vue new file mode 100644 index 000000000..563651e29 --- /dev/null +++ b/ui/src/views/tool/DataSourceToolFormDrawer.vue @@ -0,0 +1,542 @@ + + + + diff --git a/ui/src/views/tool/McpToolFormDrawer.vue b/ui/src/views/tool/McpToolFormDrawer.vue index c05d33422..8dfd1b902 100644 --- a/ui/src/views/tool/McpToolFormDrawer.vue +++ b/ui/src/views/tool/McpToolFormDrawer.vue @@ -36,7 +36,7 @@ - + - + { throw new Error('Code must be a valid JSON object') } } catch (e) { - MsgError(t('views.applicationWorkflow.nodes.mcpNode.mcpServerTip')) + MsgError(t('views.workflow.nodes.mcpNode.mcpServerTip')) return } loading.value = true diff --git a/ui/src/views/tool/ToolFormDrawer.vue b/ui/src/views/tool/ToolFormDrawer.vue index 43c3db06a..1ef90dbe4 100644 --- a/ui/src/views/tool/ToolFormDrawer.vue +++ b/ui/src/views/tool/ToolFormDrawer.vue @@ -34,7 +34,7 @@ - +
- +
- +
{{ $t('views.tool.createTool') }}
@@ -64,13 +64,24 @@
- +
{{ $t('views.tool.createMcpTool') }}
+ + +
+ + + +
+
{{ $t('views.tool.dataSource.createDataSource') }}
+
+
+
-
@@ -200,7 +199,7 @@ {{ t('views.shared.title') }} - + + @@ -378,6 +382,7 @@ import { useRoute, onBeforeRouteLeave } from 'vue-router' import InitParamDrawer from '@/views/tool/component/InitParamDrawer.vue' import ToolFormDrawer from '@/views/tool/ToolFormDrawer.vue' import McpToolFormDrawer from '@/views/tool/McpToolFormDrawer.vue' +import DataSourceToolFormDrawer from '@/views/tool/DataSourceToolFormDrawer.vue' import CreateFolderDialog from '@/components/folder-tree/CreateFolderDialog.vue' import AuthorizedWorkspace from '@/views/system-shared/AuthorizedWorkspaceDialog.vue' import ToolStoreDialog from '@/views/tool/toolStore/ToolStoreDialog.vue' @@ -460,8 +465,10 @@ const search_type_change = () => { } const ToolFormDrawerRef = ref() const McpToolFormDrawerRef = ref() +const DataSourceToolFormDrawerRef = ref() const ToolDrawertitle = ref('') const McpToolDrawertitle = ref('') +const DataSourceToolDrawertitle = ref('') const MoveToDialogRef = ref() function openMoveToDialog(data: any) { @@ -494,6 +501,12 @@ function openCreateDialog(data?: any) { openCreateMcpDialog(data) return } + // 数据源工具 + if (data?.tool_type === 'DATA_SOURCE') { + bus.emit('select_node', data.folder_id) + openCreateDataSourceDialog(data) + return + } // 有版本号的展示readme,是商店更新过来的 if (data?.version) { let readMe = '' @@ -551,6 +564,29 @@ function openCreateMcpDialog(data?: any) { } } +function openCreateDataSourceDialog(data?: any) { + // 有template_id的不允许编辑,是模板转换来的 + if (data?.template_id) { + return + } + // 共享过来的工具不让编辑 + if (isShared.value) { + return + } + DataSourceToolDrawertitle.value = data + ? t('views.tool.dataSource.editDataSource') + : t('views.tool.dataSource.createDataSource') + if (data) { + loadSharedApi({ type: 'tool', systemType: apiType.value }) + .getToolById(data?.id, loading) + .then((res: any) => { + DataSourceToolFormDrawerRef.value.open(res.data) + }) + } else { + DataSourceToolFormDrawerRef.value.open(data) + } +} + async function changeState(row: any) { if (row.is_active) { MsgConfirm( @@ -611,6 +647,18 @@ async function changeState(row: any) { } async function copyTool(row: any) { + // mcp工具 + if (row?.tool_type === 'MCP') { + bus.emit('select_node', row.folder_id) + await copyMcpTool(row) + return + } + // 数据源工具 + if (row?.tool_type === 'DATA_SOURCE') { + bus.emit('select_node', row.folder_id) + await copyDataSource(row) + return + } ToolDrawertitle.value = t('views.tool.copyTool') const res = await loadSharedApi({ type: 'tool', systemType: apiType.value }).getToolById( row.id, @@ -622,6 +670,30 @@ async function copyTool(row: any) { ToolFormDrawerRef.value.open(obj) } +async function copyMcpTool(row: any) { + McpToolDrawertitle.value = t('views.tool.copyMcpTool') + const res = await loadSharedApi({ type: 'tool', systemType: apiType.value }).getToolById( + row.id, + changeStateloading, + ) + const obj = cloneDeep(res.data) + delete obj['id'] + obj['name'] = obj['name'] + ` ${t('common.copyTitle')}` + McpToolFormDrawerRef.value.open(obj) +} + +async function copyDataSource(row: any) { + DataSourceToolDrawertitle.value = t('views.tool.dataSource.copyDataSource') + const res = await loadSharedApi({ type: 'tool', systemType: apiType.value }).getToolById( + row.id, + changeStateloading, + ) + const obj = cloneDeep(res.data) + delete obj['id'] + obj['name'] = obj['name'] + ` ${t('common.copyTitle')}` + DataSourceToolFormDrawerRef.value.open(obj) +} + function exportTool(row: any) { loadSharedApi({ type: 'tool', systemType: apiType.value }) .exportTool(row.id, row.name, loading) diff --git 
a/ui/src/views/tool/index.vue b/ui/src/views/tool/index.vue index 050785870..5252f3c56 100644 --- a/ui/src/views/tool/index.vue +++ b/ui/src/views/tool/index.vue @@ -26,6 +26,7 @@ {{ $t('views.tool.all') }} {{ $t('views.tool.title') }} MCP + {{ $t('views.tool.dataSource.title') }} diff --git a/ui/src/views/tool/toolStore/ToolStoreDialog.vue b/ui/src/views/tool/toolStore/ToolStoreDialog.vue index 6398f9d5e..940d9a174 100644 --- a/ui/src/views/tool/toolStore/ToolStoreDialog.vue +++ b/ui/src/views/tool/toolStore/ToolStoreDialog.vue @@ -13,12 +13,6 @@

{{ $t('views.tool.toolStore.title') }}

- - {{ - $t('views.tool.toolStore.internal') - }} - {{ $t('views.tool.toolStore.title') }} -
 ([
-  // 第一版不上
-  // {
-  //   id: 'recommend',
-  //   title: t('views.tool.toolStore.recommend'),
-  //   tools: []
-  // },
   {
     id: 'web_search',
     title: t('views.tool.toolStore.webSearch'),
@@ -145,32 +132,11 @@ const defaultCategories = ref([
     title: t('views.tool.toolStore.databaseQuery'),
     tools: [],
   },
-  // {
-  //   id: 'image',
-  //   title: t('views.tool.toolStore.image'),
-  //   tools: []
-  // },
-  // {
-  //   id: 'developer',
-  //   title: t('views.tool.toolStore.developer'),
-  //   tools: []
-  // },
-  // {
-  //   id: 'communication',
-  //   title: t('views.tool.toolStore.communication'),
-  //   tools: []
-  // }
 ])
 
 const categories = ref([...defaultCategories.value])
 const filterList = ref(null)
 
-watch(dialogVisible, (bool) => {
-  if (!bool) {
-    toolType.value = 'APPSTORE'
-  }
-})
-
 function getSubTitle(tool: any) {
   return categories.value.find((i) => i.id === tool.label)?.title ?? ''
 }
@@ -184,22 +150,34 @@ function open(id: string) {
 }
 
 async function getList() {
-  if (toolType.value === 'INTERNAL') {
-    await getInternalToolList()
-  } else {
-    await getStoreToolList()
-  }
+  filterList.value = null
+  const [v1, v2] = await Promise.all([
+    getInternalToolList(),
+    getStoreToolList()
+  ])
+
+  const merged = [...v1, ...v2].reduce((acc, category) => {
+    const existing = acc.find((item: any) => item.id === category.id)
+    if (existing) {
+      existing.tools = [...existing.tools, ...category.tools]
+    } else {
+      acc.push({...category})
+    }
+    return acc
+  }, [] as ToolCategory[])
+
+  categories.value = merged
 }
 
 async function getInternalToolList() {
   try {
-    categories.value = defaultCategories.value
+    const categories = defaultCategories.value
     const res = await ToolStoreApi.getInternalToolList({ name: searchValue.value }, loading)
     if (searchValue.value.length) {
-      filterList.value = res.data
+      filterList.value = [...res.data, ...filterList.value || []]
     } else {
       filterList.value = null
-      categories.value.forEach((category) => {
+      categories.forEach((category) => {
         // if (category.id === 'recommend') {
         //   category.tools = res.data
         // } else {
@@ -207,8 +185,10 @@ async function getInternalToolList() {
        // }
      })
    }
+    return categories
   } catch (error) {
     console.error(error)
+    return []
   }
 }
@@ -217,22 +197,25 @@ async function getStoreToolList() {
     const res = await ToolStoreApi.getStoreToolList({ name: searchValue.value }, loading)
     const tags = res.data.additionalProperties.tags
     const storeTools = res.data.apps
+    let categories = []
     // storeTools.forEach((tool: any) => { tool.desc = tool.description })
     if (searchValue.value.length) {
-      filterList.value = res.data.apps
+      filterList.value = [...res.data.apps, ...filterList.value || []]
     } else {
       filterList.value = null
-      categories.value = tags.map((tag: any) => ({
+      categories = tags.map((tag: any) => ({
        id: tag.key,
        title: tag.name, // 国际化
        tools: storeTools.filter((tool: any) => tool.label === tag.key),
      }))
    }
+    return categories
   } catch (error) {
     console.error(error)
+    return []
   }
 }
@@ -242,7 +225,8 @@ const handleClick = (e: MouseEvent) => {
 const internalDescDrawerRef = ref>()
 
 async function handleDetail(tool: any) {
-  if (toolType.value === 'INTERNAL') {
+  console.log(tool)
+  if (tool.tool_type === 'INTERNAL') {
     const index = tool.icon.replace('icon.png', 'detail.md')
     const response = await fetch(index)
     const content = await response.text()
@@ -259,7 +243,7 @@ function handleOpenAdd(data?: any, isEdit?: boolean) {
 const addLoading = ref(false)
 
 async function handleAdd(tool: any) {
-  if (toolType.value === 'INTERNAL') {
+  if (tool.tool_type === 'INTERNAL') {
     await handleInternalAdd(tool)
   } else {
     await handleStoreAdd(tool)
   }
diff --git a/ui/src/workflow/common/NodeCascader.vue b/ui/src/workflow/common/NodeCascader.vue
index dbfd64a31..32dfc1dd8 100644
--- a/ui/src/workflow/common/NodeCascader.vue
+++ b/ui/src/workflow/common/NodeCascader.vue
@@ -62,21 +62,21 @@ function visibleChange(bool: boolean) {
 const validate = () => {
   const incomingNodeValue = getOptionsValue()
   if (!data.value || data.value.length === 0) {
-    return Promise.reject(t('views.applicationWorkflow.variable.ReferencingRequired'))
+    return Promise.reject(t('views.workflow.variable.ReferencingRequired'))
   }
   if (data.value.length < 2) {
-    return Promise.reject(t('views.applicationWorkflow.variable.ReferencingError'))
+    return Promise.reject(t('views.workflow.variable.ReferencingError'))
   }
   const node_id = data.value[0]
   const node_field = data.value[1]
   const nodeParent = incomingNodeValue.find((item: any) => item.value === node_id)
   if (!nodeParent) {
     data.value = []
-    return Promise.reject(t('views.applicationWorkflow.variable.NoReferencing'))
+    return Promise.reject(t('views.workflow.variable.NoReferencing'))
   }
   if (!nodeParent.children.some((item: any) => item.value === node_field)) {
     data.value = []
-    return Promise.reject(t('views.applicationWorkflow.variable.NoReferencing'))
+    return Promise.reject(t('views.workflow.variable.NoReferencing'))
   }
   return Promise.resolve('')
 }
@@ -93,7 +93,7 @@ const get_up_node_field_list = (contain_self: boolean, use_cache: boolean) => {
   return result.filter((v: any) => v.children && v.children.length > 0)
 }
 const getOptionsValue = () => {
-  if (workflowMode == WorkflowMode.ApplicationLoop) {
+  if ([WorkflowMode.ApplicationLoop, WorkflowMode.KnowledgeLoop].includes(workflowMode)) {
     return props.global
       ? get_up_node_field_list(false, true).filter(
           (v: any) =>
@@ -101,7 +101,7 @@
         )
       : get_up_node_field_list(false, true).filter((v: any) => v.children && v.children.length > 0)
   } else {
-    return props.global
+    const result = props.global
       ? props.nodeModel
           .get_up_node_field_list(false, true)
          .filter(
@@ -110,6 +110,7 @@
       : props.nodeModel
           .get_up_node_field_list(false, true)
           .filter((v: any) => v.children && v.children.length > 0)
+    return result
   }
 }
 const initOptions = () => {
diff --git a/ui/src/workflow/common/NodeContainer.vue b/ui/src/workflow/common/NodeContainer.vue
index e62d1ee4b..81a866318 100644
--- a/ui/src/workflow/common/NodeContainer.vue
+++ b/ui/src/workflow/common/NodeContainer.vue
@@ -42,17 +42,14 @@
@@ -84,8 +81,8 @@
         class="mb-16"
         :title="
           props.nodeModel.type === 'application-node'
-            ? $t('views.applicationWorkflow.tip.applicationNodeError')
-            : $t('views.applicationWorkflow.tip.toolNodeError')
+            ? $t('views.workflow.tip.applicationNodeError')
+            : $t('views.workflow.tip.toolNodeError')
         "
         type="error"
         show-icon
@@ -105,7 +102,7 @@
           {{ item.label }}
           {{ '{' + item.value + '}' }}
@@ -136,7 +133,7 @@
diff --git a/ui/src/workflow/icons/data-source-web-node-icon.vue b/ui/src/workflow/icons/data-source-web-node-icon.vue
new file mode 100644
index 000000000..db6c243f8
--- /dev/null
+++ b/ui/src/workflow/icons/data-source-web-node-icon.vue
@@ -0,0 +1,6 @@
+
+
diff --git a/ui/src/workflow/icons/document-split-node-icon.vue b/ui/src/workflow/icons/document-split-node-icon.vue
new file mode 100644
index 000000000..d945ad86a
--- /dev/null
+++ b/ui/src/workflow/icons/document-split-node-icon.vue
@@ -0,0 +1,6 @@
+
+
diff --git a/ui/src/workflow/icons/knowledge-base-node-icon.vue b/ui/src/workflow/icons/knowledge-base-node-icon.vue
new file mode 100644
index 000000000..9165d9e30
--- /dev/null
+++ b/ui/src/workflow/icons/knowledge-base-node-icon.vue
@@ -0,0 +1,6 @@
+
+
diff --git a/ui/src/workflow/icons/knowledge-write-node-icon.vue b/ui/src/workflow/icons/knowledge-write-node-icon.vue
new file mode 100644
index 000000000..9db374db1
--- /dev/null
+++ b/ui/src/workflow/icons/knowledge-write-node-icon.vue
@@ -0,0 +1,6 @@
+
+
diff --git a/ui/src/workflow/icons/mcp-node-icon.vue b/ui/src/workflow/icons/mcp-node-icon.vue
index 625de8f67..1385569c5 100644
--- a/ui/src/workflow/icons/mcp-node-icon.vue
+++ b/ui/src/workflow/icons/mcp-node-icon.vue
@@ -1,6 +1,6 @@
diff --git a/ui/src/workflow/icons/tool-lib-node-icon.vue b/ui/src/workflow/icons/tool-lib-node-icon.vue
index fc88799ba..c52d8ec8f 100644
--- a/ui/src/workflow/icons/tool-lib-node-icon.vue
+++ b/ui/src/workflow/icons/tool-lib-node-icon.vue
@@ -9,7 +9,7 @@
-
+
diff --git a/ui/src/workflow/index.vue b/ui/src/workflow/index.vue
index b7f7f55b1..bd8d4007e 100644
--- a/ui/src/workflow/index.vue
+++ b/ui/src/workflow/index.vue
@@ -6,7 +6,7 @@
+
+
diff --git a/ui/src/workflow/nodes/data-source-web-node/index.ts b/ui/src/workflow/nodes/data-source-web-node/index.ts
new file mode 100644
index 000000000..0bc7c5283
--- /dev/null
+++ b/ui/src/workflow/nodes/data-source-web-node/index.ts
@@ -0,0 +1,12 @@
+import DataSourceWebNodeVue from './index.vue'
+import { AppNode, AppNodeModel } from '@/workflow/common/app-node'
+class DataSourceWebNode extends AppNode {
+  constructor(props: any) {
+    super(props, DataSourceWebNodeVue)
+  }
+}
+export default {
+  type: 'data-source-web-node',
+  model: AppNodeModel,
+  view: DataSourceWebNode,
+}
diff --git a/ui/src/workflow/nodes/data-source-web-node/index.vue b/ui/src/workflow/nodes/data-source-web-node/index.vue
new file mode 100644
index 000000000..64a223906
--- /dev/null
+++ b/ui/src/workflow/nodes/data-source-web-node/index.vue
@@ -0,0 +1,36 @@
+
+
+
+
+
diff --git a/ui/src/workflow/nodes/document-extract-node/index.vue b/ui/src/workflow/nodes/document-extract-node/index.vue
index b92d6f469..003560360 100644
--- a/ui/src/workflow/nodes/document-extract-node/index.vue
+++ b/ui/src/workflow/nodes/document-extract-node/index.vue
@@ -1,6 +1,6 @@
+
+
+
+
diff --git a/ui/src/workflow/nodes/form-node/index.vue b/ui/src/workflow/nodes/form-node/index.vue
index f02ac149e..7c4978d6c 100644
--- a/ui/src/workflow/nodes/form-node/index.vue
+++ b/ui/src/workflow/nodes/form-node/index.vue
@@ -1,6 +1,6 @@
-
+
-
+
+
diff --git a/ui/src/workflow/nodes/knowledge-base-node/component/UserInputFieldTable.vue b/ui/src/workflow/nodes/knowledge-base-node/component/UserInputFieldTable.vue
new file mode 100644
index 000000000..455e92c64
--- /dev/null
+++ b/ui/src/workflow/nodes/knowledge-base-node/component/UserInputFieldTable.vue
@@ -0,0 +1,218 @@
+
+
+
+
+
diff --git a/ui/src/workflow/nodes/knowledge-base-node/component/UserInputTitleDialog.vue b/ui/src/workflow/nodes/knowledge-base-node/component/UserInputTitleDialog.vue
new file mode 100644
index 000000000..08c2f48c3
--- /dev/null
+++ b/ui/src/workflow/nodes/knowledge-base-node/component/UserInputTitleDialog.vue
@@ -0,0 +1,83 @@
+
+
+
diff --git a/ui/src/workflow/nodes/knowledge-base-node/index.ts b/ui/src/workflow/nodes/knowledge-base-node/index.ts
new file mode 100644
index 000000000..c51b52ea0
--- /dev/null
+++ b/ui/src/workflow/nodes/knowledge-base-node/index.ts
@@ -0,0 +1,22 @@
+import BaseNodeVue from './index.vue'
+import { AppNode, AppNodeModel } from '@/workflow/common/app-node'
+
+class BaseNode extends AppNode {
+  constructor(props: any) {
+    super(props, BaseNodeVue)
+  }
+}
+
+class BaseModel extends AppNodeModel {
+  constructor(data: any, graphModel: any) {
+    super(data, graphModel)
+  }
+  get_width() {
+    return 600
+  }
+}
+export default {
+  type: 'knowledge-base-node',
+  model: BaseModel,
+  view: BaseNode,
+}
diff --git a/ui/src/workflow/nodes/knowledge-base-node/index.vue b/ui/src/workflow/nodes/knowledge-base-node/index.vue
new file mode 100644
index 000000000..16ebd572b
--- /dev/null
+++ b/ui/src/workflow/nodes/knowledge-base-node/index.vue
@@ -0,0 +1,66 @@
+
+
+
diff --git a/ui/src/workflow/nodes/knowledge-write-node/index.ts b/ui/src/workflow/nodes/knowledge-write-node/index.ts
new file mode 100644
index 000000000..0318f1c19
--- /dev/null
+++ b/ui/src/workflow/nodes/knowledge-write-node/index.ts
@@ -0,0 +1,15 @@
+import KnowledgeWriteVue from './index.vue'
+import { AppNode, AppNodeModel } from '@/workflow/common/app-node'
+
+
+class KnowledgeWriteNode extends AppNode {
+  constructor(props: any) {
+    super(props, KnowledgeWriteVue)
+  }
+}
+
+export default {
+  type: 'knowledge-write-node',
+  model: AppNodeModel,
+  view: KnowledgeWriteNode,
+}
\ No newline at end of file
diff --git a/ui/src/workflow/nodes/knowledge-write-node/index.vue b/ui/src/workflow/nodes/knowledge-write-node/index.vue
new file mode 100644
index 000000000..12be70eaf
--- /dev/null
+++ b/ui/src/workflow/nodes/knowledge-write-node/index.vue
@@ -0,0 +1,73 @@
+
+
+
+
+
diff --git a/ui/src/workflow/nodes/loop-body-node/LoopBodyContainer.vue b/ui/src/workflow/nodes/loop-body-node/LoopBodyContainer.vue
index 808344e04..2d37515dd 100644
--- a/ui/src/workflow/nodes/loop-body-node/LoopBodyContainer.vue
+++ b/ui/src/workflow/nodes/loop-body-node/LoopBodyContainer.vue
@@ -34,8 +34,8 @@
         class="mb-16"
         :title="
           props.nodeModel.type === 'application-node'
-            ? $t('views.applicationWorkflow.tip.applicationNodeError')
-            : $t('views.applicationWorkflow.tip.functionNodeError')
+            ? $t('views.workflow.tip.applicationNodeError')
+            : $t('views.workflow.tip.functionNodeError')
         "
         type="error"
         show-icon
@@ -58,7 +58,7 @@
           {{ item.label }}
           {{ '{' + item.value + '}' }}
@@ -75,7 +75,7 @@