mirror of
https://github.com/1Panel-dev/MaxKB.git
synced 2025-12-28 23:32:48 +00:00
feat: add video understanding node and related components
This commit is contained in:
parent
80c790bc8c
commit
622780e77d
|
|
@ -32,6 +32,7 @@ from .tool_lib_node import *
|
|||
from .tool_node import *
|
||||
from .variable_assign_node import BaseVariableAssignNode
|
||||
from .variable_splitting_node import BaseVariableSplittingNode
|
||||
from .video_understand_step_node import BaseVideoUnderstandNode
|
||||
|
||||
node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseQuestionNode,
|
||||
BaseConditionNode, BaseReplyNode,
|
||||
|
|
@ -39,6 +40,7 @@ node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseQuest
|
|||
BaseDocumentExtractNode,
|
||||
BaseImageUnderstandNode, BaseFormNode, BaseSpeechToTextNode, BaseTextToSpeechNode,
|
||||
BaseImageGenerateNode, BaseVariableAssignNode, BaseMcpNode, BaseTextToVideoNode, BaseImageToVideoNode,
|
||||
BaseVideoUnderstandNode,
|
||||
BaseIntentNode, BaseLoopNode, BaseLoopStartStepNode,
|
||||
BaseLoopContinueNode,
|
||||
BaseLoopBreakNode, BaseVariableSplittingNode]
|
||||
|
|
|
|||
|
|
@ -169,13 +169,16 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
|
|||
# 处理多张图片
|
||||
images = []
|
||||
for img in image:
|
||||
file_id = img['file_id']
|
||||
file = QuerySet(File).filter(id=file_id).first()
|
||||
image_bytes = file.get_bytes()
|
||||
base64_image = base64.b64encode(image_bytes).decode("utf-8")
|
||||
image_format = what(None, image_bytes)
|
||||
images.append(
|
||||
{'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
|
||||
if isinstance(img, str) and img.startswith('http'):
|
||||
images.append({'type': 'image_url', 'image_url': {'url': img}})
|
||||
else:
|
||||
file_id = img['file_id']
|
||||
file = QuerySet(File).filter(id=file_id).first()
|
||||
image_bytes = file.get_bytes()
|
||||
base64_image = base64.b64encode(image_bytes).decode("utf-8")
|
||||
image_format = what(None, image_bytes)
|
||||
images.append(
|
||||
{'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
|
||||
messages = [HumanMessage(
|
||||
content=[
|
||||
{'type': 'text', 'text': self.workflow_manage.generate_prompt(prompt)},
|
||||
|
|
|
|||
|
|
@ -48,6 +48,7 @@ class BaseStartStepNode(IStarNode):
|
|||
self.context['document'] = details.get('document_list')
|
||||
self.context['image'] = details.get('image_list')
|
||||
self.context['audio'] = details.get('audio_list')
|
||||
self.context['video'] = details.get('video_list')
|
||||
self.context['other'] = details.get('other_list')
|
||||
self.status = details.get('status')
|
||||
self.err_message = details.get('err_message')
|
||||
|
|
@ -73,6 +74,7 @@ class BaseStartStepNode(IStarNode):
|
|||
'image': self.workflow_manage.image_list,
|
||||
'document': self.workflow_manage.document_list,
|
||||
'audio': self.workflow_manage.audio_list,
|
||||
'video': self.workflow_manage.video_list,
|
||||
'other': self.workflow_manage.other_list,
|
||||
|
||||
}
|
||||
|
|
@ -97,6 +99,7 @@ class BaseStartStepNode(IStarNode):
|
|||
'status': self.status,
|
||||
'err_message': self.err_message,
|
||||
'image_list': self.context.get('image'),
|
||||
'video_list': self.context.get('video'),
|
||||
'document_list': self.context.get('document'),
|
||||
'audio_list': self.context.get('audio'),
|
||||
'other_list': self.context.get('other'),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .impl import *
|
||||
|
|
@ -0,0 +1,47 @@
|
|||
# coding=utf-8
|
||||
|
||||
from typing import Type
|
||||
|
||||
from rest_framework import serializers
|
||||
|
||||
from application.flow.i_step_node import INode, NodeResult
|
||||
|
||||
from django.utils.translation import gettext_lazy as _
|
||||
|
||||
|
||||
class VideoUnderstandNodeSerializer(serializers.Serializer):
    """Validated parameters for the video-understand workflow node."""

    model_id = serializers.CharField(required=True, label=_("Model id"))
    system = serializers.CharField(
        required=False, allow_blank=True, allow_null=True, label=_("Role Setting"))
    prompt = serializers.CharField(required=True, label=_("Prompt word"))
    # Number of history conversation rounds to carry into the model call.
    dialogue_number = serializers.IntegerField(
        required=True, label=_("Number of multi-round conversations"))
    dialogue_type = serializers.CharField(
        required=True, label=_("Conversation storage type"))
    is_result = serializers.BooleanField(
        required=False, label=_('Whether to return content'))
    # Workflow reference to the field holding the uploaded videos.
    video_list = serializers.ListField(required=False, label=_("video"))
    model_params_setting = serializers.JSONField(
        required=False, default=dict, label=_("Model parameter settings"))
|
||||
|
||||
|
||||
class IVideoUnderstandNode(INode):
    """Abstract interface for the video-understanding workflow node."""

    type = 'video-understand-node'

    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
        # Node parameters are validated by VideoUnderstandNodeSerializer.
        return VideoUnderstandNodeSerializer

    def _run(self):
        """Resolve the referenced video list, then delegate to execute().

        `video_list` is declared required=False in the serializer, so it may
        be absent; the previous code unconditionally indexed `[0]` and raised
        TypeError on None. Guard before dereferencing.
        """
        video_reference = self.node_params_serializer.data.get('video_list')
        if video_reference:
            res = self.workflow_manage.get_reference_field(video_reference[0],
                                                           video_reference[1:])
        else:
            # No video reference configured: execute() normalizes None to [].
            res = None
        return self.execute(video=res, **self.node_params_serializer.data, **self.flow_params_serializer.data)

    def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
                model_params_setting,
                chat_record_id,
                video,
                **kwargs) -> NodeResult:
        """Run video understanding; implemented by subclasses."""
        pass
|
||||
|
|
@ -0,0 +1,3 @@
|
|||
# coding=utf-8
|
||||
|
||||
from .base_video_understand_node import BaseVideoUnderstandNode
|
||||
|
|
@ -0,0 +1,229 @@
|
|||
# coding=utf-8
|
||||
import base64
|
||||
import time
|
||||
from functools import reduce
|
||||
from imghdr import what
|
||||
from typing import List, Dict
|
||||
|
||||
from django.db.models import QuerySet
|
||||
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
|
||||
|
||||
from application.flow.i_step_node import NodeResult, INode
|
||||
from application.flow.step_node.video_understand_step_node.i_video_understand_node import IVideoUnderstandNode
|
||||
from knowledge.models import File
|
||||
from models_provider.tools import get_model_instance_by_model_workspace_id
|
||||
|
||||
|
||||
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
    """Persist token counts, answer, history and run time into the node context.

    @param node_variable: per-node data produced by execute()
    @param workflow_variable: global workflow data
    @param node: node instance whose context is updated
    @param workflow: workflow manager
    @param answer: final answer text produced by the model
    """
    chat_model = node_variable.get('chat_model')
    # Token usage is only present when the model reported usage metadata.
    if 'usage_metadata' in node_variable:
        message_tokens = node_variable['usage_metadata']['output_tokens']
    else:
        message_tokens = 0
    node.context.update({
        'message_tokens': message_tokens,
        'answer_tokens': chat_model.get_num_tokens(answer),
        'answer': answer,
        'history_message': node_variable['history_message'],
        'question': node_variable['question'],
        'run_time': time.time() - node.context['start_time'],
    })
    # Expose the answer on the node only when the workflow treats it as output.
    if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
        node.answer_text = answer
|
||||
|
||||
|
||||
def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
    """Write context data (streaming variant).

    Relays each chunk to the caller while accumulating the full answer,
    then persists the accumulated result via _write_context.

    @param node_variable: node data (must contain the streaming 'result')
    @param workflow_variable: global data
    @param node: node instance
    @param workflow: workflow manager
    """
    answer = ''
    for chunk in node_variable.get('result'):
        answer += chunk.content
        yield chunk.content
    _write_context(node_variable, workflow_variable, node, workflow, answer)
|
||||
|
||||
|
||||
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
    """Write context data (non-streaming variant).

    @param node_variable: node data (must contain the invoke() 'result')
    @param workflow_variable: global data
    @param node: node instance
    @param workflow: workflow manager
    """
    _write_context(node_variable, workflow_variable, node, workflow,
                   node_variable.get('result').content)
|
||||
|
||||
|
||||
def _guess_video_format(file_bytes: bytes) -> str:
    """Best-effort video container detection from magic bytes; defaults to 'mp4'."""
    if len(file_bytes) >= 12 and file_bytes[4:8] == b'ftyp':
        return 'mp4'  # ISO base media file format (mp4/mov family)
    if file_bytes.startswith(b'\x1aE\xdf\xa3'):
        return 'webm'  # Matroska / WebM EBML header
    if file_bytes.startswith(b'RIFF') and file_bytes[8:12] == b'AVI ':
        return 'avi'
    return 'mp4'


def file_id_to_base64(file_id: str):
    """Load a stored file and return ``[base64_payload, container_format]``.

    Bug fix: the previous implementation used ``imghdr.what`` to detect the
    format, but imghdr only recognises *image* formats and returns None for
    video data, yielding malformed ``data:video/None;base64,...`` URIs
    (imghdr is also deprecated and removed in Python 3.13). The format is now
    sniffed from the file's magic bytes with an 'mp4' fallback.
    """
    file = QuerySet(File).filter(id=file_id).first()
    file_bytes = file.get_bytes()
    base64_video = base64.b64encode(file_bytes).decode("utf-8")
    return [base64_video, _guess_video_format(file_bytes)]
|
||||
|
||||
|
||||
class BaseVideoUnderstandNode(IVideoUnderstandNode):
    """Video understanding node.

    Builds a multimodal message list (prompt text plus video attachments),
    invokes the configured video model (optionally streaming) and records
    the conversation details in the node context.
    """

    @staticmethod
    def _video_format(video_bytes: bytes) -> str:
        """Guess the video container from magic bytes, defaulting to 'mp4'.

        Bug fix: ``imghdr.what`` only recognises *image* formats and returns
        None for video data, which previously produced malformed
        ``data:video/None;base64,...`` URIs.
        """
        if len(video_bytes) >= 12 and video_bytes[4:8] == b'ftyp':
            return 'mp4'  # ISO base media file format (mp4/mov family)
        if video_bytes.startswith(b'\x1aE\xdf\xa3'):
            return 'webm'  # Matroska / WebM EBML header
        if video_bytes.startswith(b'RIFF') and video_bytes[8:12] == b'AVI ':
            return 'avi'
        return 'mp4'

    def save_context(self, details, workflow_manage):
        """Restore answer/question into the context from saved execution details."""
        self.context['answer'] = details.get('answer')
        self.context['question'] = details.get('question')
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

    def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
                model_params_setting,
                chat_record_id,
                video,
                **kwargs) -> NodeResult:
        """Run the video understanding step.

        @param model_id: id of the multimodal model to call
        @param system: optional role/system prompt (may be empty)
        @param prompt: user prompt template
        @param dialogue_number: number of history rounds to include
        @param dialogue_type: history storage type ('WORKFLOW' or node-level)
        @param stream: stream the model response when True
        @param video: list of video references (dicts with file_id, or http URLs)
        """
        # Normalize invalid parameters.
        if video is None or not isinstance(video, list):
            video = []
        workspace_id = self.workflow_manage.get_body().get('workspace_id')
        video_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
                                                               **model_params_setting)
        # History messages stored in execution details must not embed video content.
        history_message = self.get_history_message_for_details(history_chat_record, dialogue_number)
        self.context['history_message'] = history_message
        question = self.generate_prompt_question(prompt)
        self.context['question'] = question.content
        # Build the real message list: [system?] + full history + current prompt/videos.
        message_list = self.generate_message_list(video_model, system, prompt,
                                                  self.get_history_message(history_chat_record, dialogue_number),
                                                  video)
        self.context['message_list'] = message_list
        self.context['video_list'] = video
        self.context['dialogue_type'] = dialogue_type
        if stream:
            r = video_model.stream(message_list)
            return NodeResult({'result': r, 'chat_model': video_model, 'message_list': message_list,
                               'history_message': history_message, 'question': question.content}, {},
                              _write_context=write_context_stream)
        else:
            r = video_model.invoke(message_list)
            return NodeResult({'result': r, 'chat_model': video_model, 'message_list': message_list,
                               'history_message': history_message, 'question': question.content}, {},
                              _write_context=write_context)

    def get_history_message_for_details(self, history_chat_record, dialogue_number):
        """History pairs for execution details (videos referenced by URL only)."""
        start_index = len(history_chat_record) - dialogue_number
        return reduce(lambda x, y: [*x, *y], [
            [self.generate_history_human_message_for_details(history_chat_record[index]),
             self.generate_history_ai_message(history_chat_record[index])]
            for index in
            range(start_index if start_index > 0 else 0, len(history_chat_record))], [])

    def generate_history_ai_message(self, chat_record):
        """AI side of a history round.

        Node-level stored answers take precedence over the chat-level answer,
        except for 'WORKFLOW' dialogue storage which always uses the chat answer.
        """
        for val in chat_record.details.values():
            if self.node.id == val['node_id'] and 'video_list' in val:
                if val['dialogue_type'] == 'WORKFLOW':
                    return chat_record.get_ai_message()
                return AIMessage(content=val['answer'])
        return chat_record.get_ai_message()

    def generate_history_human_message_for_details(self, chat_record):
        """Human side of a history round for details; videos as lightweight OSS URLs."""
        for data in chat_record.details.values():
            if self.node.id == data['node_id'] and 'video_list' in data:
                video_list = data['video_list']
                if len(video_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                    return HumanMessage(content=chat_record.problem_text)
                file_id_list = [video.get('file_id') for video in video_list]
                return HumanMessage(content=[
                    {'type': 'text', 'text': data['question']},
                    *[{'type': 'video_url', 'video_url': {'url': f'./oss/file/{file_id}'}} for file_id in
                      file_id_list]
                ])
        return HumanMessage(content=chat_record.problem_text)

    def get_history_message(self, history_chat_record, dialogue_number):
        """History pairs sent to the model (videos embedded as base64 data URIs)."""
        start_index = len(history_chat_record) - dialogue_number
        return reduce(lambda x, y: [*x, *y], [
            [self.generate_history_human_message(history_chat_record[index]),
             self.generate_history_ai_message(history_chat_record[index])]
            for index in
            range(start_index if start_index > 0 else 0, len(history_chat_record))], [])

    def generate_history_human_message(self, chat_record):
        """Human side of a history round for the model, with inlined video data."""
        for data in chat_record.details.values():
            if self.node.id == data['node_id'] and 'video_list' in data:
                video_list = data['video_list']
                if len(video_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                    return HumanMessage(content=chat_record.problem_text)
                video_base64_list = [file_id_to_base64(video.get('file_id')) for video in video_list]
                return HumanMessage(
                    content=[
                        {'type': 'text', 'text': data['question']},
                        *[{'type': 'video_url',
                           'video_url': {'url': f'data:video/{base64_video[1]};base64,{base64_video[0]}'}} for
                          base64_video in video_base64_list]
                    ])
        return HumanMessage(content=chat_record.problem_text)

    def generate_prompt_question(self, prompt):
        """Render the prompt template into a HumanMessage."""
        return HumanMessage(self.workflow_manage.generate_prompt(prompt))

    def generate_message_list(self, video_model, system: str, prompt: str, history_message, video):
        """Assemble [system?] + history + current human message (text + videos)."""
        if video is not None and len(video) > 0:
            # Attach each video either by URL or as a base64 data URI.
            videos = []
            for item in video:
                if isinstance(item, str) and item.startswith('http'):
                    videos.append({'type': 'video_url', 'video_url': {'url': item}})
                else:
                    file_id = item['file_id']
                    file = QuerySet(File).filter(id=file_id).first()
                    video_bytes = file.get_bytes()
                    base64_video = base64.b64encode(video_bytes).decode("utf-8")
                    # imghdr.what cannot identify video containers (returns None);
                    # sniff the magic bytes instead.
                    video_format = self._video_format(video_bytes)
                    videos.append(
                        {'type': 'video_url',
                         'video_url': {'url': f'data:video/{video_format};base64,{base64_video}'}})
            messages = [HumanMessage(
                content=[
                    {'type': 'text', 'text': self.workflow_manage.generate_prompt(prompt)},
                    *videos
                ])]
        else:
            messages = [HumanMessage(self.workflow_manage.generate_prompt(prompt))]

        if system is not None and len(system) > 0:
            return [
                SystemMessage(self.workflow_manage.generate_prompt(system)),
                *history_message,
                *messages
            ]
        else:
            return [
                *history_message,
                *messages
            ]

    @staticmethod
    def reset_message_list(message_list: List[BaseMessage], answer_text):
        """Flatten messages into role/content dicts and append the final answer."""
        result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content}
                  for message in message_list]
        result.append({'role': 'ai', 'content': answer_text})
        return result

    def get_details(self, index: int, **kwargs):
        """Execution detail payload rendered in the UI."""
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'system': self.node_params.get('system'),
            'history_message': [{'content': message.content, 'role': message.type} for message in
                                (self.context.get('history_message') if self.context.get(
                                    'history_message') is not None else [])],
            'question': self.context.get('question'),
            'answer': self.context.get('answer'),
            'type': self.node.type,
            'message_tokens': self.context.get('message_tokens'),
            'answer_tokens': self.context.get('answer_tokens'),
            'status': self.status,
            'err_message': self.err_message,
            'video_list': self.context.get('video_list'),
            'dialogue_type': self.context.get('dialogue_type')
        }
|
||||
|
|
@ -94,6 +94,7 @@ class WorkflowManage:
|
|||
base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None,
|
||||
document_list=None,
|
||||
audio_list=None,
|
||||
video_list=None,
|
||||
other_list=None,
|
||||
start_node_id=None,
|
||||
start_node_data=None, chat_record=None, child_node=None):
|
||||
|
|
@ -105,12 +106,15 @@ class WorkflowManage:
|
|||
document_list = []
|
||||
if audio_list is None:
|
||||
audio_list = []
|
||||
if video_list is None:
|
||||
video_list = []
|
||||
if other_list is None:
|
||||
other_list = []
|
||||
self.start_node_id = start_node_id
|
||||
self.start_node = None
|
||||
self.form_data = form_data
|
||||
self.image_list = image_list
|
||||
self.video_list = video_list
|
||||
self.document_list = document_list
|
||||
self.audio_list = audio_list
|
||||
self.other_list = other_list
|
||||
|
|
|
|||
|
|
@ -375,6 +375,7 @@ class ChatSerializers(serializers.Serializer):
|
|||
chat_user_type = self.data.get('chat_user_type')
|
||||
form_data = instance.get('form_data')
|
||||
image_list = instance.get('image_list')
|
||||
video_list = instance.get('video_list')
|
||||
document_list = instance.get('document_list')
|
||||
audio_list = instance.get('audio_list')
|
||||
other_list = instance.get('other_list')
|
||||
|
|
@ -401,6 +402,7 @@ class ChatSerializers(serializers.Serializer):
|
|||
'application_id': str(chat_info.application_id)},
|
||||
WorkFlowPostHandler(chat_info),
|
||||
base_to_response, form_data, image_list, document_list, audio_list,
|
||||
video_list,
|
||||
other_list,
|
||||
instance.get('runtime_node_id'),
|
||||
instance.get('node_data'), chat_record, instance.get('child_node'))
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<path fill-rule="evenodd" clip-rule="evenodd" d="M5.3335 3.33333C5.3335 2.59695 5.93045 2 6.66683 2H19.724C19.9008 2 20.0704 2.07024 20.1954 2.19526L26.4716 8.47141C26.5966 8.59643 26.6668 8.766 26.6668 8.94281V28.6667C26.6668 29.403 26.0699 30 25.3335 30H6.66683C5.93045 30 5.3335 29.403 5.3335 28.6667V3.33333Z" fill="#14C0FF"/>
|
||||
<path d="M20 2.05988C20.072 2.09264 20.1383 2.13825 20.1953 2.19526L26.4714 8.4714C26.5284 8.52841 26.574 8.59467 26.6068 8.66666H21.3333C20.597 8.66666 20 8.06971 20 7.33333V2.05988Z" fill="#11A3D9"/>
|
||||
<path d="M11.3335 16C12.4381 16 13.3335 15.1046 13.3335 14C13.3335 12.8954 12.4381 12 11.3335 12C10.2289 12 9.3335 12.8954 9.3335 14C9.3335 15.1046 10.2289 16 11.3335 16Z" fill="white"/>
|
||||
<path d="M22.2785 14.9317C22.4218 14.7884 22.6668 14.8899 22.6668 15.0925V24.0645C22.6668 24.1901 22.565 24.2919 22.4394 24.2919H13.4674L13.4587 24.2918H9.56142C9.35877 24.2918 9.25728 24.0468 9.40058 23.9035L14.366 18.938C14.4549 18.8492 14.5989 18.8492 14.6877 18.938L16.48 20.7302L22.2785 14.9317Z" fill="white"/>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.1 KiB |
|
|
@ -0,0 +1,11 @@
|
|||
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_5568_15549)">
|
||||
<path d="M0 10C0 5.28595 0 2.92893 1.46447 1.46447C2.92893 0 5.28595 0 10 0C14.714 0 17.0711 0 18.5355 1.46447C20 2.92893 20 5.28595 20 10C20 14.714 20 17.0711 18.5355 18.5355C17.0711 20 14.714 20 10 20C5.28595 20 2.92893 20 1.46447 18.5355C0 17.0711 0 14.714 0 10Z" fill="#3370FF"/>
|
||||
<path d="M15.7291 5.89844C15.7291 5.57483 15.4959 5.3125 15.2083 5.3125H4.79159C4.50394 5.3125 4.27075 5.57483 4.27075 5.89844V14.1016C4.27075 14.4252 4.50394 14.6875 4.79159 14.6875H15.2083C15.4959 14.6875 15.7291 14.4252 15.7291 14.1016V5.89844ZM8.93076 11.9669C8.67348 12.0955 8.36063 11.9912 8.23199 11.734C8.19583 11.6616 8.17701 11.5819 8.17701 11.501V8.49898C8.17701 8.21133 8.41019 7.97815 8.69784 7.97815C8.7787 7.97815 8.85844 7.99697 8.93076 8.03313L11.9328 9.53416C12.1901 9.6628 12.2944 9.97565 12.1657 10.2329C12.1153 10.3337 12.0336 10.4155 11.9328 10.4659L8.93076 11.9669Z" fill="white"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_5568_15549">
|
||||
<rect width="20" height="20" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 1.1 KiB |
|
|
@ -3,7 +3,8 @@
|
|||
<div class="text-center mb-8" v-if="loading">
|
||||
<el-button class="border-primary video-stop-button" @click="stopChat">
|
||||
<app-icon iconName="app-video-stop" class="mr-8"></app-icon>
|
||||
{{ $t('chat.operation.stopChat') }}</el-button
|
||||
{{ $t('chat.operation.stopChat') }}
|
||||
</el-button
|
||||
>
|
||||
</div>
|
||||
|
||||
|
|
@ -42,7 +43,7 @@
|
|||
@mouseleave.stop="mouseleave()"
|
||||
>
|
||||
<div class="flex align-center">
|
||||
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
|
||||
<img :src="getImgUrl(item && item?.name)" alt="" width="24"/>
|
||||
<div class="ml-4 ellipsis-1" :title="item && item?.name">
|
||||
{{ item && item?.name }}
|
||||
</div>
|
||||
|
|
@ -53,7 +54,7 @@
|
|||
v-if="showDelete === item.url"
|
||||
>
|
||||
<el-icon style="font-size: 16px; top: 2px">
|
||||
<CircleCloseFilled />
|
||||
<CircleCloseFilled/>
|
||||
</el-icon>
|
||||
</div>
|
||||
</div>
|
||||
|
|
@ -80,7 +81,7 @@
|
|||
@mouseleave.stop="mouseleave()"
|
||||
>
|
||||
<div class="flex align-center">
|
||||
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
|
||||
<img :src="getImgUrl(item && item?.name)" alt="" width="24"/>
|
||||
<div class="ml-4 ellipsis-1" :title="item && item?.name">
|
||||
{{ item && item?.name }}
|
||||
</div>
|
||||
|
|
@ -91,7 +92,7 @@
|
|||
v-if="showDelete === item.url"
|
||||
>
|
||||
<el-icon style="font-size: 16px; top: 2px">
|
||||
<CircleCloseFilled />
|
||||
<CircleCloseFilled/>
|
||||
</el-icon>
|
||||
</div>
|
||||
</div>
|
||||
|
|
@ -115,7 +116,7 @@
|
|||
@mouseleave.stop="mouseleave()"
|
||||
>
|
||||
<div class="flex align-center">
|
||||
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
|
||||
<img :src="getImgUrl(item && item?.name)" alt="" width="24"/>
|
||||
<div class="ml-4 ellipsis-1" :title="item && item?.name">
|
||||
{{ item && item?.name }}
|
||||
</div>
|
||||
|
|
@ -126,7 +127,7 @@
|
|||
v-if="showDelete === item.url"
|
||||
>
|
||||
<el-icon style="font-size: 16px; top: 2px">
|
||||
<CircleCloseFilled />
|
||||
<CircleCloseFilled/>
|
||||
</el-icon>
|
||||
</div>
|
||||
</div>
|
||||
|
|
@ -146,7 +147,7 @@
|
|||
v-if="showDelete === item.url"
|
||||
>
|
||||
<el-icon style="font-size: 16px; top: 2px">
|
||||
<CircleCloseFilled />
|
||||
<CircleCloseFilled/>
|
||||
</el-icon>
|
||||
</div>
|
||||
<el-image
|
||||
|
|
@ -160,6 +161,31 @@
|
|||
</div>
|
||||
</template>
|
||||
</el-space>
|
||||
<el-space wrap>
|
||||
<template v-for="(item, index) in uploadVideoList" :key="index">
|
||||
<div
|
||||
class="file file-image cursor border border-r-6"
|
||||
@mouseenter.stop="mouseenter(item)"
|
||||
@mouseleave.stop="mouseleave()"
|
||||
>
|
||||
<div
|
||||
@click="deleteFile(item)"
|
||||
class="delete-icon color-secondary"
|
||||
v-if="showDelete === item.url"
|
||||
>
|
||||
<el-icon style="font-size: 16px; top: 2px">
|
||||
<CircleCloseFilled/>
|
||||
</el-icon>
|
||||
</div>
|
||||
<video
|
||||
v-if="item.url"
|
||||
:src="item.url"
|
||||
controls style="width: 40px; height: 40px; display: block"
|
||||
class="border-r-6"
|
||||
/>
|
||||
</div>
|
||||
</template>
|
||||
</el-space>
|
||||
</div>
|
||||
</el-scrollbar>
|
||||
|
||||
|
|
@ -186,7 +212,7 @@
|
|||
|
||||
<div class="operate flex-between">
|
||||
<div>
|
||||
<slot name="userInput" />
|
||||
<slot name="userInput"/>
|
||||
</div>
|
||||
<div class="flex align-center">
|
||||
<template v-if="props.applicationDetails.stt_model_enable">
|
||||
|
|
@ -196,7 +222,7 @@
|
|||
<AppIcon v-if="isMicrophone" iconName="app-keyboard"></AppIcon>
|
||||
<el-icon v-else>
|
||||
<!-- 录音 -->
|
||||
<Microphone />
|
||||
<Microphone/>
|
||||
</el-icon>
|
||||
</el-button>
|
||||
</span>
|
||||
|
|
@ -208,13 +234,13 @@
|
|||
v-if="recorderStatus === 'STOP'"
|
||||
>
|
||||
<el-icon>
|
||||
<Microphone />
|
||||
<Microphone/>
|
||||
</el-icon>
|
||||
</el-button>
|
||||
|
||||
<div v-else class="operate flex align-center">
|
||||
<el-text type="info"
|
||||
>00:{{ recorderTime < 10 ? `0${recorderTime}` : recorderTime }}</el-text
|
||||
>00:{{ recorderTime < 10 ? `0${recorderTime}` : recorderTime }}</el-text
|
||||
>
|
||||
<el-button
|
||||
text
|
||||
|
|
@ -248,16 +274,18 @@
|
|||
>
|
||||
<template #content>
|
||||
<div class="break-all pre-wrap">
|
||||
{{ $t('chat.uploadFile.label') }}:{{ $t('chat.uploadFile.most')
|
||||
}}{{ props.applicationDetails.file_upload_setting.maxFiles
|
||||
{{ $t('chat.uploadFile.label') }}:{{
|
||||
$t('chat.uploadFile.most')
|
||||
}}{{
|
||||
props.applicationDetails.file_upload_setting.maxFiles
|
||||
}}{{ $t('chat.uploadFile.limit') }}
|
||||
{{ props.applicationDetails.file_upload_setting.fileLimit }}MB<br />{{
|
||||
{{ props.applicationDetails.file_upload_setting.fileLimit }}MB<br/>{{
|
||||
$t('chat.uploadFile.fileType')
|
||||
}}:{{ getAcceptList().replace(/\./g, '').replace(/,/g, '、').toUpperCase() }}
|
||||
</div>
|
||||
</template>
|
||||
<el-button text :disabled="checkMaxFilesLimit() || loading" class="mt-4">
|
||||
<el-icon><Paperclip /></el-icon>
|
||||
<el-icon><Paperclip/></el-icon>
|
||||
</el-button>
|
||||
</el-tooltip>
|
||||
</el-upload>
|
||||
|
|
@ -280,7 +308,7 @@
|
|||
src="@/assets/icon_send.svg"
|
||||
alt=""
|
||||
/>
|
||||
<SendIcon v-show="!isDisabledChat && !loading && !uploadLoading" />
|
||||
<SendIcon v-show="!isDisabledChat && !loading && !uploadLoading"/>
|
||||
</el-button>
|
||||
</template>
|
||||
</div>
|
||||
|
|
@ -297,24 +325,25 @@
|
|||
</div>
|
||||
</template>
|
||||
<script setup lang="ts">
|
||||
import { ref, computed, onMounted, nextTick, reactive, type Ref } from 'vue'
|
||||
import { t } from '@/locales'
|
||||
import {ref, computed, onMounted, nextTick, reactive, type Ref} from 'vue'
|
||||
import {t} from '@/locales'
|
||||
import Recorder from 'recorder-core'
|
||||
import TouchChat from './TouchChat.vue'
|
||||
import applicationApi from '@/api/application/application'
|
||||
import { MsgAlert } from '@/utils/message'
|
||||
import { type chatType } from '@/api/type/application'
|
||||
import { useRoute, useRouter } from 'vue-router'
|
||||
import { getImgUrl } from '@/utils/common'
|
||||
import {MsgAlert} from '@/utils/message'
|
||||
import {type chatType} from '@/api/type/application'
|
||||
import {useRoute, useRouter} from 'vue-router'
|
||||
import {getImgUrl} from '@/utils/common'
|
||||
import bus from '@/bus'
|
||||
import 'recorder-core/src/engine/mp3'
|
||||
import 'recorder-core/src/engine/mp3-engine'
|
||||
import { MsgWarning } from '@/utils/message'
|
||||
import {MsgWarning} from '@/utils/message'
|
||||
import chatAPI from '@/api/chat/chat'
|
||||
|
||||
const router = useRouter()
|
||||
const route = useRoute()
|
||||
const {
|
||||
query: { mode, question },
|
||||
query: {mode, question},
|
||||
} = route as any
|
||||
const quickInputRef = ref()
|
||||
const props = withDefaults(
|
||||
|
|
@ -373,12 +402,12 @@ const upload = ref()
|
|||
|
||||
const imageExtensions = ['JPG', 'JPEG', 'PNG', 'GIF', 'BMP']
|
||||
const documentExtensions = ['PDF', 'DOCX', 'TXT', 'XLS', 'XLSX', 'MD', 'HTML', 'CSV']
|
||||
const videoExtensions: any = []
|
||||
const videoExtensions: any = ['MP4', 'MOV', 'AVI']
|
||||
const audioExtensions = ['MP3', 'WAV', 'OGG', 'AAC', 'M4A']
|
||||
const otherExtensions = ref(['PPT', 'DOC'])
|
||||
|
||||
const getAcceptList = () => {
|
||||
const { image, document, audio, video, other } = props.applicationDetails.file_upload_setting
|
||||
const {image, document, audio, video, other} = props.applicationDetails.file_upload_setting
|
||||
let accepts: any = []
|
||||
if (image) {
|
||||
accepts = [...imageExtensions]
|
||||
|
|
@ -408,15 +437,15 @@ const checkMaxFilesLimit = () => {
|
|||
return (
|
||||
props.applicationDetails.file_upload_setting.maxFiles <=
|
||||
uploadImageList.value.length +
|
||||
uploadDocumentList.value.length +
|
||||
uploadAudioList.value.length +
|
||||
uploadVideoList.value.length +
|
||||
uploadOtherList.value.length
|
||||
uploadDocumentList.value.length +
|
||||
uploadAudioList.value.length +
|
||||
uploadVideoList.value.length +
|
||||
uploadOtherList.value.length
|
||||
)
|
||||
}
|
||||
const filePromisionDict: any = ref<any>({})
|
||||
const uploadFile = async (file: any, fileList: any) => {
|
||||
const { maxFiles, fileLimit } = props.applicationDetails.file_upload_setting
|
||||
const {maxFiles, fileLimit} = props.applicationDetails.file_upload_setting
|
||||
// 单次上传文件数量限制
|
||||
const file_limit_once =
|
||||
uploadImageList.value.length +
|
||||
|
|
@ -582,14 +611,17 @@ const TouchEnd = (bool?: boolean) => {
|
|||
}
|
||||
}
|
||||
// 取消录音控制台日志
|
||||
Recorder.CLog = function () {}
|
||||
Recorder.CLog = function () {
|
||||
}
|
||||
|
||||
class RecorderManage {
|
||||
recorder?: any
|
||||
uploadRecording: (blob: Blob, duration: number) => void
|
||||
|
||||
constructor(uploadRecording: (blob: Blob, duration: number) => void) {
|
||||
this.uploadRecording = uploadRecording
|
||||
}
|
||||
|
||||
open(callback?: () => void) {
|
||||
const recorder = new Recorder({
|
||||
type: 'mp3',
|
||||
|
|
@ -605,6 +637,7 @@ class RecorderManage {
|
|||
}, this.errorCallBack)
|
||||
}
|
||||
}
|
||||
|
||||
start() {
|
||||
if (this.recorder) {
|
||||
this.recorder.start()
|
||||
|
|
@ -624,6 +657,7 @@ class RecorderManage {
|
|||
}, this.errorCallBack)
|
||||
}
|
||||
}
|
||||
|
||||
stop() {
|
||||
if (this.recorder) {
|
||||
this.recorder.stop(
|
||||
|
|
@ -643,6 +677,7 @@ class RecorderManage {
|
|||
)
|
||||
}
|
||||
}
|
||||
|
||||
close() {
|
||||
if (this.recorder) {
|
||||
this.recorder.close()
|
||||
|
|
@ -673,6 +708,7 @@ class RecorderManage {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
const getSpeechToTextAPI = () => {
|
||||
if (props.type === 'ai-chat') {
|
||||
return (id?: any, data?: any, loading?: Ref<boolean>) => {
|
||||
|
|
@ -787,6 +823,7 @@ const getQuestion = () => {
|
|||
|
||||
return inputValue.value.trim()
|
||||
}
|
||||
|
||||
function autoSendMessage() {
|
||||
props
|
||||
.validate()
|
||||
|
|
@ -835,6 +872,7 @@ function sendChatHandle(event?: any) {
|
|||
insertNewlineAtCursor(event)
|
||||
}
|
||||
}
|
||||
|
||||
const insertNewlineAtCursor = (event?: any) => {
|
||||
const textarea = quickInputRef.value.$el.querySelector(
|
||||
'.el-textarea__inner',
|
||||
|
|
@ -865,6 +903,7 @@ function mouseleave() {
|
|||
function stopChat() {
|
||||
bus.emit('chat:stop')
|
||||
}
|
||||
|
||||
onMounted(() => {
|
||||
bus.on('chat-input', (message: string) => {
|
||||
inputValue.value = message
|
||||
|
|
@ -876,16 +915,16 @@ onMounted(() => {
|
|||
// 获取当前路由信息
|
||||
const route = router.currentRoute.value
|
||||
// 复制query对象
|
||||
const query = { ...route.query }
|
||||
const query = {...route.query}
|
||||
// 删除特定的参数
|
||||
delete query.question
|
||||
const newRoute =
|
||||
Object.entries(query)?.length > 0
|
||||
? route.path +
|
||||
'?' +
|
||||
Object.entries(query)
|
||||
.map(([key, value]) => `${key}=${value}`)
|
||||
.join('&')
|
||||
'?' +
|
||||
Object.entries(query)
|
||||
.map(([key, value]) => `${key}=${value}`)
|
||||
.join('&')
|
||||
: route.path
|
||||
|
||||
history.pushState(null, '', '/chat' + newRoute)
|
||||
|
|
@ -929,12 +968,14 @@ onMounted(() => {
|
|||
|
||||
.operate {
|
||||
padding: 6px 10px;
|
||||
|
||||
.el-icon {
|
||||
font-size: 20px;
|
||||
}
|
||||
|
||||
.sent-button {
|
||||
max-height: none;
|
||||
|
||||
.el-icon {
|
||||
font-size: 24px;
|
||||
}
|
||||
|
|
@ -950,6 +991,7 @@ onMounted(() => {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
.file-image {
|
||||
position: relative;
|
||||
overflow: inherit;
|
||||
|
|
@ -974,12 +1016,14 @@ onMounted(() => {
|
|||
position: fixed;
|
||||
bottom: 0;
|
||||
font-size: 1rem;
|
||||
|
||||
.el-icon {
|
||||
font-size: 1.4rem !important;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
.popperUserInput {
|
||||
position: absolute;
|
||||
z-index: 999;
|
||||
|
|
|
|||
|
|
@ -554,6 +554,92 @@
|
|||
</div>
|
||||
</div>
|
||||
</template>
|
||||
<!-- 视频理解 -->
|
||||
<template v-if="data.type == WorkflowType.VideoUnderstandNode">
|
||||
<div class="card-never border-r-6" v-if="data.type !== WorkflowType.Application">
|
||||
<h5 class="p-8-12">
|
||||
{{ $t('views.application.form.roleSettings.label') }}
|
||||
</h5>
|
||||
<div class="p-8-12 border-t-dashed lighter">
|
||||
{{ data.system || '-' }}
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-never border-r-6 mt-8" v-if="data.type !== WorkflowType.Application">
|
||||
<h5 class="p-8-12">{{ $t('chat.history') }}</h5>
|
||||
<div class="p-8-12 border-t-dashed lighter">
|
||||
<template v-if="data.history_message?.length > 0">
|
||||
<p
|
||||
class="mt-4 mb-4"
|
||||
v-for="(history, historyIndex) in data.history_message"
|
||||
:key="historyIndex"
|
||||
>
|
||||
<span class="color-secondary mr-4">{{ history.role }}:</span>
|
||||
|
||||
<span v-if="Array.isArray(history.content)">
|
||||
<template v-for="(h, i) in history.content" :key="i">
|
||||
<el-image
|
||||
v-if="h.type === 'video_url'"
|
||||
:src="h.video_url.url"
|
||||
alt=""
|
||||
fit="cover"
|
||||
style="width: 40px; height: 40px; display: inline-block"
|
||||
class="border-r-6 mr-8"
|
||||
/>
|
||||
|
||||
<span v-else>{{ h.text }}<br /></span>
|
||||
</template>
|
||||
</span>
|
||||
|
||||
<span v-else>{{ history.content }}</span>
|
||||
</p>
|
||||
</template>
|
||||
<template v-else> -</template>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-never border-r-6 mt-8">
|
||||
<h5 class="p-8-12">
|
||||
{{ $t('chat.executionDetails.currentChat') }}
|
||||
</h5>
|
||||
<div class="p-8-12 border-t-dashed lighter pre-wrap">
|
||||
<div v-if="data.video_url?.length > 0">
|
||||
<el-space wrap>
|
||||
<template v-for="(f, i) in data.video_url" :key="i">
|
||||
<el-image
|
||||
:src="f.url"
|
||||
alt=""
|
||||
fit="cover"
|
||||
style="width: 40px; height: 40px; display: block"
|
||||
class="border-r-6"
|
||||
/>
|
||||
</template>
|
||||
</el-space>
|
||||
</div>
|
||||
<div>
|
||||
{{ data.question || '-' }}
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
<div class="card-never border-r-6 mt-8">
|
||||
<h5 class="p-8-12">
|
||||
{{
|
||||
data.type == WorkflowType.Application
|
||||
? $t('common.param.outputParam')
|
||||
: $t('chat.executionDetails.answer')
|
||||
}}
|
||||
</h5>
|
||||
<div class="p-8-12 border-t-dashed lighter">
|
||||
<MdPreview
|
||||
v-if="data.answer"
|
||||
ref="editorRef"
|
||||
editorId="preview-only"
|
||||
:modelValue="data.answer"
|
||||
style="background: none"
|
||||
noImgZoomIn
|
||||
/>
|
||||
<template v-else> -</template>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
<!-- 图片生成 -->
|
||||
<template v-if="data.type == WorkflowType.ImageGenerateNode">
|
||||
<div class="card-never border-r-6 mt-8">
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ export enum WorkflowType {
|
|||
LoopContinueNode = 'loop-continue-node',
|
||||
LoopBreakNode = 'loop-break-node',
|
||||
VariableSplittingNode = 'variable-splitting-node',
|
||||
VideoUnderstandNode = 'video-understand-node',
|
||||
}
|
||||
export enum WorkflowMode {
|
||||
// 应用工作流
|
||||
|
|
|
|||
|
|
@ -108,6 +108,7 @@ export default {
|
|||
label: 'File types allowed for upload',
|
||||
documentText: 'Requires "Document Content Extraction" node to parse document content',
|
||||
imageText: 'Requires "Image Understanding" node to parse image content',
|
||||
videoText: 'Requires "Video Understanding" node to parse video content',
|
||||
audioText: 'Requires "Speech-to-Text" node to parse audio content',
|
||||
},
|
||||
},
|
||||
|
|
|
|||
|
|
@ -112,6 +112,7 @@ export default {
|
|||
documentText: '需要使用“文档内容提取”节点解析文档内容',
|
||||
imageText: '需要使用“视觉模型”节点解析图片内容',
|
||||
audioText: '需要使用“语音转文本”节点解析音频内容',
|
||||
videoText: '需要使用“视频理解”节点解析视频内容',
|
||||
otherText: '需要自行解析该类型文件',
|
||||
},
|
||||
},
|
||||
|
|
@ -286,6 +287,19 @@ export default {
|
|||
placeholder: '请描述不想生成的视频内容,比如:颜色、血腥内容',
|
||||
},
|
||||
},
|
||||
videoUnderstandNode: {
|
||||
label: '视频理解',
|
||||
text: '识别出视频中的对象、场景等信息回答用户问题',
|
||||
answer: 'AI 回答内容',
|
||||
model: {
|
||||
label: '视觉模型',
|
||||
requiredMessage: '请选择视觉模型',
|
||||
},
|
||||
image: {
|
||||
label: '选择视频',
|
||||
requiredMessage: '请选择视频',
|
||||
},
|
||||
},
|
||||
imageToVideoGenerate: {
|
||||
label: '图生视频',
|
||||
text: '根据提供的图片生成视频',
|
||||
|
|
|
|||
|
|
@ -109,6 +109,7 @@ export default {
|
|||
label: '上傳的文件類型',
|
||||
documentText: '需要使用「文檔內容提取」節點解析文檔內容',
|
||||
imageText: '需要使用「圖片理解」節點解析圖片內容',
|
||||
videoText: '需要使用「視頻理解」節點解析視頻內容',
|
||||
audioText: '需要使用「語音轉文本」節點解析音頻內容',
|
||||
},
|
||||
},
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
import { WorkflowType, WorkflowMode } from '@/enums/application'
|
||||
import { t } from '@/locales'
|
||||
import {WorkflowType, WorkflowMode} from '@/enums/application'
|
||||
import {t} from '@/locales'
|
||||
|
||||
export const startNode = {
|
||||
id: WorkflowType.Start,
|
||||
|
|
@ -17,7 +17,7 @@ export const startNode = {
|
|||
},
|
||||
],
|
||||
globalFields: [
|
||||
{ label: t('views.applicationWorkflow.nodes.startNode.currentTime'), value: 'time' },
|
||||
{label: t('views.applicationWorkflow.nodes.startNode.currentTime'), value: 'time'},
|
||||
{
|
||||
label: t('views.application.form.historyRecord.label'),
|
||||
value: 'history_context',
|
||||
|
|
@ -28,9 +28,9 @@ export const startNode = {
|
|||
},
|
||||
],
|
||||
},
|
||||
fields: [{ label: t('views.applicationWorkflow.nodes.startNode.question'), value: 'question' }],
|
||||
fields: [{label: t('views.applicationWorkflow.nodes.startNode.question'), value: 'question'}],
|
||||
globalFields: [
|
||||
{ label: t('views.applicationWorkflow.nodes.startNode.currentTime'), value: 'time' },
|
||||
{label: t('views.applicationWorkflow.nodes.startNode.currentTime'), value: 'time'},
|
||||
],
|
||||
showNode: true,
|
||||
},
|
||||
|
|
@ -53,7 +53,7 @@ export const baseNode = {
|
|||
},
|
||||
config: {},
|
||||
showNode: true,
|
||||
user_input_config: { title: t('chat.userInput') },
|
||||
user_input_config: {title: t('chat.userInput')},
|
||||
user_input_field_list: [],
|
||||
},
|
||||
}
|
||||
|
|
@ -271,6 +271,25 @@ export const imageUnderstandNode = {
|
|||
},
|
||||
}
|
||||
|
||||
export const videoUnderstandNode = {
|
||||
type: WorkflowType.VideoUnderstandNode,
|
||||
text: t('views.applicationWorkflow.nodes.videoUnderstandNode.text'),
|
||||
label: t('views.applicationWorkflow.nodes.videoUnderstandNode.label'),
|
||||
height: 252,
|
||||
properties: {
|
||||
stepName: t('views.applicationWorkflow.nodes.videoUnderstandNode.label'),
|
||||
config: {
|
||||
fields: [
|
||||
{
|
||||
label: t('views.applicationWorkflow.nodes.videoUnderstandNode.answer'),
|
||||
value: 'answer',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
export const variableAssignNode = {
|
||||
type: WorkflowType.VariableAssignNode,
|
||||
text: t('views.applicationWorkflow.nodes.variableAssignNode.text'),
|
||||
|
|
@ -546,6 +565,7 @@ export const menuNodes = [
|
|||
intentNode,
|
||||
imageGenerateNode,
|
||||
imageUnderstandNode,
|
||||
videoUnderstandNode,
|
||||
textToSpeechNode,
|
||||
speechToTextNode,
|
||||
textToVideoNode,
|
||||
|
|
@ -553,7 +573,7 @@ export const menuNodes = [
|
|||
questionNode,
|
||||
],
|
||||
},
|
||||
{ label: t('views.knowledge.title'), list: [searchKnowledgeNode, rerankerNode] },
|
||||
{label: t('views.knowledge.title'), list: [searchKnowledgeNode, rerankerNode]},
|
||||
{
|
||||
label: t('views.applicationWorkflow.nodes.classify.businessLogic'),
|
||||
list: [conditionNode, formNode, variableAssignNode, replyNode, loopNode],
|
||||
|
|
@ -575,6 +595,7 @@ export const applicationLoopMenuNodes = [
|
|||
intentNode,
|
||||
questionNode,
|
||||
imageGenerateNode,
|
||||
videoUnderstandNode,
|
||||
imageUnderstandNode,
|
||||
textToSpeechNode,
|
||||
speechToTextNode,
|
||||
|
|
@ -582,7 +603,7 @@ export const applicationLoopMenuNodes = [
|
|||
imageToVideoNode,
|
||||
],
|
||||
},
|
||||
{ label: t('views.knowledge.title'), list: [searchKnowledgeNode, rerankerNode] },
|
||||
{label: t('views.knowledge.title'), list: [searchKnowledgeNode, rerankerNode]},
|
||||
{
|
||||
label: t('views.applicationWorkflow.nodes.classify.businessLogic'),
|
||||
list: [conditionNode, formNode, variableAssignNode, replyNode, loopContinueNode, loopBreakNode],
|
||||
|
|
@ -646,22 +667,22 @@ export const applicationNode = {
|
|||
}
|
||||
|
||||
export const compareList = [
|
||||
{ value: 'is_null', label: t('views.applicationWorkflow.compare.is_null') },
|
||||
{ value: 'is_not_null', label: t('views.applicationWorkflow.compare.is_not_null') },
|
||||
{ value: 'contain', label: t('views.applicationWorkflow.compare.contain') },
|
||||
{ value: 'not_contain', label: t('views.applicationWorkflow.compare.not_contain') },
|
||||
{ value: 'eq', label: t('views.applicationWorkflow.compare.eq') },
|
||||
{ value: 'ge', label: t('views.applicationWorkflow.compare.ge') },
|
||||
{ value: 'gt', label: t('views.applicationWorkflow.compare.gt') },
|
||||
{ value: 'le', label: t('views.applicationWorkflow.compare.le') },
|
||||
{ value: 'lt', label: t('views.applicationWorkflow.compare.lt') },
|
||||
{ value: 'len_eq', label: t('views.applicationWorkflow.compare.len_eq') },
|
||||
{ value: 'len_ge', label: t('views.applicationWorkflow.compare.len_ge') },
|
||||
{ value: 'len_gt', label: t('views.applicationWorkflow.compare.len_gt') },
|
||||
{ value: 'len_le', label: t('views.applicationWorkflow.compare.len_le') },
|
||||
{ value: 'len_lt', label: t('views.applicationWorkflow.compare.len_lt') },
|
||||
{ value: 'is_true', label: t('views.applicationWorkflow.compare.is_true') },
|
||||
{ value: 'is_not_true', label: t('views.applicationWorkflow.compare.is_not_true') },
|
||||
{value: 'is_null', label: t('views.applicationWorkflow.compare.is_null')},
|
||||
{value: 'is_not_null', label: t('views.applicationWorkflow.compare.is_not_null')},
|
||||
{value: 'contain', label: t('views.applicationWorkflow.compare.contain')},
|
||||
{value: 'not_contain', label: t('views.applicationWorkflow.compare.not_contain')},
|
||||
{value: 'eq', label: t('views.applicationWorkflow.compare.eq')},
|
||||
{value: 'ge', label: t('views.applicationWorkflow.compare.ge')},
|
||||
{value: 'gt', label: t('views.applicationWorkflow.compare.gt')},
|
||||
{value: 'le', label: t('views.applicationWorkflow.compare.le')},
|
||||
{value: 'lt', label: t('views.applicationWorkflow.compare.lt')},
|
||||
{value: 'len_eq', label: t('views.applicationWorkflow.compare.len_eq')},
|
||||
{value: 'len_ge', label: t('views.applicationWorkflow.compare.len_ge')},
|
||||
{value: 'len_gt', label: t('views.applicationWorkflow.compare.len_gt')},
|
||||
{value: 'len_le', label: t('views.applicationWorkflow.compare.len_le')},
|
||||
{value: 'len_lt', label: t('views.applicationWorkflow.compare.len_lt')},
|
||||
{value: 'is_true', label: t('views.applicationWorkflow.compare.is_true')},
|
||||
{value: 'is_not_true', label: t('views.applicationWorkflow.compare.is_not_true')},
|
||||
]
|
||||
|
||||
export const nodeDict: any = {
|
||||
|
|
@ -693,7 +714,9 @@ export const nodeDict: any = {
|
|||
[WorkflowType.LoopBreakNode]: loopBodyNode,
|
||||
[WorkflowType.LoopContinueNode]: loopContinueNode,
|
||||
[WorkflowType.VariableSplittingNode]: variableSplittingNode,
|
||||
[WorkflowType.VideoUnderstandNode]: videoUnderstandNode,
|
||||
}
|
||||
|
||||
export function isWorkFlow(type: string | undefined) {
|
||||
return type === 'WORK_FLOW'
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
import { WorkflowType, WorkflowMode } from '@/enums/application'
|
||||
import {WorkflowType, WorkflowMode} from '@/enums/application'
|
||||
|
||||
import { t } from '@/locales'
|
||||
import {t} from '@/locales'
|
||||
|
||||
const end_nodes: Array<string> = [
|
||||
WorkflowType.AiChat,
|
||||
|
|
@ -18,6 +18,7 @@ const end_nodes: Array<string> = [
|
|||
WorkflowType.LoopBodyNode,
|
||||
WorkflowType.LoopNode,
|
||||
WorkflowType.LoopBreakNode,
|
||||
WorkflowType.VideoUnderstandNode
|
||||
]
|
||||
|
||||
const loop_end_nodes: Array<string> = [
|
||||
|
|
@ -26,6 +27,7 @@ const loop_end_nodes: Array<string> = [
|
|||
WorkflowType.ToolLib,
|
||||
WorkflowType.ToolLibCustom,
|
||||
WorkflowType.ImageUnderstandNode,
|
||||
WorkflowType.VideoUnderstandNode,
|
||||
WorkflowType.Application,
|
||||
WorkflowType.SpeechToTextNode,
|
||||
WorkflowType.TextToSpeechNode,
|
||||
|
|
@ -42,17 +44,20 @@ const end_nodes_dict = {
|
|||
[WorkflowMode.Application]: end_nodes,
|
||||
[WorkflowMode.ApplicationLoop]: loop_end_nodes,
|
||||
}
|
||||
|
||||
export class WorkFlowInstance {
|
||||
nodes
|
||||
edges
|
||||
workFlowNodes: Array<any>
|
||||
workflowModel: WorkflowMode
|
||||
|
||||
constructor(workflow: { nodes: Array<any>; edges: Array<any> }, workflowModel?: WorkflowMode) {
|
||||
this.nodes = workflow.nodes
|
||||
this.edges = workflow.edges
|
||||
this.workFlowNodes = []
|
||||
this.workflowModel = workflowModel ? workflowModel : WorkflowMode.Application
|
||||
}
|
||||
|
||||
/**
|
||||
* 校验开始节点
|
||||
*/
|
||||
|
|
@ -66,6 +71,7 @@ export class WorkFlowInstance {
|
|||
throw t('views.applicationWorkflow.validate.startNodeOnly')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 校验基本信息节点
|
||||
*/
|
||||
|
|
@ -77,6 +83,7 @@ export class WorkFlowInstance {
|
|||
throw t('views.applicationWorkflow.validate.baseNodeOnly')
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 校验节点
|
||||
*/
|
||||
|
|
@ -103,6 +110,7 @@ export class WorkFlowInstance {
|
|||
)
|
||||
return start_node_list[0]
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取基本节点
|
||||
* @returns 基本节点
|
||||
|
|
@ -111,9 +119,11 @@ export class WorkFlowInstance {
|
|||
const base_node_list = this.nodes.filter((item) => item.id === WorkflowType.Base)
|
||||
return base_node_list[0]
|
||||
}
|
||||
|
||||
exist_break_node() {
|
||||
return this.nodes.some((item) => item.type === WorkflowType.LoopBreakNode)
|
||||
}
|
||||
|
||||
/**
|
||||
* 校验工作流
|
||||
* @param up_node 上一个节点
|
||||
|
|
@ -129,6 +139,7 @@ export class WorkFlowInstance {
|
|||
this._is_valid_work_flow(next_node)
|
||||
}
|
||||
}
|
||||
|
||||
private is_valid_work_flow() {
|
||||
this.workFlowNodes = []
|
||||
this._is_valid_work_flow()
|
||||
|
|
@ -140,6 +151,7 @@ export class WorkFlowInstance {
|
|||
}
|
||||
this.workFlowNodes = []
|
||||
}
|
||||
|
||||
/**
|
||||
* 获取流程下一个节点列表
|
||||
* @param node 节点
|
||||
|
|
@ -156,6 +168,7 @@ export class WorkFlowInstance {
|
|||
}
|
||||
return node_list
|
||||
}
|
||||
|
||||
private is_valid_nodes() {
|
||||
for (const node of this.nodes) {
|
||||
if (
|
||||
|
|
@ -169,6 +182,7 @@ export class WorkFlowInstance {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* 校验节点
|
||||
* @param node 节点
|
||||
|
|
|
|||
|
|
@ -0,0 +1,6 @@
|
|||
<template>
|
||||
<el-avatar shape="square">
|
||||
<img src="@/assets/workflow/icon_video.svg" style="width: 100%" alt="" />
|
||||
</el-avatar>
|
||||
</template>
|
||||
<script setup lang="ts"></script>
|
||||
|
|
@ -126,6 +126,33 @@
|
|||
</div>
|
||||
<el-checkbox v-model="form_data.audio" @change="form_data.audio = !form_data.audio" />
|
||||
</div>
|
||||
</el-card>
|
||||
<el-card
|
||||
shadow="hover"
|
||||
class="card-checkbox cursor w-full mb-8"
|
||||
:class="form_data.video ? 'active' : ''"
|
||||
style="--el-card-padding: 8px 16px"
|
||||
@click.stop="form_data.video = !form_data.video"
|
||||
>
|
||||
<div class="flex-between">
|
||||
<div class="flex align-center">
|
||||
<img class="mr-12" src="@/assets/workflow/icon_file-video.svg" alt="" />
|
||||
<div>
|
||||
<p class="line-height-22 mt-4">
|
||||
{{ $t('common.fileUpload.video') }}
|
||||
<el-text class="color-secondary"
|
||||
>{{
|
||||
$t(
|
||||
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.videoText'
|
||||
)
|
||||
}}
|
||||
</el-text>
|
||||
</p>
|
||||
<p>{{ videoExtensions.join('、') }}</p>
|
||||
</div>
|
||||
</div>
|
||||
<el-checkbox v-model="form_data.video" @change="form_data.video = !form_data.video" />
|
||||
</div>
|
||||
</el-card>
|
||||
<el-card
|
||||
shadow="hover"
|
||||
|
|
@ -215,6 +242,7 @@ const InputRef = ref<InputInstance>()
|
|||
const documentExtensions = ['TXT', 'MD', 'DOCX', 'HTML', 'CSV', 'XLSX', 'XLS', 'PDF']
|
||||
const imageExtensions = ['JPG', 'JPEG', 'PNG', 'GIF']
|
||||
const audioExtensions = ['MP3', 'WAV', 'OGG', 'ACC', 'M4A']
|
||||
const videoExtensions: any = ['MP4', 'MOV', 'AVI']
|
||||
|
||||
const form_data = ref({
|
||||
maxFiles: 3,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,14 @@
|
|||
import VideoUnderstandNodeVue from './index.vue'
|
||||
import { AppNode, AppNodeModel } from '@/workflow/common/app-node'
|
||||
|
||||
class VideoUnderstandNode extends AppNode {
|
||||
constructor(props: any) {
|
||||
super(props, VideoUnderstandNodeVue)
|
||||
}
|
||||
}
|
||||
|
||||
export default {
|
||||
type: 'video-understand-node',
|
||||
model: AppNodeModel,
|
||||
view: VideoUnderstandNode
|
||||
}
|
||||
|
|
@ -0,0 +1,306 @@
|
|||
<template>
|
||||
<NodeContainer :node-model="nodeModel">
|
||||
<h5 class="title-decoration-1 mb-8">{{ $t('views.applicationWorkflow.nodeSetting') }}</h5>
|
||||
<el-card shadow="never" class="card-never">
|
||||
<el-form
|
||||
@submit.prevent
|
||||
:model="form_data"
|
||||
label-position="top"
|
||||
require-asterisk-position="right"
|
||||
label-width="auto"
|
||||
ref="aiChatNodeFormRef"
|
||||
hide-required-asterisk
|
||||
>
|
||||
<el-form-item
|
||||
:label="$t('views.applicationWorkflow.nodes.videoUnderstandNode.model.label')"
|
||||
prop="model_id"
|
||||
:rules="{
|
||||
required: true,
|
||||
message: $t(
|
||||
'views.applicationWorkflow.nodes.videoUnderstandNode.model.requiredMessage',
|
||||
),
|
||||
trigger: 'change',
|
||||
}"
|
||||
>
|
||||
<template #label>
|
||||
<div class="flex-between w-full">
|
||||
<div>
|
||||
<span
|
||||
>{{ t('views.applicationWorkflow.nodes.videoUnderstandNode.model.label')
|
||||
}}<span class="color-danger">*</span></span
|
||||
>
|
||||
</div>
|
||||
<el-button
|
||||
:disabled="!form_data.model_id"
|
||||
type="primary"
|
||||
link
|
||||
@click="openAIParamSettingDialog(form_data.model_id)"
|
||||
@refreshForm="refreshParam"
|
||||
>
|
||||
<AppIcon iconName="app-setting"></AppIcon>
|
||||
</el-button>
|
||||
</div>
|
||||
</template>
|
||||
|
||||
<ModelSelect
|
||||
@wheel="wheel"
|
||||
:teleported="false"
|
||||
v-model="form_data.model_id"
|
||||
:placeholder="
|
||||
$t('views.applicationWorkflow.nodes.videoUnderstandNode.model.requiredMessage')
|
||||
"
|
||||
:options="modelOptions"
|
||||
showFooter
|
||||
:model-type="'IMAGE'"
|
||||
></ModelSelect>
|
||||
</el-form-item>
|
||||
|
||||
<el-form-item>
|
||||
<template #label>
|
||||
<div class="flex-between">
|
||||
<div class="flex align-center">
|
||||
<span>{{ $t('views.application.form.roleSettings.label') }}</span>
|
||||
<el-tooltip
|
||||
effect="dark"
|
||||
:content="$t('views.application.form.roleSettings.tooltip')"
|
||||
placement="right"
|
||||
>
|
||||
<AppIcon iconName="app-warning" class="app-warning-icon ml-4"></AppIcon>
|
||||
</el-tooltip>
|
||||
</div>
|
||||
</div>
|
||||
</template>
|
||||
<MdEditorMagnify
|
||||
:title="$t('views.application.form.roleSettings.label')"
|
||||
v-model="form_data.system"
|
||||
style="height: 100px"
|
||||
@submitDialog="submitSystemDialog"
|
||||
:placeholder="`${t('views.applicationWorkflow.SystemPromptPlaceholder')}{{${t('views.applicationWorkflow.nodes.startNode.label')}.question}}`"
|
||||
/>
|
||||
</el-form-item>
|
||||
<el-form-item
|
||||
:label="$t('views.application.form.prompt.label')"
|
||||
prop="prompt"
|
||||
:rules="{
|
||||
required: true,
|
||||
message: $t('views.application.form.prompt.requiredMessage'),
|
||||
trigger: 'blur',
|
||||
}"
|
||||
>
|
||||
<template #label>
|
||||
<div class="flex align-center">
|
||||
<div class="mr-4">
|
||||
<span
|
||||
>{{ $t('views.application.form.prompt.label')
|
||||
}}<span class="color-danger">*</span></span
|
||||
>
|
||||
</div>
|
||||
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
|
||||
<template #content>{{ $t('views.application.form.prompt.tooltip') }} </template>
|
||||
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
|
||||
</el-tooltip>
|
||||
</div>
|
||||
</template>
|
||||
<MdEditorMagnify
|
||||
@wheel="wheel"
|
||||
:title="$t('views.application.form.prompt.label')"
|
||||
v-model="form_data.prompt"
|
||||
style="height: 150px"
|
||||
@submitDialog="submitDialog"
|
||||
:placeholder="`${t('views.applicationWorkflow.UserPromptPlaceholder')}{{${t('views.applicationWorkflow.nodes.startNode.label')}.question}}`"
|
||||
/>
|
||||
</el-form-item>
|
||||
<el-form-item>
|
||||
<template #label>
|
||||
<div class="flex-between">
|
||||
<div>{{ $t('views.application.form.historyRecord.label') }}</div>
|
||||
<el-select v-model="form_data.dialogue_type" type="small" style="width: 100px">
|
||||
<el-option :label="$t('views.applicationWorkflow.node')" value="NODE" />
|
||||
<el-option :label="$t('views.applicationWorkflow.workflow')" value="WORKFLOW" />
|
||||
</el-select>
|
||||
</div>
|
||||
</template>
|
||||
<el-input-number
|
||||
v-model="form_data.dialogue_number"
|
||||
:min="0"
|
||||
:value-on-clear="0"
|
||||
controls-position="right"
|
||||
class="w-full"
|
||||
:step="1"
|
||||
:step-strictly="true"
|
||||
/>
|
||||
</el-form-item>
|
||||
<el-form-item
|
||||
:label="$t('views.applicationWorkflow.nodes.videoUnderstandNode.image.label')"
|
||||
:rules="{
|
||||
type: 'array',
|
||||
required: true,
|
||||
message: $t(
|
||||
'views.applicationWorkflow.nodes.videoUnderstandNode.image.requiredMessage',
|
||||
),
|
||||
trigger: 'change',
|
||||
}"
|
||||
>
|
||||
<template #label
|
||||
>{{ $t('views.applicationWorkflow.nodes.videoUnderstandNode.image.label')
|
||||
}}<span class="color-danger">*</span></template
|
||||
>
|
||||
<NodeCascader
|
||||
ref="nodeCascaderRef"
|
||||
:nodeModel="nodeModel"
|
||||
class="w-full"
|
||||
:placeholder="
|
||||
$t('views.applicationWorkflow.nodes.videoUnderstandNode.image.requiredMessage')
|
||||
"
|
||||
v-model="form_data.video_list"
|
||||
/>
|
||||
</el-form-item>
|
||||
<el-form-item
|
||||
:label="$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')"
|
||||
@click.prevent
|
||||
>
|
||||
<template #label>
|
||||
<div class="flex align-center">
|
||||
<div class="mr-4">
|
||||
<span>{{
|
||||
$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')
|
||||
}}</span>
|
||||
</div>
|
||||
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
|
||||
<template #content>
|
||||
{{ $t('views.applicationWorkflow.nodes.aiChatNode.returnContent.tooltip') }}
|
||||
</template>
|
||||
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
|
||||
</el-tooltip>
|
||||
</div>
|
||||
</template>
|
||||
<el-switch size="small" v-model="form_data.is_result" />
|
||||
</el-form-item>
|
||||
</el-form>
|
||||
</el-card>
|
||||
<AIModeParamSettingDialog ref="AIModeParamSettingDialogRef" @refresh="refreshParam" />
|
||||
</NodeContainer>
|
||||
</template>
|
||||
|
||||
<script setup lang="ts">
|
||||
import NodeContainer from '@/workflow/common/NodeContainer.vue'
|
||||
import { computed, onMounted, ref, inject } from 'vue'
|
||||
import { groupBy, set } from 'lodash'
|
||||
import NodeCascader from '@/workflow/common/NodeCascader.vue'
|
||||
import type { FormInstance } from 'element-plus'
|
||||
import AIModeParamSettingDialog from '@/views/application/component/AIModeParamSettingDialog.vue'
|
||||
import { t } from '@/locales'
|
||||
import { useRoute } from 'vue-router'
|
||||
import { loadSharedApi } from '@/utils/dynamics-api/shared-api'
|
||||
const getApplicationDetail = inject('getApplicationDetail') as any
|
||||
const route = useRoute()
|
||||
|
||||
const {
|
||||
params: { id },
|
||||
} = route as any
|
||||
|
||||
const apiType = computed(() => {
|
||||
if (route.path.includes('resource-management')) {
|
||||
return 'systemManage'
|
||||
} else {
|
||||
return 'workspace'
|
||||
}
|
||||
})
|
||||
|
||||
const props = defineProps<{ nodeModel: any }>()
|
||||
const modelOptions = ref<any>(null)
|
||||
const AIModeParamSettingDialogRef = ref<InstanceType<typeof AIModeParamSettingDialog>>()
|
||||
|
||||
const aiChatNodeFormRef = ref<FormInstance>()
|
||||
const nodeCascaderRef = ref()
|
||||
const validate = () => {
|
||||
return Promise.all([
|
||||
nodeCascaderRef.value ? nodeCascaderRef.value.validate() : Promise.resolve(''),
|
||||
aiChatNodeFormRef.value?.validate(),
|
||||
]).catch((err: any) => {
|
||||
return Promise.reject({ node: props.nodeModel, errMessage: err })
|
||||
})
|
||||
}
|
||||
|
||||
const wheel = (e: any) => {
|
||||
if (e.ctrlKey === true) {
|
||||
e.preventDefault()
|
||||
return true
|
||||
} else {
|
||||
e.stopPropagation()
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
const defaultPrompt = `{{${t('views.applicationWorkflow.nodes.startNode.label')}.question}}`
|
||||
|
||||
const form = {
|
||||
model_id: '',
|
||||
system: '',
|
||||
prompt: defaultPrompt,
|
||||
dialogue_number: 0,
|
||||
dialogue_type: 'NODE',
|
||||
is_result: true,
|
||||
temperature: null,
|
||||
max_tokens: null,
|
||||
video_list: ['start-node', 'video'],
|
||||
}
|
||||
|
||||
const form_data = computed({
|
||||
get: () => {
|
||||
if (props.nodeModel.properties.node_data) {
|
||||
return props.nodeModel.properties.node_data
|
||||
} else {
|
||||
set(props.nodeModel.properties, 'node_data', form)
|
||||
}
|
||||
return props.nodeModel.properties.node_data
|
||||
},
|
||||
set: (value) => {
|
||||
set(props.nodeModel.properties, 'node_data', value)
|
||||
},
|
||||
})
|
||||
|
||||
const application = getApplicationDetail()
|
||||
function getSelectModel() {
|
||||
const obj =
|
||||
apiType.value === 'systemManage'
|
||||
? {
|
||||
model_type: 'IMAGE',
|
||||
workspace_id: application.value?.workspace_id,
|
||||
}
|
||||
: {
|
||||
model_type: 'IMAGE',
|
||||
}
|
||||
loadSharedApi({ type: 'model', systemType: apiType.value })
|
||||
.getSelectModelList(obj)
|
||||
.then((res: any) => {
|
||||
modelOptions.value = groupBy(res?.data, 'provider')
|
||||
})
|
||||
}
|
||||
|
||||
function submitSystemDialog(val: string) {
|
||||
set(props.nodeModel.properties.node_data, 'system', val)
|
||||
}
|
||||
|
||||
function submitDialog(val: string) {
|
||||
set(props.nodeModel.properties.node_data, 'prompt', val)
|
||||
}
|
||||
|
||||
const openAIParamSettingDialog = (modelId: string) => {
|
||||
if (modelId) {
|
||||
AIModeParamSettingDialogRef.value?.open(modelId, id, form_data.value.model_params_setting)
|
||||
}
|
||||
}
|
||||
|
||||
function refreshParam(data: any) {
|
||||
set(props.nodeModel.properties.node_data, 'model_params_setting', data)
|
||||
}
|
||||
|
||||
onMounted(() => {
|
||||
getSelectModel()
|
||||
|
||||
set(props.nodeModel, 'validate', validate)
|
||||
})
|
||||
</script>
|
||||
|
||||
<style scoped lang="scss"></style>
|
||||
Loading…
Reference in New Issue