feat: implement text-to-video and image-to-video generation nodes with serializers and workflow integration

This commit is contained in:
wxg0103 2025-09-12 14:27:48 +08:00
parent ce6f801a35
commit 7dc132e35c
38 changed files with 1912 additions and 31 deletions

View File

@ -13,6 +13,7 @@ from .direct_reply_node import *
from .document_extract_node import *
from .form_node import *
from .image_generate_step_node import *
from .image_to_video_step_node import BaseImageToVideoNode
from .image_understand_step_node import *
from .mcp_node import BaseMcpNode
from .question_node import *
@ -21,6 +22,7 @@ from .search_knowledge_node import *
from .speech_to_text_step_node import BaseSpeechToTextNode
from .start_node import *
from .text_to_speech_step_node.impl.base_text_to_speech_node import BaseTextToSpeechNode
from .text_to_video_step_node.impl.base_text_to_video_node import BaseTextToVideoNode
from .tool_lib_node import *
from .tool_node import *
from .variable_assign_node import BaseVariableAssignNode
@ -31,7 +33,8 @@ node_list = [BaseStartStepNode, BaseChatNode, BaseSearchKnowledgeNode, BaseQuest
BaseToolNodeNode, BaseToolLibNodeNode, BaseRerankerNode, BaseApplicationNode,
BaseDocumentExtractNode,
BaseImageUnderstandNode, BaseFormNode, BaseSpeechToTextNode, BaseTextToSpeechNode,
BaseImageGenerateNode, BaseVariableAssignNode, BaseMcpNode,BaseIntentNode]
BaseImageGenerateNode, BaseVariableAssignNode, BaseMcpNode, BaseTextToVideoNode, BaseImageToVideoNode,
BaseIntentNode]
def get_node(node_type):

View File

@ -0,0 +1,3 @@
# coding=utf-8
from .impl import *

View File

@ -0,0 +1,64 @@
# coding=utf-8
from typing import Type
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
class ImageToVideoNodeSerializer(serializers.Serializer):
    """Validates the configuration parameters of an image-to-video workflow node."""
    model_id = serializers.CharField(required=True, label=_("Model id"))
    prompt = serializers.CharField(required=True, label=_("Prompt word (positive)"))
    negative_prompt = serializers.CharField(required=False, label=_("Prompt word (negative)"),
                                            allow_null=True, allow_blank=True, )
    # Number of previous dialogue rounds to include as conversation history
    dialogue_number = serializers.IntegerField(required=False, default=0,
                                               label=_("Number of multi-round conversations"))
    # 'NODE' or 'WORKFLOW' — controls how history messages are rebuilt (see the base node)
    dialogue_type = serializers.CharField(required=False, default='NODE',
                                          label=_("Conversation storage type"))
    is_result = serializers.BooleanField(required=False,
                                         label=_('Whether to return content'))
    model_params_setting = serializers.JSONField(required=False, default=dict,
                                                 label=_("Model parameter settings"))
    # Reference fields shaped [node_id, *field_path], resolved via workflow_manage.get_reference_field
    first_frame_url = serializers.ListField(required=True, label=_("First frame url"))
    last_frame_url = serializers.ListField(required=False, label=_("Last frame url"))
class IImageToVideoNode(INode):
    """Interface node for the image-to-video workflow step.

    Resolves the first/last frame reference fields from the workflow context
    and delegates generation to ``execute`` (implemented by subclasses).
    """
    type = 'image-to-video-node'

    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
        return ImageToVideoNodeSerializer

    def _run(self):
        # Reference fields are stored as [node_id, *field_path].
        first_frame_ref = self.node_params_serializer.data.get('first_frame_url')
        first_frame_url = self.workflow_manage.get_reference_field(
            first_frame_ref[0], first_frame_ref[1:])
        # BUGFIX: the original used ``first_frame_url is []`` which is always
        # False (identity check against a fresh list object), so this
        # validation could never fire. Use truthiness instead.
        if not first_frame_url:
            raise ValueError(
                _("First frame url cannot be empty"))
        last_frame_url = None
        last_frame_ref = self.node_params_serializer.data.get('last_frame_url')
        # None and [] both mean "no last frame supplied"
        if last_frame_ref:
            last_frame_url = self.workflow_manage.get_reference_field(
                last_frame_ref[0], last_frame_ref[1:])
        node_params_data = {k: v for k, v in self.node_params_serializer.data.items()
                            if k not in ['first_frame_url', 'last_frame_url']}
        return self.execute(first_frame_url=first_frame_url, last_frame_url=last_frame_url,
                            **node_params_data, **self.flow_params_serializer.data)

    def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                model_params_setting,
                chat_record_id,
                first_frame_url, last_frame_url,
                **kwargs) -> NodeResult:
        """Generate a video from the resolved frames; implemented by subclasses."""
        pass

View File

@ -0,0 +1,3 @@
# coding=utf-8
from .base_image_to_video_node import BaseImageToVideoNode

View File

@ -0,0 +1,153 @@
# coding=utf-8
import base64
from functools import reduce
from typing import List
import requests
from django.db.models import QuerySet
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
from application.flow.i_step_node import NodeResult
from application.flow.step_node.image_to_video_step_node.i_image_to_video_node import IImageToVideoNode
from common.utils.common import bytes_to_uploaded_file
from knowledge.models import FileSourceType, File
from oss.serializers.file import FileSerializer, mime_types
from models_provider.tools import get_model_instance_by_model_workspace_id
class BaseImageToVideoNode(IImageToVideoNode):
    """Image-to-video workflow node.

    Resolves the first/last frame (plain URL or uploaded file id), calls the
    configured ITV model, stores the generated video as an application file
    and returns an HTML ``<video>`` tag as the answer.
    """

    def save_context(self, details, workflow_manage):
        """Restore answer/question into the node context from saved run details."""
        self.context['answer'] = details.get('answer')
        self.context['question'] = details.get('question')
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

    def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                model_params_setting,
                chat_record_id,
                first_frame_url, last_frame_url=None,
                **kwargs) -> NodeResult:
        """Run video generation and return a NodeResult with the rendered answer."""
        application = self.workflow_manage.work_flow_post_handler.chat_info.application
        workspace_id = self.workflow_manage.get_body().get('workspace_id')
        ttv_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
                                                            **model_params_setting)
        history_message = self.get_history_message(history_chat_record, dialogue_number)
        self.context['history_message'] = history_message
        question = self.generate_prompt_question(prompt)
        self.context['question'] = question
        message_list = self.generate_message_list(question, history_message)
        self.context['message_list'] = message_list
        self.context['dialogue_type'] = dialogue_type
        self.context['negative_prompt'] = negative_prompt
        self.context['first_frame_url'] = first_frame_url
        self.context['last_frame_url'] = last_frame_url
        # Frames may be plain URLs (passed straight to the model) or uploaded
        # file ids (converted to a base64 data URI first).
        first_frame_url = self.get_file_base64(first_frame_url)
        last_frame_url = self.get_file_base64(last_frame_url)
        video_urls = ttv_model.generate_video(question, negative_prompt, first_frame_url, last_frame_url)
        if video_urls is None:
            return NodeResult({'answer': '生成视频失败'}, {})
        # Persist the generated video as an application file.
        file_name = 'generated_video.mp4'
        if isinstance(video_urls, str) and video_urls.startswith('http'):
            # BUGFIX: requests.get without a timeout could hang the workflow
            # indefinitely if the provider stalls.
            video_urls = requests.get(video_urls, timeout=60).content
        file = bytes_to_uploaded_file(video_urls, file_name)
        meta = {
            'debug': False if application.id else True,
            'chat_id': chat_id,
            'application_id': str(application.id) if application.id else None,
        }
        file_url = FileSerializer(data={
            'file': file,
            'meta': meta,
            'source_id': meta['application_id'],
            'source_type': FileSourceType.APPLICATION.value
        }).upload()
        video_label = f'<video src="{file_url}" controls style="max-width: 100%; width: 100%; height: auto; max-height: 60vh;"></video>'
        video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}]
        return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list,
                           'video': video_list,
                           'history_message': history_message, 'question': question}, {})

    def get_file_base64(self, image_url):
        """Return ``image_url`` unchanged for http(s) URLs (or None); convert an
        uploaded file id into a ``data:`` base64 URI."""
        if isinstance(image_url, list):
            image_url = image_url[0].get('file_id')
        if isinstance(image_url, str) and not image_url.startswith('http'):
            file = QuerySet(File).filter(id=image_url).first()
            # BUGFIX: a missing file previously crashed with AttributeError on None.
            if file is None:
                raise ValueError(f'File not found: {image_url}')
            file_bytes = file.get_bytes()
            # Content type is inferred from the extension; a detection library
            # (e.g. python-magic) could be used when the extension is unknown.
            file_type = file.file_name.split(".")[-1].lower()
            content_type = mime_types.get(file_type, 'application/octet-stream')
            encoded_bytes = base64.b64encode(file_bytes)
            return f'data:{content_type};base64,{encoded_bytes.decode()}'
        return image_url

    def generate_history_ai_message(self, chat_record):
        """Rebuild the AI message of a past round for this node's history."""
        for val in chat_record.details.values():
            if self.node.id == val['node_id'] and 'image_list' in val:
                if val['dialogue_type'] == 'WORKFLOW':
                    return chat_record.get_ai_message()
                image_list = val['image_list']
                return AIMessage(content=[
                    *[{'type': 'image_url', 'image_url': {'url': f'{file_url}'}} for file_url in image_list]
                ])
        return chat_record.get_ai_message()

    def get_history_message(self, history_chat_record, dialogue_number):
        """Return the last ``dialogue_number`` rounds as [human, ai, human, ai, ...]."""
        start_index = len(history_chat_record) - dialogue_number
        history_message = reduce(lambda x, y: [*x, *y], [
            [self.generate_history_human_message(history_chat_record[index]),
             self.generate_history_ai_message(history_chat_record[index])]
            for index in
            range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
        return history_message

    def generate_history_human_message(self, chat_record):
        """Rebuild the human message of a past round for this node's history."""
        for data in chat_record.details.values():
            if self.node.id == data['node_id'] and 'image_list' in data:
                image_list = data['image_list']
                if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                    return HumanMessage(content=chat_record.problem_text)
                return HumanMessage(content=data['question'])
        return HumanMessage(content=chat_record.problem_text)

    def generate_prompt_question(self, prompt):
        """Render the prompt template against the current workflow variables."""
        return self.workflow_manage.generate_prompt(prompt)

    def generate_message_list(self, question: str, history_message):
        return [
            *history_message,
            question
        ]

    @staticmethod
    def reset_message_list(message_list: List[BaseMessage], answer_text):
        """Flatten LangChain messages into role/content dicts plus the final answer."""
        result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
                  message
                  in
                  message_list]
        result.append({'role': 'ai', 'content': answer_text})
        return result

    def get_details(self, index: int, **kwargs):
        """Serialize this node's run state for the chat record details."""
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'history_message': [{'content': message.content, 'role': message.type} for message in
                                (self.context.get('history_message') if self.context.get(
                                    'history_message') is not None else [])],
            'question': self.context.get('question'),
            'answer': self.context.get('answer'),
            'type': self.node.type,
            'message_tokens': self.context.get('message_tokens'),
            'answer_tokens': self.context.get('answer_tokens'),
            'status': self.status,
            'err_message': self.err_message,
            'first_frame_url': self.context.get('first_frame_url'),
            'last_frame_url': self.context.get('last_frame_url'),
            'dialogue_type': self.context.get('dialogue_type'),
            'negative_prompt': self.context.get('negative_prompt'),
        }

View File

@ -0,0 +1,3 @@
# coding=utf-8
from .impl import *

View File

@ -0,0 +1,45 @@
# coding=utf-8
from typing import Type
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
class TextToVideoNodeSerializer(serializers.Serializer):
    """Validates the configuration parameters of a text-to-video workflow node."""
    model_id = serializers.CharField(required=True, label=_("Model id"))
    prompt = serializers.CharField(required=True, label=_("Prompt word (positive)"))
    negative_prompt = serializers.CharField(required=False, label=_("Prompt word (negative)"),
                                            allow_null=True, allow_blank=True, )
    # Number of previous dialogue rounds to include as conversation history
    dialogue_number = serializers.IntegerField(required=False, default=0,
                                               label=_("Number of multi-round conversations"))
    # 'NODE' or 'WORKFLOW' — controls how history messages are rebuilt (see the base node)
    dialogue_type = serializers.CharField(required=False, default='NODE',
                                          label=_("Conversation storage type"))
    is_result = serializers.BooleanField(required=False,
                                         label=_('Whether to return content'))
    model_params_setting = serializers.JSONField(required=False, default=dict,
                                                 label=_("Model parameter settings"))
class ITextToVideoNode(INode):
    """Interface node for the text-to-video workflow step.

    Declares the node type and parameter serializer; generation itself is
    delegated to ``execute``, implemented by subclasses.
    """
    type = 'text-to-video-node'

    def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
        return TextToVideoNodeSerializer

    def _run(self):
        # Forward both node-level and flow-level parameters to execute().
        node_params = self.node_params_serializer.data
        flow_params = self.flow_params_serializer.data
        return self.execute(**node_params, **flow_params)

    def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                model_params_setting,
                chat_record_id,
                **kwargs) -> NodeResult:
        """Generate a video from the prompt; implemented by subclasses."""
        pass

View File

@ -0,0 +1,3 @@
# coding=utf-8
from .base_text_to_video_node import BaseTextToVideoNode

View File

@ -0,0 +1,132 @@
# coding=utf-8
from functools import reduce
from typing import List
import requests
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
from application.flow.i_step_node import NodeResult
from application.flow.step_node.text_to_video_step_node.i_text_to_video_node import ITextToVideoNode
from common.utils.common import bytes_to_uploaded_file
from knowledge.models import FileSourceType
from oss.serializers.file import FileSerializer
from models_provider.tools import get_model_instance_by_model_workspace_id
class BaseTextToVideoNode(ITextToVideoNode):
    """Text-to-video workflow node.

    Renders the prompt, calls the configured TTV model, stores the generated
    video as an application file and returns an HTML ``<video>`` tag as the
    answer.
    """

    def save_context(self, details, workflow_manage):
        """Restore answer/question into the node context from saved run details."""
        self.context['answer'] = details.get('answer')
        self.context['question'] = details.get('question')
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

    def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                model_params_setting,
                chat_record_id,
                **kwargs) -> NodeResult:
        """Run video generation and return a NodeResult with the rendered answer."""
        application = self.workflow_manage.work_flow_post_handler.chat_info.application
        workspace_id = self.workflow_manage.get_body().get('workspace_id')
        ttv_model = get_model_instance_by_model_workspace_id(model_id, workspace_id,
                                                            **model_params_setting)
        history_message = self.get_history_message(history_chat_record, dialogue_number)
        self.context['history_message'] = history_message
        question = self.generate_prompt_question(prompt)
        self.context['question'] = question
        message_list = self.generate_message_list(question, history_message)
        self.context['message_list'] = message_list
        self.context['dialogue_type'] = dialogue_type
        self.context['negative_prompt'] = negative_prompt
        # BUGFIX: removed leftover debug print() calls.
        video_urls = ttv_model.generate_video(question, negative_prompt)
        if video_urls is None:
            return NodeResult({'answer': '生成视频失败'}, {})
        # Persist the generated video as an application file.
        file_name = 'generated_video.mp4'
        if isinstance(video_urls, str) and video_urls.startswith('http'):
            # BUGFIX: requests.get without a timeout could hang the workflow
            # indefinitely if the provider stalls.
            video_urls = requests.get(video_urls, timeout=60).content
        file = bytes_to_uploaded_file(video_urls, file_name)
        meta = {
            'debug': False if application.id else True,
            'chat_id': chat_id,
            'application_id': str(application.id) if application.id else None,
        }
        file_url = FileSerializer(data={
            'file': file,
            'meta': meta,
            'source_id': meta['application_id'],
            'source_type': FileSourceType.APPLICATION.value
        }).upload()
        video_label = f'<video src="{file_url}" controls style="max-width: 100%; width: 100%; height: auto;"></video>'
        video_list = [{'file_id': file_url.split('/')[-1], 'file_name': file_name, 'url': file_url}]
        return NodeResult({'answer': video_label, 'chat_model': ttv_model, 'message_list': message_list,
                           'video': video_list,
                           'history_message': history_message, 'question': question}, {})

    def generate_history_ai_message(self, chat_record):
        """Rebuild the AI message of a past round for this node's history."""
        for val in chat_record.details.values():
            if self.node.id == val['node_id'] and 'image_list' in val:
                if val['dialogue_type'] == 'WORKFLOW':
                    return chat_record.get_ai_message()
                image_list = val['image_list']
                return AIMessage(content=[
                    *[{'type': 'image_url', 'image_url': {'url': f'{file_url}'}} for file_url in image_list]
                ])
        return chat_record.get_ai_message()

    def get_history_message(self, history_chat_record, dialogue_number):
        """Return the last ``dialogue_number`` rounds as [human, ai, human, ai, ...]."""
        start_index = len(history_chat_record) - dialogue_number
        history_message = reduce(lambda x, y: [*x, *y], [
            [self.generate_history_human_message(history_chat_record[index]),
             self.generate_history_ai_message(history_chat_record[index])]
            for index in
            range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
        return history_message

    def generate_history_human_message(self, chat_record):
        """Rebuild the human message of a past round for this node's history."""
        for data in chat_record.details.values():
            if self.node.id == data['node_id'] and 'image_list' in data:
                image_list = data['image_list']
                if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                    return HumanMessage(content=chat_record.problem_text)
                return HumanMessage(content=data['question'])
        return HumanMessage(content=chat_record.problem_text)

    def generate_prompt_question(self, prompt):
        """Render the prompt template against the current workflow variables."""
        return self.workflow_manage.generate_prompt(prompt)

    def generate_message_list(self, question: str, history_message):
        return [
            *history_message,
            question
        ]

    @staticmethod
    def reset_message_list(message_list: List[BaseMessage], answer_text):
        """Flatten LangChain messages into role/content dicts plus the final answer."""
        result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
                  message
                  in
                  message_list]
        result.append({'role': 'ai', 'content': answer_text})
        return result

    def get_details(self, index: int, **kwargs):
        """Serialize this node's run state for the chat record details."""
        return {
            'name': self.node.properties.get('stepName'),
            "index": index,
            'run_time': self.context.get('run_time'),
            'history_message': [{'content': message.content, 'role': message.type} for message in
                                (self.context.get('history_message') if self.context.get(
                                    'history_message') is not None else [])],
            'question': self.context.get('question'),
            'answer': self.context.get('answer'),
            'type': self.node.type,
            'message_tokens': self.context.get('message_tokens'),
            'answer_tokens': self.context.get('answer_tokens'),
            'status': self.status,
            'err_message': self.err_message,
            # NOTE(review): 'image_list' is never written by this node's
            # execute() — presumably copied from the image node; kept for
            # detail-shape compatibility, confirm before removing.
            'image_list': self.context.get('image_list'),
            'dialogue_type': self.context.get('dialogue_type'),
            'negative_prompt': self.context.get('negative_prompt'),
        }

View File

@ -8674,6 +8674,12 @@ msgstr ""
msgid "This folder contains resources that you dont have permission"
msgstr ""
msgid "Text to Video"
msgstr ""
msgid "Image to Video"
msgstr ""
msgid "Authentication failed. Please verify that the parameters are correct"
msgstr ""
@ -8684,4 +8690,14 @@ msgid "Prompt template"
msgstr ""
msgid "generate prompt"
msgstr ""
msgid "Watermark"
msgstr ""
msgid "Whether to add watermark"
msgstr ""
msgid "Resolution"
msgstr ""

View File

@ -8800,6 +8800,13 @@ msgstr "系统资源授权"
msgid "This folder contains resources that you dont have permission"
msgstr "此文件夹包含您没有权限的资源"
msgid "Text to Video"
msgstr "文生视频"
msgid "Image to Video"
msgstr "图生视频"
msgid "Authentication failed. Please verify that the parameters are correct"
msgstr "认证失败,请检查参数是否正确"
@ -8810,4 +8817,13 @@ msgid "Prompt template"
msgstr "提示词模板"
msgid "generate prompt"
msgstr "生成提示词"
msgid "Watermark"
msgstr "水印"
msgid "Whether to add watermark"
msgstr "是否添加水印"
msgid "Resolution"
msgstr "分辨率"

View File

@ -8800,6 +8800,13 @@ msgstr "系統資源授權"
msgid "This folder contains resources that you dont have permission"
msgstr "此資料夾包含您沒有許可權的資源"
msgid "Text to Video"
msgstr "文生視頻"
msgid "Image to Video"
msgstr "圖生視頻"
msgid "Authentication failed. Please verify that the parameters are correct"
msgstr "認證失敗,請檢查參數是否正確"
@ -8810,4 +8817,13 @@ msgid "Prompt template"
msgstr "提示詞範本"
msgid "generate prompt"
msgstr "生成提示詞"
msgid "Watermark"
msgstr "水印"
msgid "Whether to add watermark"
msgstr "是否添加水印"
msgid "Resolution"
msgstr "分辨率"

View File

@ -147,6 +147,11 @@ class ModelTypeConst(Enum):
IMAGE = {'code': 'IMAGE', 'message': _('Vision Model')}
TTI = {'code': 'TTI', 'message': _('Image Generation')}
RERANKER = {'code': 'RERANKER', 'message': _('Rerank')}
#文生视频 图生视频
TTV = {'code': 'TTV', 'message': _('Text to Video')}
ITV = {'code': 'ITV', 'message': _('Image to Video')}
class ModelInfo:

View File

@ -0,0 +1,14 @@
# coding=utf-8
from abc import abstractmethod
from pydantic import BaseModel
class BaseGenerationVideo(BaseModel):
    """Abstract base for provider video-generation models (TTV/ITV)."""

    @abstractmethod
    def check_auth(self):
        """Validate the configured credentials; expected to raise on failure."""
        pass

    @abstractmethod
    def generate_video(self, prompt: str, negative_prompt: str = None, first_frame_url=None, last_frame_url=None):
        """Generate a video from the prompt (and optional key frames).

        Frame arguments are only meaningful for image-to-video models; the
        concrete implementation in this commit returns a video URL or None.
        """
        pass

View File

@ -15,6 +15,7 @@ from models_provider.impl.aliyun_bai_lian_model_provider.credential.asr_stt impo
from models_provider.impl.aliyun_bai_lian_model_provider.credential.embedding import \
AliyunBaiLianEmbeddingCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.image import QwenVLModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.itv import ImageToVideoModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.llm import BaiLianLLMModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.omni_stt import AliyunBaiLianOmiSTTModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.reranker import \
@ -22,6 +23,7 @@ from models_provider.impl.aliyun_bai_lian_model_provider.credential.reranker imp
from models_provider.impl.aliyun_bai_lian_model_provider.credential.stt import AliyunBaiLianSTTModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.tti import QwenTextToImageModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.tts import AliyunBaiLianTTSModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.credential.ttv import TextToVideoModelCredential
from models_provider.impl.aliyun_bai_lian_model_provider.model.asr_stt import AliyunBaiLianAsrSpeechToText
from models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding
from models_provider.impl.aliyun_bai_lian_model_provider.model.image import QwenVLChatModel
@ -34,6 +36,8 @@ from models_provider.impl.aliyun_bai_lian_model_provider.model.tts import Aliyun
from maxkb.conf import PROJECT_DIR
from django.utils.translation import gettext as _, gettext
from models_provider.impl.aliyun_bai_lian_model_provider.model.ttv import GenerationVideoModel
aliyun_bai_lian_model_credential = AliyunBaiLianRerankerCredential()
aliyun_bai_lian_tts_model_credential = AliyunBaiLianTTSModelCredential()
aliyun_bai_lian_stt_model_credential = AliyunBaiLianSTTModelCredential()
@ -43,6 +47,8 @@ aliyun_bai_lian_embedding_model_credential = AliyunBaiLianEmbeddingCredential()
aliyun_bai_lian_llm_model_credential = BaiLianLLMModelCredential()
qwenvl_model_credential = QwenVLModelCredential()
qwentti_model_credential = QwenTextToImageModelCredential()
aliyun_bai_lian_ttv_model_credential = TextToVideoModelCredential()
aliyun_bai_lian_itv_model_credential = ImageToVideoModelCredential()
model_info_list = [ModelInfo('gte-rerank',
_('With the GTE-Rerank text sorting series model developed by Alibaba Tongyi Lab, developers can integrate high-quality text retrieval and sorting through the LlamaIndex framework.'),
@ -104,6 +110,24 @@ module_info_tti_list = [
_('Tongyi Wanxiang - a large image model for text generation, supports bilingual input in Chinese and English, and supports the input of reference pictures for reference content or reference style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D. Cartoon.'),
ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel),
]
# Aliyun Bailian Wanx text-to-video (TTV) model registrations; descriptions left empty.
model_info_ttv_list = [
    ModelInfo('wan2.2-t2v-plus', '', ModelTypeConst.TTV, aliyun_bai_lian_ttv_model_credential,
              GenerationVideoModel),
    ModelInfo('wanx2.1-t2v-turbo', '', ModelTypeConst.TTV, aliyun_bai_lian_ttv_model_credential,
              GenerationVideoModel),
    ModelInfo('wanx2.1-t2v-plus', '', ModelTypeConst.TTV, aliyun_bai_lian_ttv_model_credential,
              GenerationVideoModel),
]
# Image-to-video (ITV) registrations.
# NOTE(review): the 'module_' prefix diverges from 'model_info_ttv_list' above —
# presumably a typo; kept as-is because the builder chain below references it.
module_info_itv_list = [
    ModelInfo('wan2.2-i2v-flash', '', ModelTypeConst.ITV, aliyun_bai_lian_itv_model_credential,
              GenerationVideoModel),
    ModelInfo('wan2.2-i2v-plus', '', ModelTypeConst.ITV, aliyun_bai_lian_itv_model_credential,
              GenerationVideoModel),
    ModelInfo('wanx2.1-i2v-plus', '', ModelTypeConst.ITV, aliyun_bai_lian_itv_model_credential,
              GenerationVideoModel),
    ModelInfo('wanx2.1-i2v-turbo', '', ModelTypeConst.ITV, aliyun_bai_lian_itv_model_credential,
              GenerationVideoModel),
]
model_info_manage = (
ModelInfoManage.builder()
@ -117,6 +141,10 @@ model_info_manage = (
.append_default_model_info(model_info_list[3])
.append_default_model_info(model_info_list[4])
.append_default_model_info(model_info_list[0])
.append_model_info_list(model_info_ttv_list)
.append_default_model_info(model_info_ttv_list[0])
.append_model_info_list(module_info_itv_list)
.append_default_model_info(module_info_itv_list[0])
.build()
)

View File

@ -0,0 +1,120 @@
# coding=utf-8
import traceback
from typing import Dict, Any
from django.utils.translation import gettext_lazy as _, gettext
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, PasswordInputField, SingleSelect, SliderField, TooltipLabel
from common.forms.switch_field import SwitchField
from models_provider.base_model_provider import BaseModelCredential, ValidCode
class QwenModelParams(BaseForm):
    """
    Parameter form for the Qwen image-to-video model.

    Defines the output resolution and whether a watermark is added.
    (The previous docstring mentioned "Video size, number of Videos, and
    style", which did not match the fields below.)
    """
    resolution = SingleSelect(
        TooltipLabel(_('Resolution'), ''),
        required=True,
        default_value='480P',
        option_list=[
            {'value': '480P', 'label': '480P'},
            {'value': '720P', 'label': '720P'},
            {'value': '1080P', 'label': '1080P'},
        ],
        text_field='label',
        value_field='value'
    )
    watermark = SwitchField(
        TooltipLabel(_('Watermark'), _('Whether to add watermark')),
        default_value=False,
    )
class ImageToVideoModelCredential(BaseForm, BaseModelCredential):
    """
    Credential class for the Qwen Image-to-Video model.
    Provides validation and encryption for the model credentials.
    """
    api_key = PasswordInputField('API Key', required=True)

    def is_valid(
            self,
            model_type: str,
            model_name: str,
            model_credential: Dict[str, Any],
            model_params: Dict[str, Any],
            provider,
            raise_exception: bool = False
    ) -> bool:
        """
        Validate the model credentials.

        :param model_type: Type of the model (e.g. 'ITV').
        :param model_name: Name of the model.
        :param model_credential: Dictionary containing the model credentials.
        :param model_params: Parameters for the model.
        :param provider: Model provider instance.
        :param raise_exception: Whether to raise an exception on validation failure.
        :return: Boolean indicating whether the credentials are valid.
        :raises AppApiException: On unsupported model type, or on failure when
            ``raise_exception`` is True.
        """
        model_type_list = provider.get_model_type_list()
        if not any(mt.get('value') == model_type for mt in model_type_list):
            raise AppApiException(
                ValidCode.valid_error.value,
                gettext('{model_type} Model type is not supported').format(model_type=model_type)
            )
        required_keys = ['api_key']
        for key in required_keys:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(
                        ValidCode.valid_error.value,
                        gettext('{key} is required').format(key=key)
                    )
                return False
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            # check_auth raises on invalid credentials; its return value is
            # unused (the original bound it to an unused local 'res').
            model.check_auth()
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(
                    ValidCode.valid_error.value,
                    gettext(
                        'Verification failed, please check whether the parameters are correct: {error}'
                    ).format(error=str(e))
                )
            return False
        return True

    def encryption_dict(self, model: Dict[str, object]) -> Dict[str, object]:
        """
        Encrypt sensitive fields in the model dictionary.

        :param model: Dictionary containing model details.
        :return: Dictionary with the API key encrypted.
        """
        return {
            **model,
            'api_key': super().encryption(model.get('api_key', ''))
        }

    def get_model_params_setting_form(self, model_name: str):
        """
        Get the parameter setting form for the specified model.

        :param model_name: Name of the model.
        :return: Parameter setting form.
        """
        return QwenModelParams()

View File

@ -0,0 +1,122 @@
# coding=utf-8
import traceback
from typing import Dict, Any
from django.utils.translation import gettext_lazy as _, gettext
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, PasswordInputField, SingleSelect, SliderField, TooltipLabel
from common.forms.switch_field import SwitchField
from models_provider.base_model_provider import BaseModelCredential, ValidCode
class QwenModelParams(BaseForm):
    """
    Parameter form for the Qwen text-to-video model.

    Defines the output video size and whether a watermark is added.
    (The previous docstring mentioned "number of Videos, and style",
    which did not match the fields below.)
    """
    # NOTE(review): the tooltip example '1024x1024' does not match the
    # '*'-separated option values below — confirm the intended wording.
    size = SingleSelect(
        TooltipLabel(_('Video size'), _('Specify the size of the generated Video, such as: 1024x1024')),
        required=True,
        default_value='1280*720',
        option_list=[
            {'value': '832*480', 'label': '832*480'},
            {'value': '480*832', 'label': '480*832'},
            {'value': '1280*720', 'label': '1280*720'},
            {'value': '720*1280', 'label': '720*1280'},
        ],
        text_field='label',
        value_field='value'
    )
    watermark = SwitchField(
        TooltipLabel(_('Watermark'), _('Whether to add watermark')),
        default_value=False,
    )
class TextToVideoModelCredential(BaseForm, BaseModelCredential):
    """
    Credential class for the Qwen Text-to-Video model.
    Provides validation and encryption for the model credentials.
    """
    api_key = PasswordInputField('API Key', required=True)

    def is_valid(
            self,
            model_type: str,
            model_name: str,
            model_credential: Dict[str, Any],
            model_params: Dict[str, Any],
            provider,
            raise_exception: bool = False
    ) -> bool:
        """
        Validate the model credentials.

        :param model_type: Type of the model (e.g. 'TTV').
        :param model_name: Name of the model.
        :param model_credential: Dictionary containing the model credentials.
        :param model_params: Parameters for the model.
        :param provider: Model provider instance.
        :param raise_exception: Whether to raise an exception on validation failure.
        :return: Boolean indicating whether the credentials are valid.
        :raises AppApiException: On unsupported model type, or on failure when
            ``raise_exception`` is True.
        """
        model_type_list = provider.get_model_type_list()
        if not any(mt.get('value') == model_type for mt in model_type_list):
            raise AppApiException(
                ValidCode.valid_error.value,
                gettext('{model_type} Model type is not supported').format(model_type=model_type)
            )
        required_keys = ['api_key']
        for key in required_keys:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(
                        ValidCode.valid_error.value,
                        gettext('{key} is required').format(key=key)
                    )
                return False
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            # check_auth raises on invalid credentials; its return value is
            # unused (the original bound it to an unused local 'res').
            model.check_auth()
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(
                    ValidCode.valid_error.value,
                    gettext(
                        'Verification failed, please check whether the parameters are correct: {error}'
                    ).format(error=str(e))
                )
            return False
        return True

    def encryption_dict(self, model: Dict[str, object]) -> Dict[str, object]:
        """
        Encrypt sensitive fields in the model dictionary.

        :param model: Dictionary containing model details.
        :return: Dictionary with the API key encrypted.
        """
        return {
            **model,
            'api_key': super().encryption(model.get('api_key', ''))
        }

    def get_model_params_setting_form(self, model_name: str):
        """
        Get the parameter setting form for the specified model.

        :param model_name: Name of the model.
        :return: Parameter setting form.
        """
        return QwenModelParams()

View File

@ -0,0 +1,110 @@
import time
from http import HTTPStatus
from typing import Dict, Optional
import requests
from dashscope import VideoSynthesis
from langchain_core.messages import HumanMessage
from django.utils.translation import gettext
from langchain_community.chat_models import ChatTongyi
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.base_ttv import BaseGenerationVideo
from common.utils.logger import maxkb_logger
class GenerationVideoModel(MaxKBBaseModel, BaseGenerationVideo):
    """Video generation model backed by DashScope's VideoSynthesis API.

    Supports text-to-video (prompt only), single-keyframe image-to-video
    (``img_url``) and first/last-keyframe generation (KF2V). Transient
    network failures are retried before giving up.
    """

    api_key: str
    model_name: str
    params: dict
    max_retries: int = 3
    retry_delay: int = 5  # seconds between retries

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.api_key = kwargs.get('api_key')
        self.model_name = kwargs.get('model_name')
        self.params = kwargs.get('params', {})
        self.max_retries = kwargs.get('max_retries', 3)
        # FIX: honor an explicit retry_delay override (it was previously
        # hard-coded to 5, silently ignoring the caller's kwarg).
        self.retry_delay = kwargs.get('retry_delay', 5)

    @staticmethod
    def is_cache_model():
        # A fresh instance is built per request; nothing to cache.
        return False

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        """Factory: build an instance from credentials, forwarding every
        extra option (everything except control keys) as generation params."""
        optional_params = {'params': {}}
        for key, value in model_kwargs.items():
            if key not in ('model_id', 'use_local', 'streaming'):
                optional_params['params'][key] = value
        return GenerationVideoModel(
            model_name=model_name,
            api_key=model_credential.get('api_key'),
            **optional_params,
        )

    def check_auth(self):
        """Validate the API key with a cheap chat round-trip."""
        chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max')
        self._safe_call(chat.invoke, input=[HumanMessage([{"type": "text", "text": gettext('Hello')}])])

    def _safe_call(self, func, **kwargs):
        """Invoke *func* with retries on transient network failures.

        Retries up to ``max_retries`` times on proxy/connection/timeout
        errors, sleeping ``retry_delay`` seconds between attempts, then
        raises ``RuntimeError`` if the API is still unreachable.
        Non-network exceptions propagate immediately.
        """
        for attempt in range(self.max_retries):
            try:
                return func(**kwargs)
            except (requests.exceptions.ProxyError,
                    requests.exceptions.ConnectionError,
                    requests.exceptions.Timeout) as e:
                # Lazy %-args: the message is only rendered if actually logged.
                maxkb_logger.error("⚠️ 网络错误: %s,正在重试 %s/%s...", e, attempt + 1, self.max_retries)
                time.sleep(self.retry_delay)
        raise RuntimeError("多次重试后仍无法连接到 DashScope API,请检查代理或网络配置")

    def generate_video(self, prompt, negative_prompt=None, first_frame_url=None, last_frame_url=None, **kwargs):
        """Submit an async video-generation task and wait for its result.

        :param prompt: positive text description of the desired video.
        :param negative_prompt: optional negative description.
        :param first_frame_url: starting keyframe URL (required for KF2V).
        :param last_frame_url: ending keyframe URL (required for KF2V).
            When only ``first_frame_url`` is given, a single-keyframe video
            is generated and the URL is passed as ``img_url`` instead.
        :return: URL of the generated video, or ``None`` on failure.
        """
        # Base request parameters.
        params = {"api_key": self.api_key, "prompt": prompt, "model": self.model_name,
                  "negative_prompt": negative_prompt}
        if first_frame_url and last_frame_url:
            params['first_frame_url'] = first_frame_url
            params["last_frame_url"] = last_frame_url
        elif first_frame_url:
            params['img_url'] = first_frame_url
        # Merge any extra model-specific options configured on the instance.
        params.update(self.params)

        # --- Submit the task asynchronously ---
        rsp = self._safe_call(VideoSynthesis.async_call, **params)
        if rsp.status_code != HTTPStatus.OK:
            # FIX: submission failure is an error, not routine info.
            maxkb_logger.error('提交任务失败,status_code: %s, code: %s, message: %s',
                               rsp.status_code, rsp.code, rsp.message)
            return None
        # FIX: loggers take a format string + args, not print-style
        # positional arguments (which would be dropped / raise on format).
        maxkb_logger.info("task_id: %s", rsp.output.task_id)

        # --- Query the current task status (informational only) ---
        status = self._safe_call(VideoSynthesis.fetch, task=rsp, api_key=self.api_key)
        if status.status_code == HTTPStatus.OK:
            maxkb_logger.info("当前任务状态: %s", status.output.task_status)
        else:
            maxkb_logger.error('获取任务状态失败,status_code: %s, code: %s, message: %s',
                               status.status_code, status.code, status.message)

        # --- Block until the task finishes ---
        rsp = self._safe_call(VideoSynthesis.wait, task=rsp, api_key=self.api_key)
        if rsp.status_code == HTTPStatus.OK:
            maxkb_logger.info("视频生成完成!视频 URL: %s", rsp.output.video_url)
            return rsp.output.video_url
        maxkb_logger.error('生成失败,status_code: %s, code: %s, message: %s',
                           rsp.status_code, rsp.code, rsp.message)
        return None

View File

@ -1,4 +1,6 @@
# coding=utf-8
from concurrent.futures import ThreadPoolExecutor
from requests.exceptions import ConnectTimeout, ReadTimeout
from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping
from langchain_core.language_models import LanguageModelInput
@ -92,13 +94,24 @@ class BaseChatOpenAI(ChatOpenAI):
tools: Optional[
Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
] = None,
timeout: Optional[float] = 0.5,
) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
try:
return super().get_num_tokens_from_messages(messages)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
with ThreadPoolExecutor(max_workers=1) as executor:
future = executor.submit(super().get_num_tokens_from_messages, messages, tools)
try:
response = future.result()
print("请求成功(未超时)")
return response
except Exception as e:
if isinstance(e, ReadTimeout):
raise # 继续抛出
else:
print("except:", e)
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
return self.usage_metadata.get('input_tokens', self.usage_metadata.get('prompt_tokens', 0))
def get_num_tokens(self, text: str) -> int:

View File

@ -0,0 +1,13 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_5122_17163)">
<path d="M0 16C0 8.45753 0 4.68629 2.34315 2.34315C4.68629 0 8.45753 0 16 0C23.5425 0 27.3137 0 29.6569 2.34315C32 4.68629 32 8.45753 32 16C32 23.5425 32 27.3137 29.6569 29.6569C27.3137 32 23.5425 32 16 32C8.45753 32 4.68629 32 2.34315 29.6569C0 27.3137 0 23.5425 0 16Z" fill="#3370FF"/>
<path d="M21.6982 23.1059C21.777 23.3424 22.0135 23.5001 22.25 23.5001C22.4865 23.5001 22.723 23.3424 22.8018 23.1059L23.1171 22.16C23.1959 21.8447 23.5112 21.5294 23.8266 21.4505L24.7725 21.1352C25.009 21.0564 25.1666 20.8199 25.1666 20.5834C25.1666 20.3469 25.009 20.1104 24.7725 20.0316L23.8266 19.7163C23.5112 19.6375 23.1959 19.3222 23.1171 19.0068L22.8018 18.0609C22.723 17.8244 22.4865 17.6667 22.25 17.6667C22.0135 17.6667 21.777 17.8244 21.6982 18.0609L21.3829 19.0068C21.304 19.3222 20.9887 19.6375 20.6734 19.7163L19.7275 20.0316C19.491 20.1104 19.3333 20.3469 19.3333 20.5834C19.3333 20.8199 19.491 21.0564 19.7275 21.1352L20.6734 21.4505C20.9887 21.5294 21.304 21.8447 21.3829 22.16L21.6982 23.1059Z" fill="white"/>
<path d="M24.3333 8.5C24.7935 8.5 25.1666 8.91973 25.1666 9.4375V16.3333C25.1666 16.6095 24.9428 16.8333 24.6666 16.8333H24C23.7238 16.8333 23.5 16.6095 23.5 16.3333V10.1667H8.49998V21.8333H18C18.2761 21.8333 18.5 22.0572 18.5 22.3333V23C18.5 23.2761 18.2761 23.5 18 23.5H7.66665C7.20641 23.5 6.83331 23.0803 6.83331 22.5625V9.4375C6.83331 8.91973 7.20641 8.5 7.66665 8.5H24.3333Z" fill="white"/>
<path d="M14.3333 12.7651C14.4627 12.7651 14.5903 12.7952 14.706 12.853L19.5091 15.2546C19.9207 15.4604 20.0876 15.9611 19.8818 16.3727C19.8012 16.534 19.6703 16.6648 19.5091 16.7454L14.706 19.147C14.2944 19.3528 13.7937 19.1859 13.5879 18.7743C13.53 18.6586 13.5 18.5309 13.5 18.4015V13.5985C13.5 13.1382 13.8731 12.7651 14.3333 12.7651Z" fill="white"/>
</g>
<defs>
<clipPath id="clip0_5122_17163">
<rect width="32" height="32" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 2.0 KiB

View File

@ -0,0 +1,13 @@
<svg width="32" height="32" viewBox="0 0 32 32" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_5122_16012)">
<path d="M0 16C0 8.45753 0 4.68629 2.34315 2.34315C4.68629 0 8.45753 0 16 0C23.5425 0 27.3137 0 29.6569 2.34315C32 4.68629 32 8.45753 32 16C32 23.5425 32 27.3137 29.6569 29.6569C27.3137 32 23.5425 32 16 32C8.45753 32 4.68629 32 2.34315 29.6569C0 27.3137 0 23.5425 0 16Z" fill="#3370FF"/>
<path d="M8.49999 7.66675H14.75C14.9801 7.66675 15.1667 7.8533 15.1667 8.08341V8.91675C15.1667 9.14687 14.9801 9.33341 14.75 9.33341H9.33332V14.7501C9.33332 14.9802 9.14678 15.1667 8.91666 15.1667H8.08332C7.8532 15.1667 7.66666 14.9802 7.66666 14.7501V8.50008C7.66666 8.03984 8.03975 7.66675 8.49999 7.66675ZM23.5 24.3334H18.9167C18.6865 24.3334 18.5 24.1469 18.5 23.9167V23.0834C18.5 22.8533 18.6865 22.6667 18.9167 22.6667H22.6667V18.0834C22.6667 17.8533 22.8532 17.6667 23.0833 17.6667H23.9167C24.1468 17.6667 24.3333 17.8533 24.3333 18.0834V23.5001C24.3333 23.9603 23.9602 24.3334 23.5 24.3334Z" fill="white"/>
<path fill-rule="evenodd" clip-rule="evenodd" d="M19.379 7.85862C19.4719 7.61739 19.7038 7.45825 19.9623 7.45825H20.8375C21.096 7.45825 21.3278 7.61739 21.4207 7.85862L24.3094 15.3586C24.4671 15.768 24.1649 16.2083 23.7261 16.2083H23.2633C22.9976 16.2083 22.761 16.0403 22.6733 15.7895L21.9953 13.8502C21.966 13.7666 21.8871 13.7106 21.7986 13.7106H19.085C18.9993 13.7106 18.9224 13.763 18.891 13.8428L18.1182 15.8116C18.0243 16.0509 17.7935 16.2083 17.5364 16.2083H17.0736C16.6349 16.2083 16.3327 15.768 16.4904 15.3586L19.379 7.85862ZM20.3991 9.57338L19.4584 12.1268H21.332L20.3991 9.57338Z" fill="white"/>
<path d="M16.8333 17.302C16.8333 17.0431 16.6468 16.8333 16.4167 16.8333H8.08332C7.8532 16.8333 7.66666 17.0431 7.66666 17.302V23.8645C7.66666 24.1234 7.8532 24.3333 8.08332 24.3333H16.4167C16.6468 24.3333 16.8333 24.1234 16.8333 23.8645V17.302ZM11.3947 22.1568C11.1888 22.2597 10.9386 22.1762 10.8356 21.9704C10.8067 21.9126 10.7917 21.8488 10.7917 21.7841V19.3824C10.7917 19.1523 10.9782 18.9658 11.2083 18.9658C11.273 18.9658 11.3368 18.9808 11.3947 19.0098L13.7963 20.2106C14.0021 20.3135 14.0856 20.5638 13.9826 20.7696C13.9423 20.8502 13.8769 20.9156 13.7963 20.9559L11.3947 22.1568Z" fill="white"/>
</g>
<defs>
<clipPath id="clip0_5122_16012">
<rect width="32" height="32" fill="white"/>
</clipPath>
</defs>
</svg>

After

Width:  |  Height:  |  Size: 2.3 KiB

View File

@ -7,7 +7,7 @@
<div class="flex-between cursor" @click="item['show'] = !item['show']">
<div class="flex align-center">
<el-icon class="mr-8 arrow-icon" :class="item['show'] ? 'rotate-90' : ''">
<CaretRight />
<CaretRight/>
</el-icon>
<component
:is="iconComponent(`${item.type}-icon`)"
@ -27,14 +27,14 @@
item.type === WorkflowType.ImageGenerateNode ||
item.type === WorkflowType.Application
"
>{{ item?.message_tokens + item?.answer_tokens }} tokens</span
>{{ item?.message_tokens + item?.answer_tokens }} tokens</span
>
<span class="mr-16 color-secondary">{{ item?.run_time?.toFixed(2) || 0.0 }} s</span>
<el-icon class="color-success" :size="16" v-if="item.status === 200">
<CircleCheck />
<CircleCheck/>
</el-icon>
<el-icon class="color-danger" :size="16" v-else>
<CircleClose />
<CircleClose/>
</el-icon>
</div>
</div>
@ -77,7 +77,7 @@
class="file cursor"
>
<div class="flex align-center">
<img :src="getImgUrl(f && f?.name)" alt="" width="24" />
<img :src="getImgUrl(f && f?.name)" alt="" width="24"/>
<div class="ml-4 ellipsis" :title="f && f?.name">
{{ f && f?.name }}
</div>
@ -130,7 +130,7 @@
class="file cursor"
>
<div class="flex align-center">
<img :src="getImgUrl(f && f?.name)" alt="" width="24" />
<img :src="getImgUrl(f && f?.name)" alt="" width="24"/>
<div class="ml-4 ellipsis" :title="f && f?.name">
{{ f && f?.name }}
</div>
@ -431,7 +431,7 @@
</div>
</template>
<!-- 多路召回 -->
<template v-if="item.type == WorkflowType.RrerankerNode">
<template v-if="item.type == WorkflowType.RerankerNode">
<div class="card-never border-r-6">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.searchContent') }}
@ -485,7 +485,8 @@
<template v-if="item.type === WorkflowType.FormNode">
<div class="card-never border-r-6">
<h5 class="p-8-12">
{{ $t('common.param.outputParam')
{{
$t('common.param.outputParam')
}}<span style="color: #f54a45">{{
item.is_submit ? '' : `(${$t('chat.executionDetails.noSubmit')})`
}}</span>
@ -543,7 +544,7 @@
class="border-r-6 mr-8"
/>
<span v-else>{{ h.text }}<br /></span>
<span v-else>{{ h.text }}<br/></span>
</template>
</span>
@ -640,7 +641,155 @@
</div>
</div>
</template>
<template v-if="item.type == WorkflowType.TextToVideoGenerateNode">
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.currentChat') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ item.question || '-' }}
</div>
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{
$t(
'views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label',
)
}}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ item.negative_prompt || '-' }}
</div>
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{
item.type == WorkflowType.Application
? $t('common.param.outputParam')
: $t('chat.executionDetails.answer')
}}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<MdPreview
v-if="item.answer"
ref="editorRef"
editorId="preview-only"
:modelValue="item.answer"
style="background: none"
noImgZoomIn
/>
<template v-else> -</template>
</div>
</div>
</template>
<template v-if="item.type == WorkflowType.ImageToVideoGenerateNode">
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{ $t('chat.executionDetails.currentChat') }}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ item.question || '-' }}
</div>
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{
$t(
'views.applicationWorkflow.nodes.imageGenerateNode.negative_prompt.label',
)
}}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ item.negative_prompt || '-' }}
</div>
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.label')
}}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
<div v-if="typeof item.first_frame_url === 'string'">
<el-image
:src="item.first_frame_url"
alt=""
fit="cover" style="width: 40px; height: 40px; display: block"
class="border-r-6"
/>
</div>
<div v-else-if="Array.isArray(item.first_frame_url)">
<el-space wrap>
<template v-for="(f, i) in item.first_frame_url" :key="i">
<el-image
:src="f.url"
alt=""
fit="cover" style="width: 40px; height: 40px; display: block"
class="border-r-6"
/>
</template>
</el-space>
</div>
</div>
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.label')
}}
</h5>
<div class="p-8-12 border-t-dashed lighter pre-wrap">
<div v-if="typeof item.last_frame_url === 'string'">
<el-image
:src="item.last_frame_url"
alt=""
fit="cover" style="width: 40px; height: 40px; display: block"
class="border-r-6"
/>
</div>
<div v-else-if="Array.isArray(item.last_frame_url)">
<el-space wrap>
<template v-for="(f, i) in item.last_frame_url" :key="i">
<el-image
:src="f.url"
alt=""
fit="cover" style="width: 40px; height: 40px; display: block"
class="border-r-6"
/>
</template>
</el-space>
</div>
<div v-else>
-
</div>
</div>
</div>
<div class="card-never border-r-6 mt-8">
<h5 class="p-8-12">
{{
item.type == WorkflowType.Application
? $t('common.param.outputParam')
: $t('chat.executionDetails.answer')
}}
</h5>
<div class="p-8-12 border-t-dashed lighter">
<MdPreview
v-if="item.answer"
ref="editorRef"
editorId="preview-only"
:modelValue="item.answer"
style="background: none"
noImgZoomIn
/>
<template v-else> -</template>
</div>
</div>
</template>
<!-- 变量赋值 -->
<template v-if="item.type === WorkflowType.VariableAssignNode">
<div class="card-never border-r-6">
@ -779,14 +928,15 @@
</el-scrollbar>
</template>
<script setup lang="ts">
import { ref, computed } from 'vue'
import ParagraphCard from '@/components/ai-chat/component/knowledge-source-component/ParagraphCard.vue'
import { arraySort } from '@/utils/array'
import { iconComponent } from '@/workflow/icons/utils'
import { WorkflowType } from '@/enums/application'
import { getImgUrl } from '@/utils/common'
import {ref, computed} from 'vue'
import ParagraphCard
from '@/components/ai-chat/component/knowledge-source-component/ParagraphCard.vue'
import {arraySort} from '@/utils/array'
import {iconComponent} from '@/workflow/icons/utils'
import {WorkflowType} from '@/enums/application'
import {getImgUrl} from '@/utils/common'
import DynamicsForm from '@/components/dynamics-form/index.vue'
import { isWorkFlow } from '@/utils/application'
import {isWorkFlow} from '@/utils/application'
const props = defineProps<{
detail?: any[]
@ -860,6 +1010,7 @@ const AiResponse = computed(() => {
<style lang="scss" scoped>
.execution-details {
max-height: calc(100vh - 260px);
.arrow-icon {
transition: 0.2s;
}

View File

@ -14,7 +14,7 @@ export enum WorkflowType {
Reply = 'reply-node',
ToolLib = 'tool-lib-node',
ToolLibCustom = 'tool-node',
RrerankerNode = 'reranker-node',
RerankerNode = 'reranker-node',
Application = 'application-node',
DocumentExtractNode = 'document-extract-node',
ImageUnderstandNode = 'image-understand-node',
@ -25,4 +25,7 @@ export enum WorkflowType {
ImageGenerateNode = 'image-generate-node',
McpNode = 'mcp-node',
IntentNode = 'intent-node',
TextToVideoGenerateNode = 'text-to-video-node',
ImageToVideoGenerateNode = 'image-to-video-node'
}

View File

@ -14,5 +14,7 @@ export enum modelType {
TTS = 'views.model.modelType.TTS',
IMAGE = 'views.model.modelType.IMAGE',
TTI = 'views.model.modelType.TTI',
RERANKER = 'views.model.modelType.RERANKER'
RERANKER = 'views.model.modelType.RERANKER',
TTV = 'views.model.modelType.TTV',
ITV = 'views.model.modelType.ITV',
}

View File

@ -252,6 +252,50 @@ export default {
'Please describe content you do not want to generate, such as color, bloody content',
},
},
textToVideoGenerate: {
label: 'Text-to-Video',
text: 'Generate video based on provided text content',
answer: 'AI Response Content',
model: {
label: 'Text-to-Video Model',
requiredMessage: 'Please select a text-to-video model',
},
prompt: {
label: 'Prompt (Positive)',
tooltip: 'Positive prompt, used to describe elements and visual features expected in the generated video',
},
negative_prompt: {
label: 'Prompt (Negative)',
tooltip: 'Negative prompt, used to describe content you don\'t want to see in the video, which can restrict the video generation',
placeholder: 'Please describe video content you don\'t want to generate, such as: colors, bloody content',
},
},
imageToVideoGenerate: {
label: 'Image-to-Video',
text: 'Generate video based on provided images',
answer: 'AI Response Content',
model: {
label: 'Image-to-Video Model',
requiredMessage: 'Please select an image-to-video model',
},
prompt: {
label: 'Prompt (Positive)',
tooltip: 'Positive prompt, used to describe elements and visual features expected in the generated video',
},
negative_prompt: {
label: 'Prompt (Negative)',
tooltip: 'Negative prompt, used to describe content you don\'t want to see in the video, which can restrict the video generation',
placeholder: 'Please describe video content you don\'t want to generate, such as: colors, bloody content',
},
first_frame: {
label: 'First Frame Image',
requiredMessage: 'Please select the first frame image',
},
last_frame: {
label: 'Last Frame Image',
requiredMessage: 'Please select the last frame image',
},
},
speechToTextNode: {
label: 'Speech2Text',
text: 'Convert audio to text through speech recognition model',

View File

@ -31,6 +31,8 @@ export default {
TTS: 'TTS',
IMAGE: 'Vision Model',
TTI: 'Image Generation',
TTV: 'Text-to-Video',
ITV: 'Image-to-Video',
},
modelForm: {
title: {

View File

@ -257,6 +257,50 @@ export default {
placeholder: '请描述不想生成的图片内容,比如:颜色、血腥内容',
},
},
textToVideoGenerate: {
label: '文生视频',
text: '根据提供的文本内容生成视频',
answer: 'AI 回答内容',
model: {
label: '文生视频模型',
requiredMessage: '请选择文生视频模型',
},
prompt: {
label: '提示词(正向)',
tooltip: '正向提示词,用来描述生成视频中期望包含的元素和视觉特点',
},
negative_prompt: {
label: '提示词(负向)',
tooltip: '反向提示词,用来描述不希望在视频中看到的内容,可以对视频进行限制。',
placeholder: '请描述不想生成的视频内容,比如:颜色、血腥内容',
},
},
imageToVideoGenerate: {
label: '图生视频',
text: '根据提供的图片生成视频',
answer: 'AI 回答内容',
model: {
label: '图生视频模型',
requiredMessage: '请选择图生视频模型',
},
prompt: {
label: '提示词(正向)',
tooltip: '正向提示词,用来描述生成视频中期望包含的元素和视觉特点',
},
negative_prompt: {
label: '提示词(负向)',
tooltip: '反向提示词,用来描述不希望在视频中看到的内容,可以对视频进行限制。',
placeholder: '请描述不想生成的视频内容,比如:颜色、血腥内容',
},
first_frame: {
label: '首帧图片',
requiredMessage: '请选择首帧图片',
},
last_frame: {
label: '尾帧图片',
requiredMessage: '请选择尾帧图片',
},
},
speechToTextNode: {
label: '语音转文本',
text: '将音频通过语音识别模型转换为文本',

View File

@ -29,6 +29,8 @@ export default {
TTS: '语音合成',
IMAGE: '视觉模型',
TTI: '图片生成',
TTV: '文生视频',
ITV: '图生视频',
},
modelForm: {
title: {

View File

@ -251,6 +251,50 @@ export default {
placeholder: '請描述不想生成的圖片內容,比如:顏色、血腥內容',
},
},
textToVideoGenerate: {
label: '文生影片',
text: '根據提供的文字內容生成影片',
answer: 'AI 回答內容',
model: {
label: '文生影片模型',
requiredMessage: '請選擇文生影片模型',
},
prompt: {
label: '提示詞(正向)',
tooltip: '正向提示詞,用來描述生成影片中期望包含的元素和視覺特點',
},
negative_prompt: {
label: '提示詞(負向)',
tooltip: '反向提示詞,用來描述不希望在影片中看到的內容,可以對影片進行限制。',
placeholder: '請描述不想生成的影片內容,例如:顏色、血腥內容',
},
},
imageToVideoGenerate: {
label: '圖生影片',
text: '根據提供的圖片生成影片',
answer: 'AI 回答內容',
model: {
label: '圖生影片模型',
requiredMessage: '請選擇圖生影片模型',
},
prompt: {
label: '提示詞(正向)',
tooltip: '正向提示詞,用來描述生成影片中期望包含的元素和視覺特點',
},
negative_prompt: {
label: '提示詞(負向)',
tooltip: '反向提示詞,用來描述不希望在影片中看到的內容,可以對影片進行限制。',
placeholder: '請描述不想生成的影片內容,例如:顏色、血腥內容',
},
first_frame: {
label: '首幀圖片',
requiredMessage: '請選擇首幀圖片',
},
last_frame: {
label: '尾幀圖片',
requiredMessage: '請選擇尾幀圖片',
},
},
speechToTextNode: {
label: '語音轉文本',
text: '將音頻通過語音識別模型轉換為文本',

View File

@ -29,6 +29,8 @@ export default {
TTS: '語音合成',
IMAGE: '圖片理解',
TTI: '圖片生成',
TTV: '文生視頻',
ITV: '圖生視頻',
},
modelForm: {
title: {

View File

@ -174,7 +174,7 @@ export const replyNode = {
},
}
export const rerankerNode = {
type: WorkflowType.RrerankerNode,
type: WorkflowType.RerankerNode,
text: t('views.applicationWorkflow.nodes.rerankerNode.text'),
label: t('views.applicationWorkflow.nodes.rerankerNode.label'),
height: 252,
@ -381,6 +381,42 @@ export const intentNode = {
},
},
}
export const imageToVideoNode = {
type: WorkflowType.ImageToVideoGenerateNode,
text: t('views.applicationWorkflow.nodes.imageToVideoGenerate.text'),
label: t('views.applicationWorkflow.nodes.imageToVideoGenerate.label'),
height: 252,
properties: {
stepName: t('views.applicationWorkflow.nodes.imageToVideoGenerate.label'),
config: {
fields: [
{
label: t('common.fileUpload.video'),
value: 'video',
},
],
},
},
}
export const textToVideoNode = {
type: WorkflowType.TextToVideoGenerateNode,
text: t('views.applicationWorkflow.nodes.textToVideoGenerate.text'),
label: t('views.applicationWorkflow.nodes.textToVideoGenerate.label'),
height: 252,
properties: {
stepName: t('views.applicationWorkflow.nodes.textToVideoGenerate.label'),
config: {
fields: [
{
label: t('common.fileUpload.video'),
value: 'video',
},
],
},
},
}
export const menuNodes = [
{
label: t('views.applicationWorkflow.nodes.classify.aiCapability'),
@ -392,6 +428,8 @@ export const menuNodes = [
imageUnderstandNode,
textToSpeechNode,
speechToTextNode,
textToVideoNode,
imageToVideoNode
],
},
{ label: t('views.knowledge.title'), list: [searchKnowledgeNode, rerankerNode] },
@ -445,9 +483,6 @@ export const applicationNode = {
},
}
export const compareList = [
{ value: 'is_null', label: t('views.applicationWorkflow.compare.is_null') },
{ value: 'is_not_null', label: t('views.applicationWorkflow.compare.is_not_null') },
@ -477,7 +512,7 @@ export const nodeDict: any = {
[WorkflowType.Reply]: replyNode,
[WorkflowType.ToolLib]: toolNode,
[WorkflowType.ToolLibCustom]: toolNode,
[WorkflowType.RrerankerNode]: rerankerNode,
[WorkflowType.RerankerNode]: rerankerNode,
[WorkflowType.FormNode]: formNode,
[WorkflowType.Application]: applicationNode,
[WorkflowType.DocumentExtractNode]: documentExtractNode,
@ -487,6 +522,8 @@ export const nodeDict: any = {
[WorkflowType.ImageGenerateNode]: imageGenerateNode,
[WorkflowType.VariableAssignNode]: variableAssignNode,
[WorkflowType.McpNode]: mcpNode,
[WorkflowType.TextToVideoGenerateNode]: textToVideoNode,
[WorkflowType.ImageToVideoGenerateNode]: imageToVideoNode,
[WorkflowType.IntentNode]: intentNode,
}
export function isWorkFlow(type: string | undefined) {

View File

@ -11,6 +11,8 @@ const end_nodes: Array<string> = [
WorkflowType.SpeechToTextNode,
WorkflowType.TextToSpeechNode,
WorkflowType.ImageGenerateNode,
WorkflowType.ImageToVideoGenerateNode,
WorkflowType.TextToVideoGenerateNode,
]
export class WorkFlowInstance {
nodes

View File

@ -0,0 +1,6 @@
<template>
<el-avatar shape="square">
<img src="@/assets/workflow/icon_image_to_video.svg" style="width: 100%" alt="" />
</el-avatar>
</template>
<script setup lang="ts"></script>

View File

@ -0,0 +1,6 @@
<template>
<el-avatar shape="square">
<img src="@/assets/workflow/icon_text_to_video.svg" style="width: 100%" alt="" />
</el-avatar>
</template>
<script setup lang="ts"></script>

View File

@ -0,0 +1,14 @@
import ImageToVideoNodeVue from './index.vue'
import { AppNode, AppNodeModel } from '@/workflow/common/app-node'

// Registers the image-to-video generation node with the workflow engine,
// binding the node type to its Vue settings panel.
class ImageToVideoNode extends AppNode {
  constructor(props: any) {
    super(props, ImageToVideoNodeVue)
  }
}

export default {
  type: 'image-to-video-node',
  model: AppNodeModel,
  view: ImageToVideoNode,
}

View File

@ -0,0 +1,333 @@
<template>
<NodeContainer :node-model="nodeModel">
<h5 class="title-decoration-1 mb-8">{{ $t('views.applicationWorkflow.nodeSetting') }}</h5>
<el-card shadow="never" class="card-never">
<el-form
@submit.prevent
:model="form_data"
label-position="top"
require-asterisk-position="right"
label-width="auto"
ref="aiChatNodeFormRef"
hide-required-asterisk
>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.model.label')"
prop="model_id"
:rules="{
required: true,
message: $t('views.applicationWorkflow.nodes.imageToVideoGenerate.model.requiredMessage'),
trigger: 'change',
}"
>
<template #label>
<div class="flex-between w-full">
<div>
<span
>{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.model.label')
}}<span class="color-danger">*</span></span
>
</div>
<el-button
:disabled="!form_data.model_id"
type="primary"
link
@click="openAIParamSettingDialog(form_data.model_id)"
@refreshForm="refreshParam"
>
<AppIcon iconName="app-setting"></AppIcon>
</el-button>
</div>
</template>
<ModelSelect
@change="model_change"
@wheel="wheel"
:teleported="false"
v-model="form_data.model_id"
:placeholder="
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.model.requiredMessage')
"
:options="modelOptions"
showFooter
:model-type="'ITV'"
></ModelSelect>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.prompt.label')"
prop="prompt"
:rules="{
required: true,
message: $t('views.application.form.prompt.requiredMessage'),
trigger: 'blur',
}"
>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span
>{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.prompt.label')
}}<span class="color-danger">*</span></span
>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content
>{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.prompt.tooltip') }}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<MdEditorMagnify
@wheel="wheel"
:title="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.prompt.label')"
v-model="form_data.prompt"
style="height: 150px"
@submitDialog="submitDialog"
/>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.negative_prompt.label')"
prop="prompt"
:rules="{
required: false,
message: $t('views.application.form.prompt.requiredMessage'),
trigger: 'blur',
}"
>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>{{
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.negative_prompt.label')
}}</span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content
>{{
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.negative_prompt.tooltip')
}}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<MdEditorMagnify
@wheel="wheel"
:title="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.negative_prompt.label')"
v-model="form_data.negative_prompt"
:placeholder="
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.negative_prompt.placeholder')
"
style="height: 150px"
@submitDialog="submitNegativeDialog"
/>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.label')"
:rules="{
type: 'array',
required: true,
message: $t(
'views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.requiredMessage',
),
trigger: 'change',
}"
>
<template #label
>{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.label')
}}<span class="color-danger">*</span></template
>
<NodeCascader
ref="nodeCascaderRef"
:nodeModel="nodeModel"
class="w-full"
:placeholder="
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.first_frame.requiredMessage')
"
v-model="form_data.first_frame_url"
/>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.label')"
:rules="{
type: 'array',
required: false,
message: $t(
'views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.requiredMessage',
),
trigger: 'change',
}"
>
<template #label
>{{ $t('views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.label')
}}</template
>
<NodeCascader
ref="nodeCascaderRef"
:nodeModel="nodeModel"
class="w-full"
:placeholder="
$t('views.applicationWorkflow.nodes.imageToVideoGenerate.last_frame.requiredMessage')
"
v-model="form_data.last_frame_url"
/>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')"
@click.prevent
>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>{{
$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')
}}</span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content>
{{ $t('views.applicationWorkflow.nodes.aiChatNode.returnContent.tooltip') }}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="form_data.is_result" />
</el-form-item>
</el-form>
</el-card>
<AIModeParamSettingDialog ref="AIModeParamSettingDialogRef" @refresh="refreshParam" />
</NodeContainer>
</template>
<script setup lang="ts">
import NodeContainer from '@/workflow/common/NodeContainer.vue'
import { computed, nextTick, onMounted, ref, inject } from 'vue'
import { groupBy, set } from 'lodash'
import type { FormInstance } from 'element-plus'
import AIModeParamSettingDialog from '@/views/application/component/AIModeParamSettingDialog.vue'
import { t } from '@/locales'
import { useRoute } from 'vue-router'
import { loadSharedApi } from '@/utils/dynamics-api/shared-api'
import NodeCascader from "@/workflow/common/NodeCascader.vue";
const getApplicationDetail = inject('getApplicationDetail') as any
const route = useRoute()
const {
params: { id },
} = route as any
const apiType = computed(() => {
if (route.path.includes('resource-management')) {
return 'systemManage'
} else {
return 'workspace'
}
})
const props = defineProps<{ nodeModel: any }>()
const modelOptions = ref<any>(null)
const AIModeParamSettingDialogRef = ref<InstanceType<typeof AIModeParamSettingDialog>>()
const aiChatNodeFormRef = ref<FormInstance>()
const validate = () => {
return aiChatNodeFormRef.value?.validate().catch((err) => {
return Promise.reject({ node: props.nodeModel, errMessage: err })
})
}
const wheel = (e: any) => {
if (e.ctrlKey === true) {
e.preventDefault()
return true
} else {
e.stopPropagation()
return true
}
}
const defaultPrompt = `{{${t('views.applicationWorkflow.nodes.startNode.label')}.question}}`
const form = {
model_id: '',
system: '',
prompt: defaultPrompt,
negative_prompt: '',
dialogue_number: 0,
dialogue_type: 'NODE',
is_result: true,
temperature: null,
max_tokens: null,
first_frame_url: ['start-node', 'image'],
last_frame_url: [],
}
const form_data = computed({
get: () => {
if (props.nodeModel.properties.node_data) {
return props.nodeModel.properties.node_data
} else {
set(props.nodeModel.properties, 'node_data', form)
}
return props.nodeModel.properties.node_data
},
set: (value) => {
set(props.nodeModel.properties, 'node_data', value)
},
})
const application = getApplicationDetail()
function getSelectModel() {
const obj =
apiType.value === 'systemManage'
? {
model_type: 'ITV',
workspace_id: application.value?.workspace_id,
}
: {
model_type: 'ITV',
}
loadSharedApi({ type: 'model', systemType: apiType.value })
.getSelectModelList(obj)
.then((res: any) => {
modelOptions.value = groupBy(res?.data, 'provider')
})
}
const model_change = () => {
nextTick(() => {
if (form_data.value.model_id) {
AIModeParamSettingDialogRef.value?.reset_default(form_data.value.model_id, id)
} else {
refreshParam({})
}
})
}
const openAIParamSettingDialog = (modelId: string) => {
if (modelId) {
AIModeParamSettingDialogRef.value?.open(modelId, id, form_data.value.model_params_setting)
}
}
function refreshParam(data: any) {
set(props.nodeModel.properties.node_data, 'model_params_setting', data)
}
function submitDialog(val: string) {
set(props.nodeModel.properties.node_data, 'prompt', val)
}
function submitNegativeDialog(val: string) {
set(props.nodeModel.properties.node_data, 'negative_prompt', val)
}
onMounted(() => {
getSelectModel()
set(props.nodeModel, 'validate', validate)
})
</script>
<style scoped lang="scss"></style>

View File

@ -0,0 +1,14 @@
import VideoGenerateNodeVue from './index.vue'
import {AppNode, AppNodeModel} from '@/workflow/common/app-node'
class TextToVideoNode extends AppNode {
constructor(props: any) {
super(props, VideoGenerateNodeVue)
}
}
export default {
type: 'text-to-video-node',
model: AppNodeModel,
view: TextToVideoNode
}

View File

@ -0,0 +1,280 @@
<template>
<NodeContainer :node-model="nodeModel">
<h5 class="title-decoration-1 mb-8">{{ $t('views.applicationWorkflow.nodeSetting') }}</h5>
<el-card shadow="never" class="card-never">
<el-form
@submit.prevent
:model="form_data"
label-position="top"
require-asterisk-position="right"
label-width="auto"
ref="aiChatNodeFormRef"
hide-required-asterisk
>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.textToVideoGenerate.model.label')"
prop="model_id"
:rules="{
required: true,
message: $t('views.applicationWorkflow.nodes.textToVideoGenerate.model.requiredMessage'),
trigger: 'change',
}"
>
<template #label>
<div class="flex-between w-full">
<div>
<span
>{{ $t('views.applicationWorkflow.nodes.textToVideoGenerate.model.label')
}}<span class="color-danger">*</span></span
>
</div>
<el-button
:disabled="!form_data.model_id"
type="primary"
link
@click="openAIParamSettingDialog(form_data.model_id)"
@refreshForm="refreshParam"
>
<AppIcon iconName="app-setting"></AppIcon>
</el-button>
</div>
</template>
<!-- Model picker for this node. NOTE: this is the text-to-video node and
     getSelectModel() requests model_type 'TTV'; the previous value 'TTI'
     (text-to-image) was a copy-paste leftover and made the selector's type
     filter disagree with the loaded options. -->
<ModelSelect
  @change="model_change"
  @wheel="wheel"
  :teleported="false"
  v-model="form_data.model_id"
  :placeholder="
    $t('views.applicationWorkflow.nodes.textToVideoGenerate.model.requiredMessage')
  "
  :options="modelOptions"
  showFooter
  :model-type="'TTV'"
></ModelSelect>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.textToVideoGenerate.prompt.label')"
prop="prompt"
:rules="{
required: true,
message: $t('views.application.form.prompt.requiredMessage'),
trigger: 'blur',
}"
>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span
>{{ $t('views.applicationWorkflow.nodes.textToVideoGenerate.prompt.label')
}}<span class="color-danger">*</span></span
>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content
>{{ $t('views.applicationWorkflow.nodes.textToVideoGenerate.prompt.tooltip') }}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<MdEditorMagnify
@wheel="wheel"
:title="$t('views.applicationWorkflow.nodes.textToVideoGenerate.prompt.label')"
v-model="form_data.prompt"
style="height: 150px"
@submitDialog="submitDialog"
/>
</el-form-item>
<!-- Negative-prompt field. The prop previously said "prompt" (copy-paste from
     the prompt field above), which pointed validation/reset at the wrong model
     key; it must match the bound field "negative_prompt". -->
<el-form-item
  :label="$t('views.applicationWorkflow.nodes.textToVideoGenerate.negative_prompt.label')"
  prop="negative_prompt"
  :rules="{
    required: false,
    message: $t('views.application.form.prompt.requiredMessage'),
    trigger: 'blur',
  }"
>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>{{
$t('views.applicationWorkflow.nodes.textToVideoGenerate.negative_prompt.label')
}}</span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content
>{{
$t('views.applicationWorkflow.nodes.textToVideoGenerate.negative_prompt.tooltip')
}}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<MdEditorMagnify
@wheel="wheel"
:title="$t('views.applicationWorkflow.nodes.textToVideoGenerate.negative_prompt.label')"
v-model="form_data.negative_prompt"
:placeholder="
$t('views.applicationWorkflow.nodes.textToVideoGenerate.negative_prompt.placeholder')
"
style="height: 150px"
@submitDialog="submitNegativeDialog"
/>
</el-form-item>
<el-form-item
:label="$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')"
@click.prevent
>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>{{
$t('views.applicationWorkflow.nodes.aiChatNode.returnContent.label')
}}</span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content>
{{ $t('views.applicationWorkflow.nodes.aiChatNode.returnContent.tooltip') }}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="form_data.is_result" />
</el-form-item>
</el-form>
</el-card>
<AIModeParamSettingDialog ref="AIModeParamSettingDialogRef" @refresh="refreshParam" />
</NodeContainer>
</template>
<script setup lang="ts">
import NodeContainer from '@/workflow/common/NodeContainer.vue'
import { computed, nextTick, onMounted, ref, inject } from 'vue'
import { groupBy, set } from 'lodash'
import type { FormInstance } from 'element-plus'
import AIModeParamSettingDialog from '@/views/application/component/AIModeParamSettingDialog.vue'
import { t } from '@/locales'
import { useRoute } from 'vue-router'
import { loadSharedApi } from '@/utils/dynamics-api/shared-api'
// Accessor for the current application, provided by an ancestor component.
const getApplicationDetail = inject('getApplicationDetail') as any
const route = useRoute()
// Application id taken from the current route params.
const {
  params: { id },
} = route as any
// Distinguish the system resource-management view from the workspace view;
// this changes which API surface getSelectModel() queries.
const apiType = computed(() => {
  if (route.path.includes('resource-management')) {
    return 'systemManage'
  } else {
    return 'workspace'
  }
})
const props = defineProps<{ nodeModel: any }>()
// Models grouped by provider, filled by getSelectModel().
const modelOptions = ref<any>(null)
const AIModeParamSettingDialogRef = ref<InstanceType<typeof AIModeParamSettingDialog>>()
const aiChatNodeFormRef = ref<FormInstance>()
// Validate the node form; on failure, reject with the node attached so the
// workflow editor can highlight which node failed.
const validate = () => {
  const formInstance = aiChatNodeFormRef.value
  return formInstance
    ?.validate()
    .catch((err) => Promise.reject({ node: props.nodeModel, errMessage: err }))
}
// Wheel handler for embedded editors: with Ctrl held, suppress the default
// browser action; otherwise keep the event from bubbling up to the canvas.
const wheel = (e: any) => {
  if (e.ctrlKey === true) {
    e.preventDefault()
  } else {
    e.stopPropagation()
  }
  return true
}
// Default prompt references the start node's question variable.
const defaultPrompt = `{{${t('views.applicationWorkflow.nodes.startNode.label')}.question}}`
// Initial node_data for a freshly created text-to-video node.
// NOTE(review): system/dialogue_number/dialogue_type/temperature/max_tokens
// look carried over from the AI-chat node's form and are not referenced by
// this component — confirm whether the backend serializer needs them.
const form = {
  model_id: '',
  system: '',
  prompt: defaultPrompt,
  negative_prompt: '',
  dialogue_number: 0,
  dialogue_type: 'NODE',
  is_result: true,
  temperature: null,
  max_tokens: null,
}
// Two-way proxy over the node's persisted form data; seeds the default form
// into node_data on first access so the template always has values to bind.
const form_data = computed({
  get: () => {
    const { properties } = props.nodeModel
    if (!properties.node_data) {
      set(properties, 'node_data', form)
    }
    return properties.node_data
  },
  set: (value) => {
    set(props.nodeModel.properties, 'node_data', value)
  },
})
const application = getApplicationDetail()
// Fetch the selectable text-to-video (TTV) models and group them by provider
// for the model dropdown. System-manage scope additionally filters by workspace.
function getSelectModel() {
  const query: Record<string, any> = { model_type: 'TTV' }
  if (apiType.value === 'systemManage') {
    query.workspace_id = application.value?.workspace_id
  }
  loadSharedApi({ type: 'model', systemType: apiType.value })
    .getSelectModelList(query)
    .then((res: any) => {
      modelOptions.value = groupBy(res?.data, 'provider')
    })
}
// After the selected model changes, reset its default parameter settings,
// or clear the stored settings when the selection was emptied.
const model_change = () => {
  nextTick(() => {
    const modelId = form_data.value.model_id
    if (!modelId) {
      refreshParam({})
      return
    }
    AIModeParamSettingDialogRef.value?.reset_default(modelId, id)
  })
}
// Open the model-parameter dialog for the given model (no-op without a model).
const openAIParamSettingDialog = (modelId: string) => {
  if (!modelId) {
    return
  }
  AIModeParamSettingDialogRef.value?.open(modelId, id, form_data.value.model_params_setting)
}
// Persist updated model parameter settings onto the node's data.
function refreshParam(settings: any) {
  set(props.nodeModel.properties.node_data, 'model_params_setting', settings)
}
// Store the prompt text edited in the magnified editor dialog.
function submitDialog(content: string) {
  set(props.nodeModel.properties.node_data, 'prompt', content)
}
// Store the negative-prompt text edited in the magnified editor dialog.
function submitNegativeDialog(content: string) {
  set(props.nodeModel.properties.node_data, 'negative_prompt', content)
}
onMounted(() => {
  // Load the model dropdown options and expose this node's validate()
  // so the workflow runner can validate the form before execution.
  getSelectModel()
  set(props.nodeModel, 'validate', validate)
})
</script>
<style scoped lang="scss"></style>