diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/image.py b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
index 2ccb04f69..b1f7a7847 100644
--- a/apps/setting/models_provider/impl/openai_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
@@ -17,9 +17,10 @@ class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
         return OpenAIImage(
-            model=model_name,
+            model_name=model_name,
             openai_api_base=model_credential.get('api_base'),
             openai_api_key=model_credential.get('api_key'),
-            stream_options={"include_usage": True},
+            # stream_options={"include_usage": True},
+            streaming=True,
             **optional_params,
         )
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
index 60e209154..57598fe97 100644
--- a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
@@ -13,10 +13,11 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
         chat_tong_yi = QwenVLChatModel(
-            model=model_name,
+            model_name=model_name,
             openai_api_key=model_credential.get('api_key'),
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
-            stream_options={"include_usage": True},
+            # stream_options={"include_usage": True},
+            streaming=True,
             model_kwargs=optional_params,
         )
         return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
index eb7a00f61..273fdd52a 100644
--- a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
@@ -17,9 +17,10 @@ class TencentVision(MaxKBBaseModel, ChatOpenAI):
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
         return TencentVision(
-            model=model_name,
+            model_name=model_name,
             openai_api_base='https://api.hunyuan.cloud.tencent.com/v1',
             openai_api_key=model_credential.get('api_key'),
-            stream_options={"include_usage": True},
+            # stream_options={"include_usage": True},
+            streaming=True,
             **optional_params,
         )
diff --git a/ui/src/workflow/nodes/image-understand/index.vue b/ui/src/workflow/nodes/image-understand/index.vue
index 4c7252ad9..6ca722d29 100644
--- a/ui/src/workflow/nodes/image-understand/index.vue
+++ b/ui/src/workflow/nodes/image-understand/index.vue
@@ -193,6 +193,7 @@
 import applicationApi from '@/api/application'
 import { app } from '@/main'
 import useStore from '@/stores'
 import NodeCascader from '@/workflow/common/NodeCascader.vue'
+import type { FormInstance } from 'element-plus'
 
 const { model } = useStore()
@@ -204,6 +205,12 @@
 const props = defineProps<{ nodeModel: any }>()
 const modelOptions = ref(null)
 const providerOptions = ref<Array<any>>([])
+const aiChatNodeFormRef = ref<FormInstance>()
+const validate = () => {
+  return aiChatNodeFormRef.value?.validate().catch((err) => {
+    return Promise.reject({ node: props.nodeModel, errMessage: err })
+  })
+}
 
 const wheel = (e: any) => {
   if (e.ctrlKey === true) {
@@ -277,6 +284,8 @@ function submitDialog(val: string) {
 
 onMounted(() => {
   getModel()
   getProvider()
+
+  set(props.nodeModel, 'validate', validate)
 })
 