diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
index 6484e649e..223282386 100644
--- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
@@ -5,6 +5,7 @@ from typing import Dict
 from setting.models_provider.base_model_provider import MaxKBBaseModel
 from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
+
 class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def is_cache_model():
@@ -13,7 +14,7 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
-        if model_name == 'qwen-omni-turbo':
+        if 'qwen-omni-turbo' in model_name:
             optional_params['streaming'] = True
         return BaiLianChatModel(
             model=model_name,