From f82ba3c4b856181cfdda27df77084844f2799e31 Mon Sep 17 00:00:00 2001
From: wxg0103 <727495428@qq.com>
Date: Thu, 27 Mar 2025 10:18:43 +0800
Subject: [PATCH] fix: qwq-plus only supports streaming

---
 .../impl/aliyun_bai_lian_model_provider/model/llm.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
index 223282386..28014c350 100644
--- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
@@ -14,7 +14,7 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
-        if 'qwen-omni-turbo' in model_name:
+        if 'qwen-omni-turbo' in model_name or 'qwq-plus' in model_name:
             optional_params['streaming'] = True
         return BaiLianChatModel(
             model=model_name,
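
Note: the hunk extends the existing qwen-omni-turbo special case. Per the commit
subject, the Bailian qwq-plus endpoint only supports streaming, so the factory
forces streaming on regardless of what the caller configured. A standalone
sketch of the gate this patch adds, assuming nothing about MaxKB beyond what
the diff shows (force_streaming below is a hypothetical helper written for
illustration; the real logic lives inline in BaiLianChatModel.new_instance):

    from typing import Any, Dict

    # Bailian models that only accept streaming completion calls.
    STREAM_ONLY_MARKERS = ('qwen-omni-turbo', 'qwq-plus')

    def force_streaming(model_name: str,
                        optional_params: Dict[str, Any]) -> Dict[str, Any]:
        # Mirrors the patched branch: substring matching, so dated
        # snapshots such as 'qwq-plus-2025-03-05' would match as well.
        if any(marker in model_name for marker in STREAM_ONLY_MARKERS):
            optional_params['streaming'] = True
        return optional_params

    assert force_streaming('qwq-plus', {})['streaming'] is True
    assert 'streaming' not in force_streaming('qwen-max', {})

Because the check uses substring containment rather than equality, any model
name containing 'qwq-plus' is covered, matching how the pre-existing
'qwen-omni-turbo' case behaves.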