diff --git a/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py b/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py index 2cf4ffaf7..e2664940c 100644 --- a/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py +++ b/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py @@ -18,6 +18,6 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) return chat_tong_yi diff --git a/apps/models_provider/impl/ollama_model_provider/model/image.py b/apps/models_provider/impl/ollama_model_provider/model/image.py index c2cdd7210..340fd66e5 100644 --- a/apps/models_provider/impl/ollama_model_provider/model/image.py +++ b/apps/models_provider/impl/ollama_model_provider/model/image.py @@ -28,5 +28,5 @@ class OllamaImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) diff --git a/apps/models_provider/impl/openai_model_provider/model/image.py b/apps/models_provider/impl/openai_model_provider/model/image.py index 7dc6d8c12..57f345cf9 100644 --- a/apps/models_provider/impl/openai_model_provider/model/image.py +++ b/apps/models_provider/impl/openai_model_provider/model/image.py @@ -16,5 +16,5 @@ class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) diff --git a/apps/models_provider/impl/openai_model_provider/model/llm.py b/apps/models_provider/impl/openai_model_provider/model/llm.py index 2d5b76af7..f4a252a26 100644 --- a/apps/models_provider/impl/openai_model_provider/model/llm.py +++ b/apps/models_provider/impl/openai_model_provider/model/llm.py @@ -35,8 +35,8 @@ class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI): streaming = False 
chat_open_ai = OpenAIChatModel( model=model_name, - openai_api_base=model_credential.get('api_base'), - openai_api_key=model_credential.get('api_key'), + base_url=model_credential.get('api_base'), + api_key=model_credential.get('api_key'), extra_body=optional_params, streaming=streaming, custom_get_token_ids=custom_get_token_ids diff --git a/apps/models_provider/impl/qwen_model_provider/__init__.py b/apps/models_provider/impl/qwen_model_provider/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/apps/models_provider/impl/qwen_model_provider/credential/__init__.py b/apps/models_provider/impl/qwen_model_provider/credential/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/apps/models_provider/impl/qwen_model_provider/model/__init__.py b/apps/models_provider/impl/qwen_model_provider/model/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/apps/models_provider/impl/siliconCloud_model_provider/model/image.py b/apps/models_provider/impl/siliconCloud_model_provider/model/image.py index 3f4efc095..e644c1277 100644 --- a/apps/models_provider/impl/siliconCloud_model_provider/model/image.py +++ b/apps/models_provider/impl/siliconCloud_model_provider/model/image.py @@ -16,5 +16,5 @@ class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) diff --git a/apps/models_provider/impl/tencent_cloud_model_provider/model/llm.py b/apps/models_provider/impl/tencent_cloud_model_provider/model/llm.py index 216ca8752..dc962e491 100644 --- a/apps/models_provider/impl/tencent_cloud_model_provider/model/llm.py +++ b/apps/models_provider/impl/tencent_cloud_model_provider/model/llm.py @@ -33,21 +33,8 @@ class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI): model=model_name, openai_api_base=model_credential.get('api_base'), openai_api_key=model_credential.get('api_key'), - 
**optional_params, + extra_body=optional_params, custom_get_token_ids=custom_get_token_ids ) return azure_chat_open_ai - def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: - try: - return super().get_num_tokens_from_messages(messages) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) - - def get_num_tokens(self, text: str) -> int: - try: - return super().get_num_tokens(text) - except Exception as e: - tokenizer = TokenizerManage.get_tokenizer() - return len(tokenizer.encode(text)) diff --git a/apps/models_provider/impl/tencent_model_provider/model/image.py b/apps/models_provider/impl/tencent_model_provider/model/image.py index 17b2d4cee..f093012a4 100644 --- a/apps/models_provider/impl/tencent_model_provider/model/image.py +++ b/apps/models_provider/impl/tencent_model_provider/model/image.py @@ -11,10 +11,10 @@ class TencentVision(MaxKBBaseModel, BaseChatOpenAI): optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs) return TencentVision( model_name=model_name, - openai_api_base='https://api.hunyuan.cloud.tencent.com/v1', - openai_api_key=model_credential.get('api_key'), + base_url='https://api.hunyuan.cloud.tencent.com/v1', + api_key=model_credential.get('api_key'), # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) diff --git a/apps/models_provider/impl/vllm_model_provider/model/image.py b/apps/models_provider/impl/vllm_model_provider/model/image.py index 450567bf7..9dac5b4aa 100644 --- a/apps/models_provider/impl/vllm_model_provider/model/image.py +++ b/apps/models_provider/impl/vllm_model_provider/model/image.py @@ -19,7 +19,7 @@ class VllmImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) def is_cache_model(self): diff --git 
a/apps/models_provider/impl/volcanic_engine_model_provider/model/image.py b/apps/models_provider/impl/volcanic_engine_model_provider/model/image.py index dea663f1f..1c95f66a4 100644 --- a/apps/models_provider/impl/volcanic_engine_model_provider/model/image.py +++ b/apps/models_provider/impl/volcanic_engine_model_provider/model/image.py @@ -16,5 +16,5 @@ class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) diff --git a/apps/models_provider/impl/xf_model_provider/xf_model_provider.py b/apps/models_provider/impl/xf_model_provider/xf_model_provider.py index c93351e59..7bcf4fcfb 100644 --- a/apps/models_provider/impl/xf_model_provider/xf_model_provider.py +++ b/apps/models_provider/impl/xf_model_provider/xf_model_provider.py @@ -27,15 +27,15 @@ from django.utils.translation import gettext as _ ssl._create_default_https_context = ssl.create_default_context() -qwen_model_credential = XunFeiLLMModelCredential() +xunfei_model_credential = XunFeiLLMModelCredential() stt_model_credential = XunFeiSTTModelCredential() image_model_credential = XunFeiImageModelCredential() tts_model_credential = XunFeiTTSModelCredential() embedding_model_credential = XFEmbeddingCredential() model_info_list = [ - ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), - ModelInfo('generalv3', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), - ModelInfo('generalv2', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), + ModelInfo('generalv3.5', '', ModelTypeConst.LLM, xunfei_model_credential, XFChatSparkLLM), + ModelInfo('generalv3', '', ModelTypeConst.LLM, xunfei_model_credential, XFChatSparkLLM), + ModelInfo('generalv2', '', ModelTypeConst.LLM, xunfei_model_credential, XFChatSparkLLM), ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), 
ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech), ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding) @@ -45,7 +45,7 @@ model_info_manage = ( ModelInfoManage.builder() .append_model_info_list(model_info_list) .append_default_model_info( - ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM)) + ModelInfo('generalv3.5', '', ModelTypeConst.LLM, xunfei_model_credential, XFChatSparkLLM)) .append_default_model_info( ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), ) diff --git a/apps/models_provider/impl/xinference_model_provider/model/image.py b/apps/models_provider/impl/xinference_model_provider/model/image.py index 3b634ebfa..dc24223fd 100644 --- a/apps/models_provider/impl/xinference_model_provider/model/image.py +++ b/apps/models_provider/impl/xinference_model_provider/model/image.py @@ -19,7 +19,7 @@ class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int: diff --git a/apps/models_provider/impl/zhipu_model_provider/model/image.py b/apps/models_provider/impl/zhipu_model_provider/model/image.py index 7a84aade2..16366bd6f 100644 --- a/apps/models_provider/impl/zhipu_model_provider/model/image.py +++ b/apps/models_provider/impl/zhipu_model_provider/model/image.py @@ -16,5 +16,5 @@ class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI): # stream_options={"include_usage": True}, streaming=True, stream_usage=True, - **optional_params, + extra_body=optional_params ) diff --git a/apps/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py b/apps/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py index e71e01b17..4d4c0825a 100644 --- 
a/apps/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py +++ b/apps/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py @@ -20,14 +20,14 @@ from models_provider.impl.zhipu_model_provider.model.tti import ZhiPuTextToImage from maxkb.conf import PROJECT_DIR from django.utils.translation import gettext as _ -qwen_model_credential = ZhiPuLLMModelCredential() +zhipu_model_credential = ZhiPuLLMModelCredential() zhipu_image_model_credential = ZhiPuImageModelCredential() zhipu_tti_model_credential = ZhiPuTextToImageModelCredential() model_info_list = [ - ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel), - ModelInfo('glm-4v', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel), - ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel) + ModelInfo('glm-4', '', ModelTypeConst.LLM, zhipu_model_credential, ZhipuChatModel), + ModelInfo('glm-4v', '', ModelTypeConst.LLM, zhipu_model_credential, ZhipuChatModel), + ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, zhipu_model_credential, ZhipuChatModel) ] model_info_image_list = [ @@ -57,7 +57,7 @@ model_info_tti_list = [ model_info_manage = ( ModelInfoManage.builder() .append_model_info_list(model_info_list) - .append_default_model_info(ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel)) + .append_default_model_info(ModelInfo('glm-4', '', ModelTypeConst.LLM, zhipu_model_credential, ZhipuChatModel)) .append_model_info_list(model_info_image_list) .append_default_model_info(model_info_image_list[0]) .append_model_info_list(model_info_tti_list)