diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
index c1f63788c..2b1fe31f2 100644
--- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
@@ -2,12 +2,11 @@
 
 from typing import Dict
 
-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/azure_model_provider/model/image.py b/apps/setting/models_provider/impl/azure_model_provider/model/image.py
index fac01473d..14abab3af 100644
--- a/apps/setting/models_provider/impl/azure_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/azure_model_provider/model/image.py
@@ -1,7 +1,7 @@
-from typing import Dict
+from typing import Dict, List
 
+from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_openai import AzureChatOpenAI
-from langchain_openai.chat_models import ChatOpenAI
 
 from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -26,3 +26,17 @@ class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI):
             streaming=True,
             **optional_params,
         )
+
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        try:
+            return super().get_num_tokens_from_messages(messages)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        try:
+            return super().get_num_tokens(text)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
index 8a764dea8..4cf0f1d56 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
@@ -1,15 +1,8 @@
 from typing import Dict
 from urllib.parse import urlparse, ParseResult
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
 def get_base_url(url: str):
@@ -20,7 +13,7 @@ def get_base_url(url: str):
     return result_url[:-1] if result_url.endswith("/") else result_url
 
 
-class OllamaImage(MaxKBBaseModel, ChatOpenAI):
+class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name,
                      model_credential: Dict[str, object], **model_kwargs):
@@ -34,5 +27,6 @@ class OllamaImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/image.py b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
index b1f7a7847..731f476c4 100644
--- a/apps/setting/models_provider/impl/openai_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
+class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name,
                      model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
index c1f63788c..2b1fe31f2 100644
--- a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
@@ -2,12 +2,11 @@
 
 from typing import Dict
 
-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
index 273fdd52a..1b66ab6d2 100644
--- a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class TencentVision(MaxKBBaseModel, ChatOpenAI):
+class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name,
                      model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class TencentVision(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
index 3cc467611..39446b4e1 100644
--- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
+class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name,
                      model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base=model_credential.get('api_base'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/image.py b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
index 1b696b8cf..f51a64ec4 100644
--- a/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
+class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name,
                      model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
index d1658a0c0..f13c71538 100644
--- a/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
-
-
-class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
+class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name,
                      model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://open.bigmodel.cn/api/paas/v4',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
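
Note: the shared base class this patch imports everywhere, setting/models_provider/impl/base_chat_open_ai.py, is referenced but not included above. A minimal sketch of what it presumably contains, assuming it subclasses langchain_openai's ChatOpenAI and centralizes the same tokenizer fallback that AzureOpenAIImage gains inline (Azure cannot reuse it because it must keep AzureChatOpenAI as its base):

# Assumed contents of setting/models_provider/impl/base_chat_open_ai.py.
# This is a sketch only; the file is imported by the patch but not shown in it.
from typing import List

from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI

from common.config.tokenizer_manage_config import TokenizerManage


class BaseChatOpenAI(ChatOpenAI):

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        # Prefer tiktoken-based counting; fall back to the local tokenizer for
        # models tiktoken does not know (Ollama, DashScope, Xinference, ...).
        try:
            return super().get_num_tokens_from_messages(messages)
        except Exception:
            tokenizer = TokenizerManage.get_tokenizer()
            return sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages)

    def get_num_tokens(self, text: str) -> int:
        try:
            return super().get_num_tokens(text)
        except Exception:
            tokenizer = TokenizerManage.get_tokenizer()
            return len(tokenizer.encode(text))

Setting stream_usage=True makes langchain-openai request usage itself (it sends stream_options={"include_usage": True}), so token counts arrive on the final streamed chunk; that is presumably why the hand-written stream_options lines stay commented out.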