mirror of https://github.com/1Panel-dev/MaxKB.git
synced 2025-12-26 01:33:05 +00:00

refactor: image model get_num_tokens override
(cherry picked from commit c46b7ab094)

parent 9acc11786c
commit e6e4b68100
@@ -2,12 +2,11 @@
 
 from typing import Dict
 
-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi
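Each constructor hunk in this commit also adds stream_usage=True. In langchain-openai, this flag asks the server to append a final usage chunk to streamed responses (the commented-out stream_options={"include_usage": True} line is the lower-level equivalent), so the aggregated message chunk carries usage_metadata. A minimal sketch; model name and prompt are placeholders, not taken from the diff:

from langchain_openai import ChatOpenAI

# Hedged sketch: with stream_usage=True, streamed responses end with a usage
# chunk, so the aggregated AIMessageChunk exposes usage_metadata.
llm = ChatOpenAI(model="gpt-4o-mini", streaming=True, stream_usage=True)

aggregate = None
for chunk in llm.stream("Describe the attached image in one sentence."):
    aggregate = chunk if aggregate is None else aggregate + chunk

# Populated only because stream_usage=True was set above.
print(aggregate.usage_metadata)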
@@ -1,7 +1,7 @@
-from typing import Dict
+from typing import Dict, List
 
+from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_openai import AzureChatOpenAI
 from langchain_openai.chat_models import ChatOpenAI
 
+from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -26,3 +26,17 @@ class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI):
             streaming=True,
             **optional_params,
         )
+
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        try:
+            return super().get_num_tokens_from_messages(messages)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        try:
+            return super().get_num_tokens(text)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))
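AzureOpenAIImage extends AzureChatOpenAI rather than ChatOpenAI, so it receives these overrides inline instead of through BaseChatOpenAI. The shared base_chat_open_ai.py itself is not part of this diff; assuming its token-counting overrides mirror the Azure ones above, a minimal sketch:

from typing import List

from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI

from common.config.tokenizer_manage_config import TokenizerManage


class BaseChatOpenAI(ChatOpenAI):
    # Sketch only: method bodies copied from the AzureOpenAIImage overrides
    # above; the real MaxKB base class may differ.

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        try:
            # Native counting (tiktoken) works for known OpenAI model names.
            return super().get_num_tokens_from_messages(messages)
        except Exception:
            # Fall back to MaxKB's local tokenizer for unknown model names.
            tokenizer = TokenizerManage.get_tokenizer()
            return sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages)

    def get_num_tokens(self, text: str) -> int:
        try:
            return super().get_num_tokens(text)
        except Exception:
            tokenizer = TokenizerManage.get_tokenizer()
            return len(tokenizer.encode(text))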
@@ -1,15 +1,8 @@
 from typing import Dict
 from urllib.parse import urlparse, ParseResult
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
 def get_base_url(url: str):
@@ -20,7 +13,7 @@ def get_base_url(url: str):
     return result_url[:-1] if result_url.endswith("/") else result_url
 
 
-class OllamaImage(MaxKBBaseModel, ChatOpenAI):
+class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -34,5 +27,6 @@ class OllamaImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
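The untouched get_base_url helper is only partially visible across these hunks. A hedged reconstruction from the urlparse/ParseResult import and the one visible return line; the body here is inferred for illustration, not copied from the repository:

from urllib.parse import urlparse, ParseResult


def get_base_url(url: str):
    # Inferred body: reduce a full endpoint URL to scheme://host[:port]/path
    # and strip any trailing slash, matching the visible return statement.
    parse = urlparse(url)
    result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path,
                             params='', query='', fragment='').geturl()
    return result_url[:-1] if result_url.endswith("/") else result_url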
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
+class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
@@ -2,12 +2,11 @@
 
 from typing import Dict
 
-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class TencentVision(MaxKBBaseModel, ChatOpenAI):
+class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class TencentVision(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
+class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base=model_credential.get('api_base'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
+class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
@@ -1,17 +1,10 @@
 from typing import Dict
 
-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 
 
-class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
+class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://open.bigmodel.cn/api/paas/v4',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
        )
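A hedged usage sketch of the net effect. The new_instance signature and credential keys follow the hunks above; the model name, endpoint, and import path (file names are not shown in this mirror view) are placeholders:

# from setting.models_provider.impl....model.image import OllamaImage  # path elided in this view
llm = OllamaImage.new_instance(
    'IMAGE',
    'llava:13b',
    {'api_base': 'http://localhost:11434/v1', 'api_key': 'ollama'},
)

# Before this commit, counting tokens for a non-OpenAI model name could raise
# inside tiktoken; with BaseChatOpenAI it falls back to the local tokenizer.
print(llm.get_num_tokens('How many tokens is this sentence?'))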