Mirror of https://github.com/1Panel-dev/MaxKB.git (synced 2025-12-26 01:33:05 +00:00)
fix: vLLM provider token recalculation (#2375)
commit 6b72611b72 (parent 2d4deda6b4)
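The commit overrides get_num_tokens_from_messages and get_num_tokens in both vLLM model classes (VllmImage and VllmChatModel): when the server's response carries usage metadata, those counts are used directly; otherwise the token counts fall back to a local tokenizer.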
diff — VllmImage:

@@ -1,5 +1,8 @@
-from typing import Dict
+from typing import Dict, List
 
+from langchain_core.messages import get_buffer_string, BaseMessage
+
+from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
 from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
@@ -21,3 +24,15 @@ class VllmImage(MaxKBBaseModel, BaseChatOpenAI):
 
     def is_cache_model(self):
         return False
+
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        if self.usage_metadata is None or self.usage_metadata == {}:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+        return self.usage_metadata.get('input_tokens', 0)
+
+    def get_num_tokens(self, text: str) -> int:
+        if self.usage_metadata is None or self.usage_metadata == {}:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))
+        return self.get_last_generation_info().get('output_tokens', 0)
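Both overrides follow the same fallback: trust the usage metadata reported by the server when it is present, and only tokenize locally when it is missing. A minimal runnable sketch of that pattern — SimpleTokenizer and FakeModel are hypothetical stand-ins for TokenizerManage.get_tokenizer() and the model classes above, not MaxKB code:

# Hypothetical stand-ins; the real code uses MaxKB's shared tokenizer.
from typing import Dict, List, Optional


class SimpleTokenizer:
    def encode(self, text: str) -> List[int]:
        # Whitespace split stands in for a real subword tokenizer.
        return [hash(token) for token in text.split()]


class FakeModel:
    def __init__(self, usage_metadata: Optional[Dict] = None):
        # Populated from the streamed response when the server reports
        # usage (stream_usage=True); None or empty otherwise.
        self.usage_metadata = usage_metadata

    def get_num_tokens_from_messages(self, texts: List[str]) -> int:
        if self.usage_metadata is None or self.usage_metadata == {}:
            tokenizer = SimpleTokenizer()
            # Fallback: count each message locally and sum.
            return sum(len(tokenizer.encode(t)) for t in texts)
        # Preferred path: the server already counted the prompt tokens.
        return self.usage_metadata.get('input_tokens', 0)


print(FakeModel().get_num_tokens_from_messages(["hi there", "ok"]))   # 3 (local fallback)
print(FakeModel({'input_tokens': 42}).get_num_tokens_from_messages(["hi there", "ok"]))  # 42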
diff — VllmChatModel:

@@ -1,8 +1,11 @@
 # coding=utf-8
 
-from typing import Dict
+from typing import Dict, List
 from urllib.parse import urlparse, ParseResult
 
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
 from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
@@ -33,3 +36,15 @@ class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI):
             stream_usage=True,
         )
         return vllm_chat_open_ai
+
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        if self.usage_metadata is None or self.usage_metadata == {}:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+        return self.usage_metadata.get('input_tokens', 0)
+
+    def get_num_tokens(self, text: str) -> int:
+        if self.usage_metadata is None or self.usage_metadata == {}:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))
+        return self.get_last_generation_info().get('output_tokens', 0)
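One detail worth noting: the fallback encodes each message separately via get_buffer_string([m]), LangChain's helper that renders a message as role-prefixed text ("Human: ...", "AI: ..."), so the role prefixes are counted as part of the input. A quick illustration, assuming langchain_core is installed; the whitespace split is a hypothetical stand-in for the real tokenizer's encode:

from langchain_core.messages import AIMessage, HumanMessage, get_buffer_string

messages = [HumanMessage(content="How many tokens is this?"),
            AIMessage(content="A few.")]

# get_buffer_string([m]) renders one message, e.g. "Human: How many tokens is this?"
for m in messages:
    print(get_buffer_string([m]))

# Per-message encode-and-sum, mirroring the new overrides (split() stands in
# for TokenizerManage.get_tokenizer().encode):
total = sum(len(get_buffer_string([m]).split()) for m in messages)
print(total)  # 9 -- the "Human:" and "AI:" prefixes are counted too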