refactor: update model params

wxg0103 2024-08-16 12:22:26 +08:00 committed by wxg0103
parent b596b691ec
commit 93bfb45782
6 changed files with 32 additions and 3 deletions

View File

@@ -25,6 +25,10 @@ def get_base_url(url: str):
 class OllamaChatModel(MaxKBBaseModel, ChatOpenAI):
+    @staticmethod
+    def is_cache_model():
+        return False
+
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         api_base = model_credential.get('api_base', '')
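Each touched provider now overrides `is_cache_model()` to return `False`, which signals that a configured instance should not be reused from a model cache once its parameters can change between requests. A minimal sketch of how such a flag might be consulted; the `_cache` registry and `get_model` helper below are assumptions for illustration, not MaxKB's actual internals:

```python
from typing import Dict

# Hypothetical registry showing how an is_cache_model() flag could gate reuse.
# The names here (_cache, get_model) are assumptions, not MaxKB's actual code.
_cache: Dict[str, object] = {}

def get_model(cache_key: str, model_class, **kwargs):
    if model_class.is_cache_model() and cache_key in _cache:
        return _cache[cache_key]                    # reuse a cached instance
    instance = model_class.new_instance(**kwargs)   # otherwise build a fresh one
    if model_class.is_cache_model():
        _cache[cache_key] = instance
    return instance
```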

View File

@@ -13,6 +13,11 @@ from setting.models_provider.base_model_provider import MaxKBBaseModel
 class OpenAIChatModel(MaxKBBaseModel, ChatOpenAI):
+    @staticmethod
+    def is_cache_model():
+        return False
+
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = {}

View File

@@ -57,4 +57,13 @@ class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
             'precision': 2,
             'tooltip': 'Higher values make the output more random, while lower values make it more focused and deterministic'
         },
+        'max_tokens': {
+            'value': 800,
+            'min': 1,
+            'max': 2048,
+            'step': 1,
+            'label': 'Maximum output tokens',
+            'precision': 0,
+            'tooltip': 'Specifies the maximum number of tokens the model can generate'
+        }
     }
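The new slider only defines the UI side; the selected value presumably still travels through `model_kwargs` into each provider's `new_instance`, where it is copied into `optional_params`. A hedged sketch of that hand-off (the helper below is illustrative, not MaxKB's exact code):

```python
from typing import Dict

def build_optional_params(model_kwargs: Dict[str, object]) -> Dict[str, object]:
    # Forward only the parameters the credential form defines; the form's
    # 'value' entries (e.g. 800 for max_tokens) are what the UI submits
    # when the user leaves a slider untouched.
    optional_params = {}
    for key in ('max_tokens', 'temperature'):
        if model_kwargs.get(key) is not None:
            optional_params[key] = model_kwargs[key]
    return optional_params
```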

View File

@@ -19,6 +19,10 @@ from setting.models_provider.base_model_provider import MaxKBBaseModel
 class QwenChatModel(MaxKBBaseModel, ChatTongyi):
+    @staticmethod
+    def is_cache_model():
+        return False
+
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = {}
@@ -29,7 +33,7 @@ class QwenChatModel(MaxKBBaseModel, ChatTongyi):
         chat_tong_yi = QwenChatModel(
             model_name=model_name,
             dashscope_api_key=model_credential.get('api_key'),
-            **optional_params,
+            model_kwargs=optional_params,
         )
         return chat_tong_yi
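The switch from `**optional_params` to `model_kwargs=optional_params` changes where the values land: unpacking passes them as top-level constructor arguments, which pydantic-style models reject for undeclared fields, while `model_kwargs` forwards the whole dict to the underlying Tongyi request. A toy illustration of the difference; the class below is a stand-in, not LangChain's ChatTongyi:

```python
from typing import Optional

class ToyChatModel:
    # Stand-in for a chat model that declares only two constructor parameters.
    def __init__(self, model_name: str, model_kwargs: Optional[dict] = None):
        self.model_name = model_name
        self.model_kwargs = model_kwargs or {}

optional_params = {'temperature': 0.7, 'max_tokens': 800}

# Works: the whole dict travels through the declared model_kwargs parameter.
ok = ToyChatModel(model_name='qwen-turbo', model_kwargs=optional_params)

# Fails: temperature/max_tokens are not declared constructor parameters.
try:
    ToyChatModel(model_name='qwen-turbo', **optional_params)
except TypeError as err:
    print(err)  # ... got an unexpected keyword argument 'temperature'
```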
@@ -61,7 +65,7 @@ class QwenChatModel(MaxKBBaseModel, ChatTongyi):
                 if (
                         choice["finish_reason"] == "stop"
                         and message["content"] == ""
-                ):
+                ) or (choice["finish_reason"] == "length"):
                     token_usage = stream_resp["usage"]
                     self.__dict__.setdefault('_last_generation_info', {}).update(token_usage)
                 if (
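The added `or` clause makes the streaming loop record token usage when generation stops at the token limit (`finish_reason == "length"`), not only on a clean `stop` with an empty trailing delta; without it, a response truncated by the new `max_tokens` setting would leave `_last_generation_info` without usage data. Condensed, the condition is:

```python
def should_record_usage(choice: dict, message: dict) -> bool:
    # Usage arrives on the final streamed chunk: either a normal stop with an
    # empty trailing delta, or a truncation at the max-token limit.
    return (
        choice["finish_reason"] == "stop" and message["content"] == ""
    ) or choice["finish_reason"] == "length"
```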

View File

@@ -9,6 +9,9 @@ from setting.models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan
 class TencentModel(MaxKBBaseModel, ChatHunyuan):
+    @staticmethod
+    def is_cache_model():
+        return False
 
     def __init__(self, model_name: str, credentials: Dict[str, str], streaming: bool = False, **kwargs):
         hunyuan_app_id = credentials.get('hunyuan_app_id')
@@ -25,7 +28,7 @@ class TencentModel(MaxKBBaseModel, ChatHunyuan):
         super().__init__(model=model_name, hunyuan_app_id=hunyuan_app_id, hunyuan_secret_id=hunyuan_secret_id,
                          hunyuan_secret_key=hunyuan_secret_key, streaming=streaming,
-                         temperature=optional_params.get('temperature', None)
+                         temperature=optional_params.get('temperature', 1.0)
                          )
 
     @staticmethod
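Defaulting the temperature to `1.0` rather than `None` matters if `ChatHunyuan` declares `temperature` as a plain float field: passing `None` when the user never set a value can fail type validation, while `1.0` is a neutral default. A small sketch of the distinction, using a generic typed stand-in rather than the real ChatHunyuan:

```python
from dataclasses import dataclass

@dataclass
class ToyConfig:
    # Stand-in for a typed model config; the real class validates via pydantic.
    temperature: float = 1.0

optional_params: dict = {}

# Old behaviour: .get('temperature', None) propagates None into a float field,
# which a validating model may reject.
old_value = optional_params.get('temperature', None)   # -> None

# New behaviour: a concrete default always satisfies the declared type.
cfg = ToyConfig(temperature=optional_params.get('temperature', 1.0))
print(old_value, cfg.temperature)  # None 1.0
```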

View File

@@ -6,6 +6,10 @@ from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
 class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI):
+    @staticmethod
+    def is_cache_model():
+        return False
+
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = {}