Mirror of https://github.com/1Panel-dev/MaxKB.git (synced 2025-12-26 01:33:05 +00:00)
chore: model provider drop down options
This commit is contained in:
parent 3480a8fda4
commit 0cc1d00140
@@ -15,17 +15,23 @@ from setting.models_provider.impl.deepseek_model_provider.credential.llm import
 from setting.models_provider.impl.deepseek_model_provider.model.llm import DeepSeekChatModel
 from smartdoc.conf import PROJECT_DIR
 from django.utils.translation import gettext as _

 deepseek_llm_model_credential = DeepSeekLLMModelCredential()

-deepseek_chat = ModelInfo('deepseek-chat', _('Good at common conversational tasks, supports 32K contexts'), ModelTypeConst.LLM,
+deepseek_reasoner = ModelInfo('deepseek-reasoner', '', ModelTypeConst.LLM,
+                              deepseek_llm_model_credential, DeepSeekChatModel
+                              )
+
+deepseek_chat = ModelInfo('deepseek-chat', _('Good at common conversational tasks, supports 32K contexts'),
+                          ModelTypeConst.LLM,
                           deepseek_llm_model_credential, DeepSeekChatModel
                           )

-deepseek_coder = ModelInfo('deepseek-coder', _('Good at handling programming tasks, supports 16K contexts'), ModelTypeConst.LLM,
+deepseek_coder = ModelInfo('deepseek-coder', _('Good at handling programming tasks, supports 16K contexts'),
+                           ModelTypeConst.LLM,
                            deepseek_llm_model_credential,
                            DeepSeekChatModel)

-model_info_manage = ModelInfoManage.builder().append_model_info(deepseek_chat).append_model_info(
+model_info_manage = ModelInfoManage.builder().append_model_info(deepseek_reasoner).append_model_info(deepseek_chat).append_model_info(
    deepseek_coder).append_default_model_info(
    deepseek_coder).build()
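The hunk above follows the registration pattern already visible in this file: each ModelInfo takes a model name, a description, a type constant, the shared credential object, and the chat-model class, and the entries are chained onto ModelInfoManage.builder() before build(). A minimal sketch of adding one more DeepSeek entry in the same way is shown below; 'deepseek-example' is hypothetical, and every other identifier is taken from the hunk itself.

# Hypothetical sketch only -- 'deepseek-example' is not part of this commit.
# Positional arguments mirror the entries above: name shown in the drop-down,
# description, model type, shared credential object, chat-model implementation.
deepseek_example = ModelInfo('deepseek-example', '', ModelTypeConst.LLM,
                             deepseek_llm_model_credential, DeepSeekChatModel)

# Same builder chain as in the hunk, with the extra entry appended;
# deepseek-coder remains the default model.
model_info_manage = (ModelInfoManage.builder()
                     .append_model_info(deepseek_reasoner)
                     .append_model_info(deepseek_chat)
                     .append_model_info(deepseek_coder)
                     .append_model_info(deepseek_example)
                     .append_default_model_info(deepseek_coder)
                     .build())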
@@ -28,6 +28,27 @@ from django.utils.translation import gettext as _

 ollama_llm_model_credential = OllamaLLMModelCredential()
 model_info_list = [
+    ModelInfo(
+        'deepseek-r1:1.5b',
+        '',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'deepseek-r1:7b',
+        '',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'deepseek-r1:8b',
+        '',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'deepseek-r1:14b',
+        '',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'deepseek-r1:32b',
+        '',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+
     ModelInfo(
         'llama2',
         _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 7B pretrained models. Links to other models can be found in the index at the bottom.'),
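The Ollama hunk uses the same positional ModelInfo form with an empty description; the first argument is presumably the Ollama model tag (the name `ollama pull` would use). Adding a further DeepSeek-R1 size would mean one more element inside model_info_list in the same shape, as in the hypothetical sketch below; 'deepseek-r1:70b' is not part of this commit, and the other names are the ones already used in the hunk.

    # Hypothetical additional element for model_info_list (not part of this commit):
    ModelInfo(
        'deepseek-r1:70b',   # Ollama model tag to expose in the drop-down (hypothetical)
        '',                  # description left empty, as for the other deepseek-r1 sizes
        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),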