From a3fca88e73017b854a4747d51640789cbffdccdb Mon Sep 17 00:00:00 2001
From: shaohuzhang1
Date: Fri, 8 Mar 2024 14:05:29 +0800
Subject: [PATCH] feat: add the baichuan2:13b-chat model to the ollama platform
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../impl/ollama_model_provider/ollama_model_provider.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
index 6958d9d43..1316de063 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
@@ -82,6 +82,10 @@ model_dict = {
         'llama2-chinese:13b-maxkb',
         '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。fi2cloud专用',
         ModelTypeConst.LLM, ollama_llm_model_credential),
+    'baichuan2:13b-chat': ModelInfo(
+        'baichuan2:13b-chat',
+        'Baichuan 2 是百川智能推出的新一代开源大语言模型，采用 2.6 万亿 Tokens 的高质量语料训练，在权威的中文和英文 benchmark 上均取得同尺寸最好的效果',
+        ModelTypeConst.LLM, ollama_llm_model_credential),
 }
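
Note (not part of the patch): for reviewers unfamiliar with this provider, below is a minimal, self-contained sketch of the registration pattern the hunk extends. The `ModelInfo`, `ModelTypeConst`, and `ollama_llm_model_credential` definitions shown here are stand-ins for the real ones elsewhere under apps/setting/models_provider and are only assumptions about their shape, inferred from how they are used in the hunk.

# Sketch only: mirrors the dict-entry shape added by this patch; the real
# classes may have different fields or behavior.
from dataclasses import dataclass
from enum import Enum


class ModelTypeConst(Enum):
    LLM = 'LLM'


@dataclass
class ModelInfo:
    name: str            # Ollama model tag, e.g. 'baichuan2:13b-chat'
    description: str     # user-facing description shown in the model picker
    model_type: ModelTypeConst
    credential: object   # credential/form object shared by Ollama LLM models


ollama_llm_model_credential = object()  # placeholder for the real credential form

model_dict = {
    'baichuan2:13b-chat': ModelInfo(
        'baichuan2:13b-chat',
        'Baichuan 2 is a new-generation open-source LLM trained on 2.6T tokens',
        ModelTypeConst.LLM, ollama_llm_model_credential),
}

# The provider presumably resolves a supported model by its Ollama tag:
info = model_dict.get('baichuan2:13b-chat')
assert info is not None and info.model_type is ModelTypeConst.LLM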