diff --git a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
index 6f0fc942c..a69083010 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py
@@ -87,6 +87,46 @@ model_info_list = [
         'qwen:110b',
         'qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。',
         ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2-72b-instruct',
+        '通义千问 Qwen2 的 720亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2-57b-a14b-instruct',
+        '通义千问 Qwen2 的 570亿参数 MoE 指令微调模型,推理时激活约140亿参数。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2-7b-instruct',
+        '通义千问 Qwen2 的 70亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-72b-instruct',
+        '通义千问 Qwen2.5 的 720亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-32b-instruct',
+        '通义千问 Qwen2.5 的 320亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-14b-instruct',
+        '通义千问 Qwen2.5 的 140亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-7b-instruct',
+        '通义千问 Qwen2.5 的 70亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-1.5b-instruct',
+        '通义千问 Qwen2.5 的 15亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-0.5b-instruct',
+        '通义千问 Qwen2.5 的 5亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
+    ModelInfo(
+        'qwen2.5-3b-instruct',
+        '通义千问 Qwen2.5 的 30亿参数指令微调模型。',
+        ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
     ModelInfo(
         'phi3',
         'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。',
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py
index 06b0dbba3..208a8a6f8 100644
--- a/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py
+++ b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py
@@ -194,15 +194,71 @@ model_info_list = [
         XinferenceChatModel
     ),
     ModelInfo(
-        'qwen2-moe-instruct',
-        'Qwen2 MOE Instruct 是 Qwen2 的指令微调版本,专为执行特定任务而设计。',
+        'qwen2-72b-instruct',
+        'Qwen2 72B Instruct 是 Qwen2 的720亿参数指令微调版本,专为执行特定任务而设计。',
         ModelTypeConst.LLM,
         xinference_llm_model_credential,
         XinferenceChatModel
     ),
     ModelInfo(
-        'qwen2-vl-instruct',
-        'Qwen2 VL Instruct 是 Qwen2 的指令微调版本,专为执行特定任务而设计。',
+        'qwen2-57b-a14b-instruct',
+        'Qwen2 57B-A14B Instruct 是 Qwen2 的 MoE 指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2-7b-instruct',
+        'Qwen2 7B Instruct 是 Qwen2 的70亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-72b-instruct',
+        'Qwen2.5 72B Instruct 是 Qwen2.5 的720亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-32b-instruct',
+        'Qwen2.5 32B Instruct 是 Qwen2.5 的320亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-14b-instruct',
+        'Qwen2.5 14B Instruct 是 Qwen2.5 的140亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-7b-instruct',
+        'Qwen2.5 7B Instruct 是 Qwen2.5 的70亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-1.5b-instruct',
+        'Qwen2.5 1.5B Instruct 是 Qwen2.5 的15亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-0.5b-instruct',
+        'Qwen2.5 0.5B Instruct 是 Qwen2.5 的5亿参数指令微调版本,专为执行特定任务而设计。',
+        ModelTypeConst.LLM,
+        xinference_llm_model_credential,
+        XinferenceChatModel
+    ),
+    ModelInfo(
+        'qwen2.5-3b-instruct',
+        'Qwen2.5 3B Instruct 是 Qwen2.5 的30亿参数指令微调版本,专为执行特定任务而设计。',
         ModelTypeConst.LLM,
         xinference_llm_model_credential,
         XinferenceChatModel