From 6f6b16341626ee3ce555e10e82f0768a05f37949 Mon Sep 17 00:00:00 2001 From: wxg0103 <727495428@qq.com> Date: Fri, 18 Apr 2025 17:45:15 +0800 Subject: [PATCH] feat: add model setting --- apps/common/config/embedding_config.py | 34 +- apps/common/utils/common.py | 12 +- apps/locales/en_US/LC_MESSAGES/django.po | 2094 +++++++++++++++- apps/locales/zh_CN/LC_MESSAGES/django.po | 2199 +++++++++++++++- apps/locales/zh_Hant/LC_MESSAGES/django.po | 2211 ++++++++++++++++- apps/models_provider/api/model.py | 56 +- apps/models_provider/api/provide.py | 60 +- .../credential/llm.py | 2 +- apps/models_provider/serializers/model.py | 181 -- .../serializers/model_serializer.py | 389 +++ apps/models_provider/tools.py | 2 - apps/models_provider/urls.py | 21 +- apps/models_provider/views/model.py | 125 +- apps/models_provider/views/provide.py | 35 + 14 files changed, 7031 insertions(+), 390 deletions(-) delete mode 100644 apps/models_provider/serializers/model.py create mode 100644 apps/models_provider/serializers/model_serializer.py diff --git a/apps/common/config/embedding_config.py b/apps/common/config/embedding_config.py index 98c391e96..6b66569d9 100644 --- a/apps/common/config/embedding_config.py +++ b/apps/common/config/embedding_config.py @@ -47,20 +47,20 @@ class ModelManage: ModelManage.cache.delete(_id) -class VectorStore: - from embedding.vector.pg_vector import PGVector - from embedding.vector.base_vector import BaseVectorStore - instance_map = { - 'pg_vector': PGVector, - } - instance = None - - @staticmethod - def get_embedding_vector() -> BaseVectorStore: - from embedding.vector.pg_vector import PGVector - if VectorStore.instance is None: - from maxkb.const import CONFIG - vector_store_class = VectorStore.instance_map.get(CONFIG.get("VECTOR_STORE_NAME"), - PGVector) - VectorStore.instance = vector_store_class() - return VectorStore.instance +# class VectorStore: +# from embedding.vector.pg_vector import PGVector +# from embedding.vector.base_vector import 
BaseVectorStore +# instance_map = { +# 'pg_vector': PGVector, +# } +# instance = None +# +# @staticmethod +# def get_embedding_vector() -> BaseVectorStore: +# from embedding.vector.pg_vector import PGVector +# if VectorStore.instance is None: +# from maxkb.const import CONFIG +# vector_store_class = VectorStore.instance_map.get(CONFIG.get("VECTOR_STORE_NAME"), +# PGVector) +# VectorStore.instance = vector_store_class() +# return VectorStore.instance diff --git a/apps/common/utils/common.py b/apps/common/utils/common.py index 98341221d..680ec602f 100644 --- a/apps/common/utils/common.py +++ b/apps/common/utils/common.py @@ -13,7 +13,8 @@ import io import mimetypes import re import shutil -from typing import List +from functools import reduce +from typing import List, Dict from django.core.files.uploadedfile import InMemoryUploadedFile from django.utils.translation import gettext as _ @@ -50,13 +51,13 @@ def group_by(list_source: List, key): return result - CHAR_SET = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] def get_random_chars(number=6): return "".join([CHAR_SET[random.randint(0, len(CHAR_SET) - 1)] for index in range(number)]) + def encryption(message: str): """ 加密敏感字段数据 加密方式是 如果密码是 1234567890 那么给前端则是 123******890 @@ -122,7 +123,6 @@ def get_file_content(path): return content - def bytes_to_uploaded_file(file_bytes, file_name="file.txt"): content_type, _ = mimetypes.guess_type(file_name) if content_type is None: @@ -205,3 +205,9 @@ def split_and_transcribe(file_path, model, max_segment_length_ms=59000, audio_fo full_text.append(text) return ' '.join(full_text) + +def query_params_to_single_dict(query_params: Dict): + return reduce(lambda x, y: {**x, **y}, list( + filter(lambda item: item is not None, [({key: value} if value is not None and len(value) > 0 else None) for + key, value in + query_params.items()])), {}) diff --git a/apps/locales/en_US/LC_MESSAGES/django.po b/apps/locales/en_US/LC_MESSAGES/django.po index c92ca05e2..e544bca1e 100644 --- 
a/apps/locales/en_US/LC_MESSAGES/django.po +++ b/apps/locales/en_US/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-04-14 20:02+0800\n" +"POT-Creation-Date: 2025-04-18 17:06+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,97 +17,2087 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: .\apps\common\auth\authenticate.py:63 .\apps\common\auth\authenticate.py:84 +#: common/auth/authenticate.py:80 msgid "Not logged in, please log in first" msgstr "" -#: .\apps\common\auth\authenticate.py:69 .\apps\common\auth\authenticate.py:75 -#: .\apps\common\auth\authenticate.py:90 .\apps\common\auth\authenticate.py:96 +#: common/auth/authenticate.py:82 common/auth/authenticate.py:89 +#: common/auth/authenticate.py:95 msgid "Authentication information is incorrect! illegal user" msgstr "" -#: .\apps\common\auth\handle\impl\user_token.py:30 +#: common/auth/authentication.py:96 +msgid "No permission to access" +msgstr "" + +#: common/auth/handle/impl/user_token.py:157 msgid "Login expired" msgstr "" -#: .\apps\common\constants\permission_constants.py:46 -msgid "ADMIN" -msgstr "" - -#: .\apps\common\constants\permission_constants.py:46 -msgid "Super administrator" -msgstr "" - -#: .\apps\common\exception\handle_exception.py:32 +#: common/exception/handle_exception.py:32 msgid "Unknown exception" msgstr "" -#: .\apps\common\result\api.py:17 .\apps\common\result\api.py:27 +#: common/forms/base_field.py:64 +#, python-brace-format +msgid "The field {field_label} is required" +msgstr "" + +#: common/forms/slider_field.py:56 +#, python-brace-format +msgid "The {field_label} cannot be less than {min}" +msgstr "" + +#: common/forms/slider_field.py:62 +#, python-brace-format +msgid "The {field_label} cannot be greater than {max}" +msgstr "" + +#: common/result/api.py:17 
common/result/api.py:27 msgid "response code" msgstr "" -#: .\apps\common\result\api.py:18 .\apps\common\result\api.py:19 -#: .\apps\common\result\api.py:28 .\apps\common\result\api.py:29 +#: common/result/api.py:18 common/result/api.py:19 common/result/api.py:28 +#: common/result/api.py:29 msgid "error prompt" msgstr "" -#: .\apps\common\result\api.py:43 +#: common/result/api.py:43 msgid "total number of data" msgstr "" -#: .\apps\common\result\api.py:44 +#: common/result/api.py:44 msgid "current page" msgstr "" -#: .\apps\common\result\api.py:45 +#: common/result/api.py:45 msgid "page size" msgstr "" -#: .\apps\common\result\result.py:31 +#: common/result/result.py:31 msgid "Success" msgstr "" -#: .\apps\maxkb\settings\base.py:80 +#: common/utils/common.py:83 +msgid "Text-to-speech node, the text content must be of string type" +msgstr "" + +#: common/utils/common.py:85 +msgid "Text-to-speech node, the text content cannot be empty" +msgstr "" + +#: maxkb/settings/base.py:83 msgid "Intelligent customer service platform" msgstr "" -#: .\apps\users\serializers\login.py:23 +#: models_provider/api/model.py:36 models_provider/api/model.py:49 +#: models_provider/serializers/model_serializer.py:262 +#: models_provider/serializers/model_serializer.py:326 +#: modules/serializers/module.py:31 modules/serializers/module.py:63 +#: modules/serializers/module.py:95 tools/serializers/tool.py:66 +#: tools/serializers/tool.py:86 +msgid "workspace id" +msgstr "" + +#: models_provider/api/model.py:55 +#: models_provider/serializers/model_serializer.py:107 +#: models_provider/serializers/model_serializer.py:365 +msgid "model id" +msgstr "" + +#: models_provider/api/provide.py:17 models_provider/api/provide.py:23 +#: models_provider/api/provide.py:28 models_provider/api/provide.py:30 +#: models_provider/api/provide.py:67 +#: models_provider/serializers/model_serializer.py:40 +#: models_provider/serializers/model_serializer.py:218 +#: models_provider/serializers/model_serializer.py:256 
+#: models_provider/serializers/model_serializer.py:321 +msgid "model name" +msgstr "" + +#: models_provider/api/provide.py:18 models_provider/api/provide.py:38 +#: models_provider/api/provide.py:61 models_provider/api/provide.py:89 +#: models_provider/api/provide.py:111 +#: models_provider/serializers/model_serializer.py:41 +#: models_provider/serializers/model_serializer.py:257 +#: models_provider/serializers/model_serializer.py:324 +msgid "provider" +msgstr "" + +#: models_provider/api/provide.py:19 +msgid "icon" +msgstr "" + +#: models_provider/api/provide.py:24 +msgid "value" +msgstr "" + +#: models_provider/api/provide.py:29 models_provider/api/provide.py:55 +#: models_provider/api/provide.py:83 +#: models_provider/serializers/model_serializer.py:42 +#: models_provider/serializers/model_serializer.py:220 +#: models_provider/serializers/model_serializer.py:258 +#: models_provider/serializers/model_serializer.py:322 +msgid "model type" +msgstr "" + +#: models_provider/api/provide.py:34 tools/serializers/tool.py:38 +msgid "input type" +msgstr "" + +#: models_provider/api/provide.py:35 +msgid "label" +msgstr "" + +#: models_provider/api/provide.py:36 +msgid "text field" +msgstr "" + +#: models_provider/api/provide.py:37 +msgid "value field" +msgstr "" + +#: models_provider/api/provide.py:39 +msgid "method" +msgstr "" + +#: models_provider/api/provide.py:40 tools/serializers/tool.py:23 +#: tools/serializers/tool.py:37 +msgid "required" +msgstr "" + +#: models_provider/api/provide.py:41 +msgid "default value" +msgstr "" + +#: models_provider/api/provide.py:42 +msgid "relation show field dict" +msgstr "" + +#: models_provider/api/provide.py:43 +msgid "relation trigger field dict" +msgstr "" + +#: models_provider/api/provide.py:44 +msgid "trigger type" +msgstr "" + +#: models_provider/api/provide.py:45 +msgid "attrs" +msgstr "" + +#: models_provider/api/provide.py:46 +msgid "props info" +msgstr "" + +#: models_provider/api/provide.py:82 +msgid "model_type" +msgstr "" 
+ +#: models_provider/base_model_provider.py:60 +msgid "Model type cannot be empty" +msgstr "" + +#: models_provider/base_model_provider.py:85 +msgid "The current platform does not support downloading models" +msgstr "" + +#: models_provider/base_model_provider.py:140 +msgid "LLM" +msgstr "" + +#: models_provider/base_model_provider.py:141 +msgid "Embedding Model" +msgstr "" + +#: models_provider/base_model_provider.py:142 +msgid "Speech2Text" +msgstr "" + +#: models_provider/base_model_provider.py:143 +msgid "TTS" +msgstr "" + +#: models_provider/base_model_provider.py:144 +msgid "Vision Model" +msgstr "" + +#: models_provider/base_model_provider.py:145 +msgid "Image Generation" +msgstr "" + +#: models_provider/base_model_provider.py:146 +msgid "Rerank" +msgstr "" + +#: models_provider/base_model_provider.py:220 +msgid "The model does not support" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 +msgid "" +"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " +"Lab, developers can integrate high-quality text retrieval and sorting " +"through the LlamaIndex framework." +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 +msgid "" +"Chinese (including various dialects such as Cantonese), English, Japanese, " +"and Korean support free switching between multiple languages." +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 +msgid "" +"CosyVoice is based on a new generation of large generative speech models, " +"which can predict emotions, intonation, rhythm, etc. based on context, and " +"has better anthropomorphic effects." +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 +msgid "" +"Universal text vector is Tongyi Lab's multi-language text unified vector " +"model based on the LLM base. 
It provides high-level vector services for " +"multiple mainstream languages around the world and helps developers quickly " +"convert text data into high-quality vector data." +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 +#: models_provider/impl/qwen_model_provider/qwen_model_provider.py:40 +msgid "" +"Tongyi Wanxiang - a large image model for text generation, supports " +"bilingual input in Chinese and English, and supports the input of reference " +"pictures for reference content or reference style migration. Key styles " +"include but are not limited to watercolor, oil painting, Chinese painting, " +"sketch, flat illustration, two-dimensional, and 3D. Cartoon." +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 +msgid "Alibaba Cloud Bailian" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:53 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:50 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:74 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:61 +#: models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 +#: models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 +#: models_provider/impl/anthropic_model_provider/credential/image.py:33 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:57 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 +#: models_provider/impl/azure_model_provider/credential/embedding.py:37 +#: models_provider/impl/azure_model_provider/credential/image.py:55 +#: models_provider/impl/azure_model_provider/credential/llm.py:69 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:57 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:36 
+#: models_provider/impl/gemini_model_provider/credential/image.py:51 +#: models_provider/impl/gemini_model_provider/credential/llm.py:57 +#: models_provider/impl/gemini_model_provider/model/stt.py:43 +#: models_provider/impl/kimi_model_provider/credential/llm.py:57 +#: models_provider/impl/local_model_provider/credential/embedding.py:36 +#: models_provider/impl/local_model_provider/credential/reranker.py:37 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:37 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:44 +#: models_provider/impl/openai_model_provider/credential/embedding.py:36 +#: models_provider/impl/openai_model_provider/credential/image.py:54 +#: models_provider/impl/openai_model_provider/credential/llm.py:59 +#: models_provider/impl/qwen_model_provider/credential/image.py:56 +#: models_provider/impl/qwen_model_provider/credential/llm.py:56 +#: models_provider/impl/qwen_model_provider/model/tti.py:43 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:54 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:23 +#: models_provider/impl/tencent_model_provider/credential/image.py:56 +#: models_provider/impl/tencent_model_provider/credential/llm.py:51 +#: models_provider/impl/tencent_model_provider/model/tti.py:54 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:36 +#: models_provider/impl/vllm_model_provider/credential/llm.py:50 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:52 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 +#: models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:31 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:60 +#: models_provider/impl/xf_model_provider/credential/embedding.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:76 +#: models_provider/impl/xf_model_provider/model/tts.py:101 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:31 +#: models_provider/impl/xinference_model_provider/credential/image.py:51 +#: models_provider/impl/xinference_model_provider/credential/llm.py:50 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:34 +#: models_provider/impl/xinference_model_provider/model/tts.py:44 +#: models_provider/impl/zhipu_model_provider/credential/image.py:51 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:56 +#: models_provider/impl/zhipu_model_provider/model/tti.py:49 +msgid "Hello" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:36 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:60 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:46 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:44 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:96 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:89 +#: models_provider/impl/anthropic_model_provider/credential/image.py:23 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:47 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 +#: models_provider/impl/azure_model_provider/credential/embedding.py:27 +#: models_provider/impl/azure_model_provider/credential/image.py:45 +#: 
models_provider/impl/azure_model_provider/credential/llm.py:59 +#: models_provider/impl/azure_model_provider/credential/stt.py:23 +#: models_provider/impl/azure_model_provider/credential/tti.py:58 +#: models_provider/impl/azure_model_provider/credential/tts.py:41 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:47 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:26 +#: models_provider/impl/gemini_model_provider/credential/image.py:41 +#: models_provider/impl/gemini_model_provider/credential/llm.py:47 +#: models_provider/impl/gemini_model_provider/credential/stt.py:21 +#: models_provider/impl/kimi_model_provider/credential/llm.py:47 +#: models_provider/impl/local_model_provider/credential/embedding.py:27 +#: models_provider/impl/local_model_provider/credential/reranker.py:28 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:26 +#: models_provider/impl/ollama_model_provider/credential/image.py:39 +#: models_provider/impl/ollama_model_provider/credential/llm.py:44 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:27 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:31 +#: models_provider/impl/openai_model_provider/credential/embedding.py:26 +#: models_provider/impl/openai_model_provider/credential/image.py:44 +#: models_provider/impl/openai_model_provider/credential/llm.py:48 +#: models_provider/impl/openai_model_provider/credential/stt.py:22 +#: models_provider/impl/openai_model_provider/credential/tti.py:61 +#: models_provider/impl/openai_model_provider/credential/tts.py:40 +#: models_provider/impl/qwen_model_provider/credential/image.py:47 +#: models_provider/impl/qwen_model_provider/credential/llm.py:47 +#: models_provider/impl/qwen_model_provider/credential/tti.py:68 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:44 +#: 
models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:22 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:19 +#: models_provider/impl/tencent_model_provider/credential/image.py:47 +#: models_provider/impl/tencent_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_model_provider/credential/tti.py:78 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:26 +#: models_provider/impl/vllm_model_provider/credential/image.py:42 +#: models_provider/impl/vllm_model_provider/credential/llm.py:39 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:51 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:27 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:46 +#: models_provider/impl/xf_model_provider/credential/embedding.py:27 +#: models_provider/impl/xf_model_provider/credential/image.py:29 +#: models_provider/impl/xf_model_provider/credential/llm.py:66 +#: models_provider/impl/xf_model_provider/credential/stt.py:24 +#: models_provider/impl/xf_model_provider/credential/tts.py:47 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:19 +#: 
models_provider/impl/xinference_model_provider/credential/image.py:41 +#: models_provider/impl/xinference_model_provider/credential/llm.py:39 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:25 +#: models_provider/impl/xinference_model_provider/credential/stt.py:21 +#: models_provider/impl/xinference_model_provider/credential/tti.py:59 +#: models_provider/impl/xinference_model_provider/credential/tts.py:39 +#: models_provider/impl/zhipu_model_provider/credential/image.py:41 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:47 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:40 +#, python-brace-format +msgid "{model_type} Model type is not supported" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:44 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:68 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:55 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:53 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:105 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:98 +#, python-brace-format +msgid "{key} is required" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:60 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:82 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:69 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:67 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:121 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:113 +#: models_provider/impl/anthropic_model_provider/credential/image.py:43 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:65 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 +#: 
models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 +#: models_provider/impl/azure_model_provider/credential/image.py:65 +#: models_provider/impl/azure_model_provider/credential/stt.py:40 +#: models_provider/impl/azure_model_provider/credential/tti.py:77 +#: models_provider/impl/azure_model_provider/credential/tts.py:58 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:65 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:43 +#: models_provider/impl/gemini_model_provider/credential/image.py:61 +#: models_provider/impl/gemini_model_provider/credential/llm.py:66 +#: models_provider/impl/gemini_model_provider/credential/stt.py:38 +#: models_provider/impl/kimi_model_provider/credential/llm.py:64 +#: models_provider/impl/local_model_provider/credential/embedding.py:44 +#: models_provider/impl/local_model_provider/credential/reranker.py:45 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:51 +#: models_provider/impl/openai_model_provider/credential/embedding.py:43 +#: models_provider/impl/openai_model_provider/credential/image.py:64 +#: models_provider/impl/openai_model_provider/credential/llm.py:67 +#: models_provider/impl/openai_model_provider/credential/stt.py:39 +#: models_provider/impl/openai_model_provider/credential/tti.py:80 +#: models_provider/impl/openai_model_provider/credential/tts.py:58 +#: models_provider/impl/qwen_model_provider/credential/image.py:66 +#: models_provider/impl/qwen_model_provider/credential/llm.py:64 +#: models_provider/impl/qwen_model_provider/credential/tti.py:86 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:64 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 +#: 
models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:30 +#: models_provider/impl/tencent_model_provider/credential/image.py:66 +#: models_provider/impl/tencent_model_provider/credential/llm.py:57 +#: models_provider/impl/tencent_model_provider/credential/tti.py:104 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:43 +#: models_provider/impl/vllm_model_provider/credential/image.py:62 +#: models_provider/impl/vllm_model_provider/credential/llm.py:55 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:62 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:68 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:38 +#: models_provider/impl/xf_model_provider/credential/embedding.py:38 +#: models_provider/impl/xf_model_provider/credential/image.py:50 +#: models_provider/impl/xf_model_provider/credential/llm.py:84 +#: models_provider/impl/xf_model_provider/credential/stt.py:41 +#: models_provider/impl/xf_model_provider/credential/tts.py:65 +#: models_provider/impl/xinference_model_provider/credential/image.py:60 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:40 +#: models_provider/impl/xinference_model_provider/credential/stt.py:37 +#: models_provider/impl/xinference_model_provider/credential/tti.py:77 +#: models_provider/impl/xinference_model_provider/credential/tts.py:56 +#: 
models_provider/impl/zhipu_model_provider/credential/image.py:61 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:64 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:59 +#, python-brace-format +msgid "" +"Verification failed, please check whether the parameters are correct: {error}" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:17 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:22 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 +#: models_provider/impl/azure_model_provider/credential/image.py:17 +#: models_provider/impl/azure_model_provider/credential/llm.py:23 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:22 +#: models_provider/impl/gemini_model_provider/credential/image.py:15 +#: models_provider/impl/gemini_model_provider/credential/llm.py:22 +#: models_provider/impl/kimi_model_provider/credential/llm.py:22 +#: models_provider/impl/ollama_model_provider/credential/image.py:12 +#: models_provider/impl/ollama_model_provider/credential/llm.py:20 +#: models_provider/impl/openai_model_provider/credential/image.py:17 +#: models_provider/impl/openai_model_provider/credential/llm.py:23 +#: models_provider/impl/qwen_model_provider/credential/image.py:22 +#: models_provider/impl/qwen_model_provider/credential/llm.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:17 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 +#: models_provider/impl/tencent_model_provider/credential/image.py:22 +#: models_provider/impl/tencent_model_provider/credential/llm.py:14 +#: models_provider/impl/vllm_model_provider/credential/image.py:15 +#: models_provider/impl/vllm_model_provider/credential/llm.py:15 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:15 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:22 +#: models_provider/impl/xf_model_provider/credential/llm.py:22 +#: models_provider/impl/xf_model_provider/credential/llm.py:41 +#: models_provider/impl/xinference_model_provider/credential/image.py:14 +#: models_provider/impl/xinference_model_provider/credential/llm.py:15 +#: models_provider/impl/zhipu_model_provider/credential/image.py:15 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:22 +msgid "Temperature" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:18 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic." +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:30 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:31 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 +#: models_provider/impl/azure_model_provider/credential/image.py:26 +#: models_provider/impl/azure_model_provider/credential/llm.py:32 +#: models_provider/impl/azure_model_provider/credential/llm.py:43 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:31 +#: models_provider/impl/gemini_model_provider/credential/image.py:24 +#: models_provider/impl/gemini_model_provider/credential/llm.py:31 +#: models_provider/impl/kimi_model_provider/credential/llm.py:31 +#: models_provider/impl/ollama_model_provider/credential/image.py:21 +#: models_provider/impl/ollama_model_provider/credential/llm.py:29 +#: models_provider/impl/openai_model_provider/credential/image.py:26 +#: models_provider/impl/openai_model_provider/credential/llm.py:32 +#: models_provider/impl/qwen_model_provider/credential/image.py:31 +#: models_provider/impl/qwen_model_provider/credential/llm.py:31 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:26 +#: 
models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_model_provider/credential/image.py:31 +#: models_provider/impl/vllm_model_provider/credential/image.py:24 +#: models_provider/impl/vllm_model_provider/credential/llm.py:24 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:24 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:50 +#: models_provider/impl/xinference_model_provider/credential/image.py:23 +#: models_provider/impl/xinference_model_provider/credential/llm.py:24 +#: models_provider/impl/zhipu_model_provider/credential/image.py:24 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:31 +msgid "Output the maximum Tokens" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:31 +msgid "Specify the maximum number of tokens that the model can generate." 
+msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:44 +#: models_provider/impl/anthropic_model_provider/credential/image.py:15 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:74 +msgid "API URL" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 +#: models_provider/impl/anthropic_model_provider/credential/image.py:16 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:75 +msgid "API Key" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 +#: models_provider/impl/azure_model_provider/credential/tti.py:15 +#: models_provider/impl/openai_model_provider/credential/tti.py:15 +#: models_provider/impl/qwen_model_provider/credential/tti.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 +#: models_provider/impl/xinference_model_provider/credential/tti.py:14 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:15 +msgid "Image size" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 +#: models_provider/impl/azure_model_provider/credential/tti.py:15 +#: models_provider/impl/qwen_model_provider/credential/tti.py:22 +msgid "Specify the size of the generated image, such as: 1024x1024" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: models_provider/impl/azure_model_provider/credential/tti.py:40 +#: models_provider/impl/openai_model_provider/credential/tti.py:43 +#: models_provider/impl/qwen_model_provider/credential/tti.py:34 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 +#: models_provider/impl/xinference_model_provider/credential/tti.py:41 +msgid "Number of pictures" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: 
models_provider/impl/azure_model_provider/credential/tti.py:40 +#: models_provider/impl/qwen_model_provider/credential/tti.py:34 +msgid "Specify the number of generated images" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:44 +#: models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Style" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:44 +#: models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Specify the style of generated images" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 +#: models_provider/impl/qwen_model_provider/credential/tti.py:45 +msgid "Default value, the image style is randomly output by the model" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 +#: models_provider/impl/qwen_model_provider/credential/tti.py:46 +msgid "photography" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 +#: models_provider/impl/qwen_model_provider/credential/tti.py:47 +msgid "Portraits" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 +#: models_provider/impl/qwen_model_provider/credential/tti.py:48 +msgid "3D cartoon" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 +#: models_provider/impl/qwen_model_provider/credential/tti.py:49 +msgid "animation" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 +#: models_provider/impl/qwen_model_provider/credential/tti.py:50 +msgid "painting" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 +#: models_provider/impl/qwen_model_provider/credential/tti.py:51 +msgid "watercolor" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:55 +#: models_provider/impl/qwen_model_provider/credential/tti.py:52 +msgid 
"sketch" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:56 +#: models_provider/impl/qwen_model_provider/credential/tti.py:53 +msgid "Chinese painting" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:57 +#: models_provider/impl/qwen_model_provider/credential/tti.py:54 +msgid "flat illustration" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +msgid "Timbre" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +msgid "Chinese sounds can support mixed scenes of Chinese and English" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 +msgid "Long Xiaochun" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 +msgid "Long Xiaoxia" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 +msgid "Long Xiaochen" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 +msgid "Long Xiaobai" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 +msgid "Long Laotie" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 +msgid "Long Shu" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 +msgid "Long Shuo" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 +msgid "Long Jing" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 +msgid "Long Miao" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:35 +msgid "Long Yue" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:36 +msgid "Long Yuan" +msgstr "" + +#: 
models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:37 +msgid "Long Fei" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:38 +msgid "Long Jielidou" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:39 +msgid "Long Tong" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:40 +msgid "Long Xiang" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 +msgid "Speaking speed" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 +msgid "[0.5, 2], the default is 1, usually one decimal place is enough" +msgstr "" + +#: models_provider/impl/anthropic_model_provider/credential/image.py:28 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:52 +#: models_provider/impl/azure_model_provider/credential/embedding.py:32 +#: models_provider/impl/azure_model_provider/credential/image.py:50 +#: models_provider/impl/azure_model_provider/credential/llm.py:64 +#: models_provider/impl/azure_model_provider/credential/stt.py:28 +#: models_provider/impl/azure_model_provider/credential/tti.py:63 +#: models_provider/impl/azure_model_provider/credential/tts.py:46 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:52 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:31 +#: models_provider/impl/gemini_model_provider/credential/image.py:46 +#: models_provider/impl/gemini_model_provider/credential/llm.py:52 +#: models_provider/impl/gemini_model_provider/credential/stt.py:26 +#: models_provider/impl/kimi_model_provider/credential/llm.py:52 +#: models_provider/impl/local_model_provider/credential/embedding.py:31 +#: models_provider/impl/local_model_provider/credential/reranker.py:32 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:46 +#: models_provider/impl/ollama_model_provider/credential/llm.py:62 +#: 
models_provider/impl/ollama_model_provider/credential/reranker.py:63 +#: models_provider/impl/openai_model_provider/credential/embedding.py:31 +#: models_provider/impl/openai_model_provider/credential/image.py:49 +#: models_provider/impl/openai_model_provider/credential/llm.py:53 +#: models_provider/impl/openai_model_provider/credential/stt.py:27 +#: models_provider/impl/openai_model_provider/credential/tti.py:66 +#: models_provider/impl/openai_model_provider/credential/tts.py:45 +#: models_provider/impl/qwen_model_provider/credential/image.py:51 +#: models_provider/impl/qwen_model_provider/credential/llm.py:51 +#: models_provider/impl/qwen_model_provider/credential/tti.py:72 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:49 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:27 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 +#: models_provider/impl/tencent_model_provider/credential/image.py:51 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:31 +#: models_provider/impl/vllm_model_provider/credential/image.py:47 +#: models_provider/impl/vllm_model_provider/credential/llm.py:65 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:47 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/tts.py:56 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:55 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:72 +#: models_provider/impl/xf_model_provider/credential/image.py:34 +#: models_provider/impl/xf_model_provider/credential/llm.py:71 +#: models_provider/impl/xf_model_provider/credential/stt.py:29 +#: models_provider/impl/xf_model_provider/credential/tts.py:52 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:40 +#: models_provider/impl/xinference_model_provider/credential/image.py:46 +#: models_provider/impl/xinference_model_provider/credential/llm.py:59 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:29 +#: models_provider/impl/xinference_model_provider/credential/stt.py:26 +#: models_provider/impl/xinference_model_provider/credential/tti.py:64 +#: models_provider/impl/xinference_model_provider/credential/tts.py:44 +#: models_provider/impl/zhipu_model_provider/credential/image.py:46 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:51 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:45 +#, python-brace-format +msgid "{key} is required" +msgstr "" + +#: models_provider/impl/anthropic_model_provider/credential/llm.py:23 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 +#: models_provider/impl/azure_model_provider/credential/image.py:18 +#: models_provider/impl/azure_model_provider/credential/llm.py:24 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:23 +#: models_provider/impl/gemini_model_provider/credential/image.py:16 +#: models_provider/impl/gemini_model_provider/credential/llm.py:23 +#: models_provider/impl/kimi_model_provider/credential/llm.py:23 +#: models_provider/impl/ollama_model_provider/credential/image.py:13 +#: models_provider/impl/ollama_model_provider/credential/llm.py:21 +#: 
models_provider/impl/openai_model_provider/credential/image.py:18 +#: models_provider/impl/openai_model_provider/credential/llm.py:24 +#: models_provider/impl/qwen_model_provider/credential/image.py:23 +#: models_provider/impl/qwen_model_provider/credential/llm.py:23 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:18 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 +#: models_provider/impl/tencent_model_provider/credential/image.py:23 +#: models_provider/impl/tencent_model_provider/credential/llm.py:15 +#: models_provider/impl/vllm_model_provider/credential/image.py:16 +#: models_provider/impl/vllm_model_provider/credential/llm.py:16 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:16 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:23 +#: models_provider/impl/xf_model_provider/credential/llm.py:23 +#: models_provider/impl/xf_model_provider/credential/llm.py:42 +#: models_provider/impl/xinference_model_provider/credential/image.py:15 +#: models_provider/impl/xinference_model_provider/credential/llm.py:16 +#: models_provider/impl/zhipu_model_provider/credential/image.py:16 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:23 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic" +msgstr "" + +#: models_provider/impl/anthropic_model_provider/credential/llm.py:32 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 +#: models_provider/impl/azure_model_provider/credential/image.py:27 +#: models_provider/impl/azure_model_provider/credential/llm.py:33 +#: models_provider/impl/azure_model_provider/credential/llm.py:44 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:32 +#: 
models_provider/impl/gemini_model_provider/credential/image.py:25 +#: models_provider/impl/gemini_model_provider/credential/llm.py:32 +#: models_provider/impl/kimi_model_provider/credential/llm.py:32 +#: models_provider/impl/ollama_model_provider/credential/image.py:22 +#: models_provider/impl/ollama_model_provider/credential/llm.py:30 +#: models_provider/impl/openai_model_provider/credential/image.py:27 +#: models_provider/impl/openai_model_provider/credential/llm.py:33 +#: models_provider/impl/qwen_model_provider/credential/image.py:32 +#: models_provider/impl/qwen_model_provider/credential/llm.py:32 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:27 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 +#: models_provider/impl/tencent_model_provider/credential/image.py:32 +#: models_provider/impl/vllm_model_provider/credential/image.py:25 +#: models_provider/impl/vllm_model_provider/credential/llm.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:32 +#: models_provider/impl/xf_model_provider/credential/llm.py:32 +#: models_provider/impl/xf_model_provider/credential/llm.py:51 +#: models_provider/impl/xinference_model_provider/credential/image.py:24 +#: models_provider/impl/xinference_model_provider/credential/llm.py:25 +#: models_provider/impl/zhipu_model_provider/credential/image.py:25 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:32 +msgid "Specify the maximum number of tokens that the model can generate" +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 +msgid "" +"An update to Claude 2 that doubles the context window and improves " +"reliability, hallucination rates, and evidence-based accuracy in long 
" +"documents and RAG contexts." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 +msgid "" +"Anthropic is a powerful model that can handle a variety of tasks, from " +"complex dialogue and creative content generation to detailed command " +"obedience." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 +msgid "" +"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" +"instant responsiveness. The model can answer simple queries and requests " +"quickly. Customers will be able to build seamless AI experiences that mimic " +"human interactions. Claude 3 Haiku can process images and return text " +"output, and provides 200K context windows." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 +msgid "" +"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " +"intelligence and speed, especially when it comes to handling enterprise " +"workloads. This model offers maximum utility while being priced lower than " +"competing products, and it's been engineered to be a solid choice for " +"deploying AI at scale." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 +msgid "" +"The Claude 3.5 Sonnet raises the industry standard for intelligence, " +"outperforming competing models and the Claude 3 Opus in extensive " +"evaluations, with the speed and cost-effectiveness of our mid-range models." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 +msgid "" +"A faster, more affordable but still very powerful model that can handle a " +"range of tasks including casual conversation, text analysis, summarization " +"and document question answering." 
+msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 +msgid "" +"Titan Text Premier is the most powerful and advanced model in the Titan Text " +"series, designed to deliver exceptional performance for a variety of " +"enterprise applications. With its cutting-edge features, it delivers greater " +"accuracy and outstanding results, making it an excellent choice for " +"organizations looking for a top-notch text processing solution." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 +msgid "" +"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" +"tuning English-language tasks, including summarization and copywriting, " +"where customers require smaller, more cost-effective, and highly " +"customizable models." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 +msgid "" +"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " +"it ideal for a variety of high-level general language tasks, such as open-" +"ended text generation and conversational chat, as well as support in " +"retrieval-augmented generation (RAG). At launch, the model is optimized for " +"English, but other languages are supported." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 +msgid "" +"7B dense converter for rapid deployment and easy customization. Small in " +"size yet powerful in a variety of use cases. Supports English and code, as " +"well as 32k context windows." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 +msgid "" +"Advanced Mistral AI large-scale language model capable of handling any " +"language task, including complex multilingual reasoning, text understanding, " +"transformation, and code generation." 
+msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 +msgid "" +"Ideal for content creation, conversational AI, language understanding, R&D, " +"and enterprise applications" +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 +msgid "" +"Ideal for limited computing power and resources, edge devices, and faster " +"training times." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 +msgid "" +"Titan Embed Text is the largest embedding model in the Amazon Titan Embed " +"series and can handle various text embedding tasks, such as text " +"classification, text similarity calculation, etc." +msgstr "" + +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 +#, python-brace-format +msgid "The following fields are required: {keys}" +msgstr "" + +#: models_provider/impl/azure_model_provider/credential/embedding.py:44 +#: models_provider/impl/azure_model_provider/credential/llm.py:76 +msgid "Verification failed, please check whether the parameters are correct" +msgstr "" + +#: models_provider/impl/azure_model_provider/credential/tti.py:28 +#: models_provider/impl/openai_model_provider/credential/tti.py:29 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +#: models_provider/impl/xinference_model_provider/credential/tti.py:28 +msgid "Picture quality" +msgstr "" + +#: models_provider/impl/azure_model_provider/credential/tts.py:17 +#: models_provider/impl/openai_model_provider/credential/tts.py:17 +msgid "" +"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " +"to find one that suits your desired tone and audience. The current voiceover " +"is optimized for English." 
+msgstr "" + +#: models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24 +msgid "Good at common conversational tasks, supports 32K contexts" +msgstr "" + +#: models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29 +msgid "Good at handling programming tasks, supports 16K contexts" +msgstr "" + +#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:32 +msgid "Latest Gemini 1.0 Pro model, updated with Google update" +msgstr "" + +#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:36 +msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update" +msgstr "" + +#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:43 +#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:47 +#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:54 +#: models_provider/impl/gemini_model_provider/gemini_model_provider.py:58 +msgid "Latest Gemini 1.5 Flash model, updated with Google updates" +msgstr "" + +#: models_provider/impl/gemini_model_provider/model/stt.py:53 +msgid "convert audio to text" +msgstr "" + +#: models_provider/impl/local_model_provider/credential/embedding.py:53 +#: models_provider/impl/local_model_provider/credential/reranker.py:54 +msgid "Model catalog" +msgstr "" + +#: models_provider/impl/local_model_provider/local_model_provider.py:39 +msgid "local model" +msgstr "" + +#: models_provider/impl/ollama_model_provider/credential/embedding.py:30 +#: models_provider/impl/ollama_model_provider/credential/image.py:43 +#: models_provider/impl/ollama_model_provider/credential/llm.py:48 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:35 +#: models_provider/impl/vllm_model_provider/credential/llm.py:43 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:24 +#: models_provider/impl/xinference_model_provider/credential/llm.py:44 +msgid "API domain name is invalid" +msgstr "" + +#: 
models_provider/impl/ollama_model_provider/credential/embedding.py:35 +#: models_provider/impl/ollama_model_provider/credential/image.py:48 +#: models_provider/impl/ollama_model_provider/credential/llm.py:53 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:40 +#: models_provider/impl/vllm_model_provider/credential/llm.py:47 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:30 +#: models_provider/impl/xinference_model_provider/credential/llm.py:48 +msgid "The model does not exist, please download the model first" +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 7B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 13B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 70B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 +msgid "" +"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " +"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " +"that it has strong Chinese conversation capabilities." 
+msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 8 billion " +"parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 70 billion " +"parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 +msgid "" +"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 500 million parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 +msgid "" +"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 1.8 billion parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 +msgid "" +"Compared with previous versions, qwen 1.5 4b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"4 billion parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 +msgid "" +"Compared with previous versions, qwen 1.5 7b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"7 billion parameters." 
+msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 +msgid "" +"Compared with previous versions, qwen 1.5 14b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"14 billion parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 +msgid "" +"Compared with previous versions, qwen 1.5 32b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"32 billion parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 +msgid "" +"Compared with previous versions, qwen 1.5 72b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"72 billion parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 +msgid "" +"Compared with previous versions, qwen 1.5 110b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 110 billion parameters." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 +msgid "" +"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " +"model." +msgstr "" + +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 +#: models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 +msgid "" +"A high-performance open embedding model with a large token context window." 
+msgstr "" + +#: models_provider/impl/openai_model_provider/credential/tti.py:16 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " +"or 1792x1024 pixels." +msgstr "" + +#: models_provider/impl/openai_model_provider/credential/tti.py:29 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +msgid "" +" \n" +"By default, images are produced in standard quality, but with DALL·E 3 you " +"can set quality: \"hd\" to enhance detail. Square, standard quality images " +"are generated fastest.\n" +" " +msgstr "" + +#: models_provider/impl/openai_model_provider/credential/tti.py:44 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 +msgid "" +"You can use DALL·E 3 to request 1 image at a time (requesting more images by " +"issuing parallel requests), or use DALL·E 2 with the n parameter to request " +"up to 10 images at a time." 
+msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:35 +#: models_provider/impl/openai_model_provider/openai_model_provider.py:119 +#: models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:118 +msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:38 +msgid "Latest gpt-4, updated with OpenAI adjustments" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:40 +#: models_provider/impl/openai_model_provider/openai_model_provider.py:99 +msgid "" +"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " +"adjustments" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:43 +#: models_provider/impl/openai_model_provider/openai_model_provider.py:102 +msgid "" +"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " +"adjustments" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:46 +msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:49 +msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:53 +msgid "" +"gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 " +"tokens" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:57 +msgid "" +"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " +"tokens" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:61 +msgid "" +"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " +"13, 2024" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:65 +msgid "" +"gpt-4o snapshot 
on May 13, 2024, supporting context length 128,000 tokens" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:69 +msgid "" +"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " +"tokens" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:72 +msgid "" +"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " +"tokens" +msgstr "" + +#: models_provider/impl/openai_model_provider/openai_model_provider.py:75 +msgid "" +"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 " +"tokens" +msgstr "" + +#: models_provider/impl/qwen_model_provider/qwen_model_provider.py:63 +msgid "Tongyi Qianwen" +msgstr "" + +#: models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58 +msgid "Tencent Cloud" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/llm.py:41 +#: models_provider/impl/tencent_model_provider/credential/tti.py:88 +#, python-brace-format +msgid "{keys} is required" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "painting style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "If not passed, the default value is 201 (Japanese anime style)" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:18 +msgid "Not limited to style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:19 +msgid "ink painting" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:20 +msgid "concept art" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:21 +msgid "Oil painting 1" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:22 +msgid "Oil Painting 2 (Van Gogh)" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:23 +msgid "watercolor painting" +msgstr "" + +#: 
models_provider/impl/tencent_model_provider/credential/tti.py:24 +msgid "pixel art" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:25 +msgid "impasto style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:26 +msgid "illustration" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:27 +msgid "paper cut style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:28 +msgid "Impressionism 1 (Monet)" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:29 +msgid "Impressionism 2" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:31 +msgid "classical portraiture" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:32 +msgid "black and white sketch" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:33 +msgid "cyberpunk" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:34 +msgid "science fiction style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:35 +msgid "dark style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:37 +msgid "vaporwave" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:38 +msgid "Japanese animation" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:39 +msgid "monster style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:40 +msgid "Beautiful ancient style" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:41 +msgid "retro anime" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:42 +msgid "Game cartoon hand drawing" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:43 +msgid "Universal realistic style" +msgstr "" + +#: 
models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "Generate image resolution" +msgstr "" + +#: models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "If not transmitted, the default value is 768:768." +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 +msgid "" +"The most effective version of the current hybrid model, the trillion-level " +"parameter scale MOE-32K long article model. Reaching the absolute leading " +"level on various benchmarks, with complex instructions and reasoning, " +"complex mathematical capabilities, support for function call, and " +"application focus optimization in fields such as multi-language translation, " +"finance, law, and medical care" +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 +msgid "" +"A better routing strategy is adopted to simultaneously alleviate the " +"problems of load balancing and expert convergence. For long articles, the " +"needle-in-a-haystack index reaches 99.9%" +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 +msgid "" +"Upgraded to MOE structure, the context window is 256k, leading many open " +"source models in multiple evaluation sets such as NLP, code, mathematics, " +"industry, etc." +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 +msgid "" +"Hunyuan's latest version of the role-playing model, a role-playing model " +"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan " +"model combined with the role-playing scene data set for additional training, " +"and has better basic effects in role-playing scenes." 
+msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 +msgid "" +"Hunyuan's latest MOE architecture FunctionCall model has been trained with " +"high-quality FunctionCall data and has a context window of 32K, leading in " +"multiple dimensions of evaluation indicators." +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 +msgid "" +"Hunyuan's latest code generation model, after training the base model with " +"200B high-quality code data, and iterating on high-quality SFT data for half " +"a year, the context long window length has been increased to 8K, and it " +"ranks among the top in the automatic evaluation indicators of code " +"generation in the five major languages; the five major languages In the " +"manual high-quality evaluation of 10 comprehensive code tasks that consider " +"all aspects, the performance is in the first echelon." +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 +msgid "" +"Tencent's Hunyuan Embedding interface can convert text into high-quality " +"vector data. The vector dimension is 1024 dimensions." 
+msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 +msgid "Mixed element visual model" +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 +msgid "Hunyuan graph model" +msgstr "" + +#: models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 +msgid "Tencent Hunyuan" +msgstr "" + +#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 +#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 +msgid "Facebook’s 125M parameter model" +msgstr "" + +#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 +msgid "BAAI’s 7B parameter model" +msgstr "" + +#: models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 +msgid "BAAI’s 13B parameter mode" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 +msgid "" +"If the gap between width, height and 512 is too large, the picture rendering " +"effect will be poor and the probability of excessive delay will increase " +"significantly. 
Recommended ratio and corresponding width and height before " +"super score: width*height" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +#: models_provider/impl/xinference_model_provider/credential/tts.py:15 +msgid "timbre" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:31 +#: models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "speaking speed" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:31 +msgid "[0.2,3], the default is 1, usually one decimal place is enough" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 +msgid "" +"The user goes to the model inference page of Volcano Ark to create an " +"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call " +"it." 
+msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 +msgid "Universal 2.0-Vincent Diagram" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 +msgid "Universal 2.0Pro-Vincent Chart" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 +msgid "Universal 1.4-Vincent Chart" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 +msgid "Animation 1.3.0-Vincent Picture" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 +msgid "Animation 1.3.1-Vincent Picture" +msgstr "" + +#: models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 +msgid "volcano engine" +msgstr "" + +#: models_provider/impl/wenxin_model_provider/credential/llm.py:51 +#, python-brace-format +msgid "{model_name} The model does not support" +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 +msgid "" +"ERNIE-Bot-4 is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 +msgid "" +"ERNIE-Bot is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 +msgid "" +"ERNIE-Bot-turbo is a large language model independently developed by Baidu. " +"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, " +"content creation and generation, and has a faster response speed." 
+msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 +msgid "" +"BLOOMZ-7B is a well-known large language model in the industry. It was " +"developed and open sourced by BigScience and can output text in 46 languages " +"and 13 programming languages." +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 +msgid "" +"Llama-2-13b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning and knowledge application. " +"Llama-2-13b-chat is a native open source version with balanced performance " +"and effect, suitable for conversation scenarios." +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 +msgid "" +"Llama-2-70b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning, and knowledge application. " +"Llama-2-70b-chat is a native open source version with high-precision effects." +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 +msgid "" +"The Chinese enhanced version developed by the Qianfan team based on " +"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-" +"EVAL." +msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 +msgid "" +"Embedding-V1 is a text representation model based on Baidu Wenxin large " +"model technology. It can convert text into a vector form represented by " +"numerical values and can be used in text retrieval, information " +"recommendation, knowledge mining and other scenarios. Embedding-V1 provides " +"the Embeddings interface, which can generate corresponding vector " +"representations based on input content. You can call this interface to input " +"text into the model and obtain the corresponding vector representation for " +"subsequent text processing and analysis." 
+msgstr "" + +#: models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 +msgid "Thousand sails large model" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/image.py:42 +msgid "Please outline this picture" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:15 +msgid "Speaker" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:16 +msgid "" +"Speaker, optional value: Please go to the console to add a trial or purchase " +"speaker. After adding, the speaker parameter value will be displayed." +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:21 +msgid "iFlytek Xiaoyan" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:22 +msgid "iFlytek Xujiu" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:23 +msgid "iFlytek Xiaoping" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:24 +msgid "iFlytek Xiaojing" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:25 +msgid "iFlytek Xuxiaobao" +msgstr "" + +#: models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "Speech speed, optional value: [0-100], default is 50" +msgstr "" + +#: models_provider/impl/xf_model_provider/xf_model_provider.py:39 +#: models_provider/impl/xf_model_provider/xf_model_provider.py:50 +msgid "Chinese and English recognition" +msgstr "" + +#: models_provider/impl/xf_model_provider/xf_model_provider.py:66 +msgid "iFlytek Spark" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tti.py:15 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " +"1792x1024 pixels." +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tti.py:29 +msgid "" +"By default, images are generated in standard quality, you can set quality: " +"\"hd\" to enhance detail. 
Square, standard quality images are generated " +"fastest." +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tti.py:42 +msgid "" +"You can request 1 image at a time (requesting more images by making parallel " +"requests), or up to 10 images at a time using the n parameter." +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:20 +msgid "Chinese female" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:21 +msgid "Chinese male" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:22 +msgid "Japanese male" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:23 +msgid "Cantonese female" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:24 +msgid "English female" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:25 +msgid "English male" +msgstr "" + +#: models_provider/impl/xinference_model_provider/credential/tts.py:26 +msgid "Korean female" +msgstr "" + +#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 +msgid "" +"Code Llama is a language model specifically designed for code generation." +msgstr "" + +#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:44 +msgid "" +" \n" +"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, " +"designed to perform specific tasks.\n" +" " +msgstr "" + +#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:53 +msgid "" +"Code Llama Python is a language model specifically designed for Python code " +"generation." +msgstr "" + +#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:60 +msgid "" +"CodeQwen 1.5 is a language model for code generation with high performance." 
+msgstr "" + +#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:67 +msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5." +msgstr "" + +#: models_provider/impl/xinference_model_provider/xinference_model_provider.py:74 +msgid "Deepseek is a large-scale language model with 13 billion parameters." +msgstr "" + +#: models_provider/impl/zhipu_model_provider/credential/tti.py:16 +msgid "" +"Image size, only cogview-3-plus supports this parameter. Optional range: " +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the " +"default is 1024x1024." +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34 +msgid "" +"Have strong multi-modal understanding capabilities. Able to understand up to " +"five images simultaneously and supports video content understanding" +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis" +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis (free)" +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 +msgid "" +"Quickly and accurately generate images based on user text descriptions. 
" +"Resolution supports 1024x1024" +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes" +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes (free)" +msgstr "" + +#: models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 +msgid "zhipu AI" +msgstr "" + +#: models_provider/serializers/model_serializer.py:43 +#: models_provider/serializers/model_serializer.py:222 +#: models_provider/serializers/model_serializer.py:259 +#: models_provider/serializers/model_serializer.py:323 +msgid "base model" +msgstr "" + +#: models_provider/serializers/model_serializer.py:44 +#: models_provider/serializers/model_serializer.py:260 +msgid "parameter configuration" +msgstr "" + +#: models_provider/serializers/model_serializer.py:45 +#: models_provider/serializers/model_serializer.py:225 +#: models_provider/serializers/model_serializer.py:261 +msgid "certification information" +msgstr "" + +#: models_provider/serializers/model_serializer.py:108 +#: models_provider/serializers/model_serializer.py:215 +#: models_provider/serializers/model_serializer.py:255 +#: modules/serializers/module.py:35 tools/serializers/tool.py:65 +msgid "user id" +msgstr "" + +#: models_provider/serializers/model_serializer.py:116 +#: models_provider/serializers/model_serializer.py:132 +#: models_provider/serializers/model_serializer.py:151 +#: models_provider/serializers/model_serializer.py:178 +#: models_provider/serializers/model_serializer.py:371 +#: models_provider/tools.py:111 +msgid "Model does not exist" +msgstr "" + +#: models_provider/serializers/model_serializer.py:233 +#: models_provider/serializers/model_serializer.py:272 +#, python-brace-format +msgid "base model【{model_name}】already exists" +msgstr "" + 
+#: models_provider/serializers/model_serializer.py:312 +msgid "Model saving failed" +msgstr "" + +#: models_provider/serializers/model_serializer.py:325 +msgid "create user" +msgstr "" + +#: models_provider/tools.py:113 +msgid "No permission to use this model" +msgstr "" + +#: models_provider/views/model.py:28 models_provider/views/model.py:29 +msgid "Create model" +msgstr "" + +#: models_provider/views/model.py:30 models_provider/views/model.py:57 +#: models_provider/views/model.py:74 models_provider/views/model.py:85 +#: models_provider/views/model.py:96 models_provider/views/model.py:110 +#: models_provider/views/model.py:121 models_provider/views/model.py:137 +#: models_provider/views/model.py:150 models_provider/views/provide.py:24 +#: models_provider/views/provide.py:47 models_provider/views/provide.py:61 +#: models_provider/views/provide.py:79 models_provider/views/provide.py:96 +msgid "Model" +msgstr "" + +#: models_provider/views/model.py:53 models_provider/views/model.py:54 +msgid "Query model list" +msgstr "" + +#: models_provider/views/model.py:69 models_provider/views/model.py:70 +msgid "Update model" +msgstr "" + +#: models_provider/views/model.py:82 models_provider/views/model.py:83 +msgid "Delete model" +msgstr "" + +#: models_provider/views/model.py:92 models_provider/views/model.py:93 +msgid "Query model details" +msgstr "" + +#: models_provider/views/model.py:106 models_provider/views/model.py:107 +msgid "Get model parameter form" +msgstr "" + +#: models_provider/views/model.py:117 models_provider/views/model.py:118 +msgid "Save model parameter form" +msgstr "" + +#: models_provider/views/model.py:132 models_provider/views/model.py:134 +msgid "" +"Query model meta information, this interface does not carry authentication " +"information" +msgstr "" + +#: models_provider/views/model.py:147 models_provider/views/model.py:148 +msgid "Pause model download" +msgstr "" + +#: models_provider/views/provide.py:21 models_provider/views/provide.py:22 
+msgid "Get a list of model suppliers" +msgstr "" + +#: models_provider/views/provide.py:43 models_provider/views/provide.py:44 +msgid "Get a list of model types" +msgstr "" + +#: models_provider/views/provide.py:57 models_provider/views/provide.py:58 +msgid "Example of obtaining model list" +msgstr "" + +#: models_provider/views/provide.py:75 +msgid "Get model default parameters" +msgstr "" + +#: models_provider/views/provide.py:76 models_provider/views/provide.py:92 +#: models_provider/views/provide.py:93 +msgid "Get the model creation form" +msgstr "" + +#: modules/models/module.py:6 modules/models/module.py:13 +#: modules/serializers/module.py:29 +msgid "module name" +msgstr "" + +#: modules/models/module.py:9 modules/serializers/module.py:32 +msgid "parent id" +msgstr "" + +#: modules/serializers/module.py:28 modules/serializers/module.py:62 +msgid "module id" +msgstr "" + +#: modules/serializers/module.py:30 +msgid "module user id" +msgstr "" + +#: modules/serializers/module.py:36 modules/serializers/module.py:64 +#: modules/serializers/module.py:96 tools/serializers/tool.py:28 +msgid "source" +msgstr "" + +#: modules/serializers/module.py:49 +msgid "Module name already exists" +msgstr "" + +#: modules/serializers/module.py:70 +msgid "Module does not exist" +msgstr "" + +#: modules/serializers/module.py:89 +msgid "Cannot delete root module" +msgstr "" + +#: modules/views/module.py:19 modules/views/module.py:20 +msgid "Create module" +msgstr "" + +#: modules/views/module.py:24 modules/views/module.py:43 +#: modules/views/module.py:56 modules/views/module.py:68 +#: modules/views/module.py:85 +msgid "Module" +msgstr "" + +#: modules/views/module.py:38 modules/views/module.py:39 +msgid "Update module" +msgstr "" + +#: modules/views/module.py:52 modules/views/module.py:53 +msgid "Get module" +msgstr "" + +#: modules/views/module.py:65 modules/views/module.py:66 +msgid "Delete module" +msgstr "" + +#: modules/views/module.py:81 modules/views/module.py:82 +msgid 
"Get module tree" +msgstr "" + +#: tools/serializers/tool.py:22 +msgid "variable name" +msgstr "" + +#: tools/serializers/tool.py:24 +msgid "type" +msgstr "" + +#: tools/serializers/tool.py:26 +msgid "fields only support string|int|dict|array|float" +msgstr "" + +#: tools/serializers/tool.py:30 +msgid "The field only supports custom|reference" +msgstr "" + +#: tools/serializers/tool.py:35 +msgid "field name" +msgstr "" + +#: tools/serializers/tool.py:36 +msgid "field label" +msgstr "" + +#: tools/serializers/tool.py:46 +msgid "tool name" +msgstr "" + +#: tools/serializers/tool.py:49 +msgid "tool description" +msgstr "" + +#: tools/serializers/tool.py:51 +msgid "tool content" +msgstr "" + +#: tools/serializers/tool.py:54 +msgid "input field list" +msgstr "" + +#: tools/serializers/tool.py:56 +msgid "init field list" +msgstr "" + +#: tools/serializers/tool.py:58 +msgid "Is active" +msgstr "" + +#: tools/serializers/tool.py:85 +msgid "tool id" +msgstr "" + +#: tools/serializers/tool.py:93 +msgid "Tool not found" +msgstr "" + +#: tools/views/tool.py:19 tools/views/tool.py:20 +msgid "Create tool" +msgstr "" + +#: tools/views/tool.py:24 tools/views/tool.py:40 tools/views/tool.py:52 +#: tools/views/tool.py:63 +msgid "Tool" +msgstr "" + +#: tools/views/tool.py:35 tools/views/tool.py:36 tools/views/tool.py:48 +#: tools/views/tool.py:49 +msgid "Update tool" +msgstr "" + +#: tools/views/tool.py:60 tools/views/tool.py:61 +msgid "Delete tool" +msgstr "" + +#: users/serializers/login.py:27 msgid "Username" msgstr "" -#: .\apps\users\serializers\login.py:24 +#: users/serializers/login.py:28 msgid "Password" msgstr "" -#: .\apps\users\serializers\login.py:31 -msgid "token" -msgstr "" - -#: .\apps\users\serializers\login.py:43 -msgid "The username or password is incorrect" -msgstr "" - -#: .\apps\users\serializers\login.py:45 -msgid "The user has been disabled, please contact the administrator!" 
-msgstr "" - -#: .\apps\users\views\login.py:21 .\apps\users\views\login.py:22 -msgid "Log in" -msgstr "" - -#: .\apps\users\views\login.py:23 .\apps\users\views\user.py:26 -msgid "User management" -msgstr "" - -#: .\apps\users\views\user.py:24 .\apps\users\views\user.py:25 -msgid "Get current user information" -msgstr "" - -msgid "Get captcha" -msgstr "" - +#: users/serializers/login.py:29 users/serializers/login.py:69 msgid "captcha" msgstr "" +#: users/serializers/login.py:36 +msgid "token" +msgstr "" + +#: users/serializers/login.py:50 msgid "Captcha code error or expiration" -msgstr "" \ No newline at end of file +msgstr "" + +#: users/serializers/login.py:53 +msgid "The username or password is incorrect" +msgstr "" + +#: users/serializers/login.py:55 +msgid "The user has been disabled, please contact the administrator!" +msgstr "" + +#: users/views/login.py:21 users/views/login.py:22 +msgid "Log in" +msgstr "" + +#: users/views/login.py:23 users/views/login.py:34 users/views/user.py:28 +#: users/views/user.py:40 users/views/user.py:53 +msgid "User management" +msgstr "" + +#: users/views/login.py:32 users/views/login.py:33 +msgid "Get captcha" +msgstr "" + +#: users/views/user.py:26 users/views/user.py:27 users/views/user.py:38 +msgid "Get current user information" +msgstr "" diff --git a/apps/locales/zh_CN/LC_MESSAGES/django.po b/apps/locales/zh_CN/LC_MESSAGES/django.po index 73e9f093c..efc6a5d74 100644 --- a/apps/locales/zh_CN/LC_MESSAGES/django.po +++ b/apps/locales/zh_CN/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-04-14 19:50+0800\n" +"POT-Creation-Date: 2025-04-18 17:04+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -17,97 +17,2194 @@ msgstr "" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -#: .\apps\common\auth\authenticate.py:63 
.\apps\common\auth\authenticate.py:84 +#: common/auth/authenticate.py:80 msgid "Not logged in, please log in first" msgstr "未登录,请先登录" -#: .\apps\common\auth\authenticate.py:69 .\apps\common\auth\authenticate.py:75 -#: .\apps\common\auth\authenticate.py:90 .\apps\common\auth\authenticate.py:96 +#: common/auth/authenticate.py:82 common/auth/authenticate.py:89 +#: common/auth/authenticate.py:95 msgid "Authentication information is incorrect! illegal user" msgstr "身份验证信息不正确!非法用户" -#: .\apps\common\auth\handle\impl\user_token.py:30 +#: common/auth/authentication.py:96 +msgid "No permission to access" +msgstr "无权限访问" + +#: common/auth/handle/impl/user_token.py:157 msgid "Login expired" msgstr "登录已过期" -#: .\apps\common\constants\permission_constants.py:46 -msgid "ADMIN" -msgstr "管理员" - -#: .\apps\common\constants\permission_constants.py:46 -msgid "Super administrator" -msgstr "超级管理员" - -#: .\apps\common\exception\handle_exception.py:32 +#: common/exception/handle_exception.py:32 msgid "Unknown exception" msgstr "未知错误" -#: .\apps\common\result\api.py:17 .\apps\common\result\api.py:27 +#: common/forms/base_field.py:64 +#, python-brace-format +msgid "The field {field_label} is required" +msgstr "{field_label} 字段是必填项" + +#: common/forms/slider_field.py:56 +#, python-brace-format +msgid "The {field_label} cannot be less than {min}" +msgstr "{field_label} 不能小于{min}" + +#: common/forms/slider_field.py:62 +#, python-brace-format +msgid "The {field_label} cannot be greater than {max}" +msgstr "{field_label} 不能大于{max}" + +#: common/result/api.py:17 common/result/api.py:27 msgid "response code" msgstr "响应码" -#: .\apps\common\result\api.py:18 .\apps\common\result\api.py:19 -#: .\apps\common\result\api.py:28 .\apps\common\result\api.py:29 +#: common/result/api.py:18 common/result/api.py:19 common/result/api.py:28 +#: common/result/api.py:29 msgid "error prompt" msgstr "错误提示" -#: .\apps\common\result\api.py:43 +#: common/result/api.py:43 msgid "total number of data" msgstr "总数据" -#: 
.\apps\common\result\api.py:44 +#: common/result/api.py:44 msgid "current page" msgstr "当前页" -#: .\apps\common\result\api.py:45 +#: common/result/api.py:45 msgid "page size" msgstr "每页大小" -#: .\apps\common\result\result.py:31 +#: common/result/result.py:31 msgid "Success" msgstr "成功" -#: .\apps\maxkb\settings\base.py:80 +#: common/utils/common.py:83 +msgid "Text-to-speech node, the text content must be of string type" +msgstr "文本转语音节点,文本内容必须是字符串类型" + +#: common/utils/common.py:85 +msgid "Text-to-speech node, the text content cannot be empty" +msgstr "文本转语音节点,文本内容不能为空" + +#: maxkb/settings/base.py:83 msgid "Intelligent customer service platform" msgstr "智能客服平台" -#: .\apps\users\serializers\login.py:23 +#: models_provider/api/model.py:36 models_provider/api/model.py:49 +#: models_provider/serializers/model_serializer.py:262 +#: models_provider/serializers/model_serializer.py:326 +#: modules/serializers/module.py:31 modules/serializers/module.py:63 +#: modules/serializers/module.py:95 +msgid "workspace id" +msgstr "工作空间ID" + +#: models_provider/api/model.py:55 +#: models_provider/serializers/model_serializer.py:107 +#: models_provider/serializers/model_serializer.py:365 +msgid "model id" +msgstr "模型ID" + +#: models_provider/api/provide.py:17 models_provider/api/provide.py:23 +#: models_provider/api/provide.py:28 models_provider/api/provide.py:30 +#: models_provider/api/provide.py:67 +#: models_provider/serializers/model_serializer.py:40 +#: models_provider/serializers/model_serializer.py:218 +#: models_provider/serializers/model_serializer.py:256 +#: models_provider/serializers/model_serializer.py:321 +msgid "model name" +msgstr "模型名称" + +#: models_provider/api/provide.py:18 models_provider/api/provide.py:38 +#: models_provider/api/provide.py:61 models_provider/api/provide.py:89 +#: models_provider/api/provide.py:111 +#: models_provider/serializers/model_serializer.py:41 +#: models_provider/serializers/model_serializer.py:257 +#: 
models_provider/serializers/model_serializer.py:324 +msgid "provider" +msgstr "供应商" + +#: models_provider/api/provide.py:19 +msgid "icon" +msgstr "" + +#: models_provider/api/provide.py:24 +msgid "value" +msgstr "值" + +#: models_provider/api/provide.py:29 models_provider/api/provide.py:55 +#: models_provider/api/provide.py:83 +#: models_provider/serializers/model_serializer.py:42 +#: models_provider/serializers/model_serializer.py:220 +#: models_provider/serializers/model_serializer.py:258 +#: models_provider/serializers/model_serializer.py:322 +msgid "model type" +msgstr "模型类型" + +#: models_provider/api/provide.py:34 +msgid "input type" +msgstr "输入类型" + +#: models_provider/api/provide.py:35 +msgid "label" +msgstr "标签" + +#: models_provider/api/provide.py:36 +msgid "text field" +msgstr "文本字段" + +#: models_provider/api/provide.py:37 +msgid "value field" +msgstr "值" + +#: models_provider/api/provide.py:39 +msgid "method" +msgstr "方法" + +#: models_provider/api/provide.py:40 tools/serializers/tool.py:22 +msgid "required" +msgstr "必填" + +#: models_provider/api/provide.py:41 +msgid "default value" +msgstr "默认值" + +#: models_provider/api/provide.py:42 +msgid "relation show field dict" +msgstr "关系显示字段" + +#: models_provider/api/provide.py:43 +msgid "relation trigger field dict" +msgstr "关系触发字段" + +#: models_provider/api/provide.py:44 +msgid "trigger type" +msgstr "触发类型" + +#: models_provider/api/provide.py:45 +msgid "attrs" +msgstr "属性" + +#: models_provider/api/provide.py:46 +msgid "props info" +msgstr "props 信息" + +#: models_provider/base_model_provider.py:60 +msgid "Model type cannot be empty" +msgstr "模型类型不能为空" + +#: models_provider/base_model_provider.py:85 +msgid "The current platform does not support downloading models" +msgstr "当前平台不支持下载模型" + +#: models_provider/base_model_provider.py:140 +msgid "LLM" +msgstr "大语言模型" + +#: models_provider/base_model_provider.py:141 +msgid "Embedding Model" +msgstr "向量模型" + +#: models_provider/base_model_provider.py:142 +msgid 
"Speech2Text" +msgstr "语音识别" + +#: models_provider/base_model_provider.py:143 +msgid "TTS" +msgstr "语音合成" + +#: models_provider/base_model_provider.py:144 +msgid "Vision Model" +msgstr "视觉模型" + +#: models_provider/base_model_provider.py:145 +msgid "Image Generation" +msgstr "图片生成" + +#: models_provider/base_model_provider.py:146 +msgid "Rerank" +msgstr "重排模型" + +#: models_provider/base_model_provider.py:220 +msgid "The model does not support" +msgstr "模型不支持" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 +msgid "" +"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " +"Lab, developers can integrate high-quality text retrieval and sorting " +"through the LlamaIndex framework." +msgstr "" +"阿里巴巴通义实验室开发的GTE-Rerank文本排序系列模型,开发者可以通过LlamaIndex" +"框架进行集成高质量文本检索、排序。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 +msgid "" +"Chinese (including various dialects such as Cantonese), English, Japanese, " +"and Korean support free switching between multiple languages." +msgstr "中文(含粤语等各种方言)、英文、日语、韩语支持多个语种自由切换" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 +msgid "" +"CosyVoice is based on a new generation of large generative speech models, " +"which can predict emotions, intonation, rhythm, etc. based on context, and " +"has better anthropomorphic effects." +msgstr "" +"CosyVoice基于新一代生成式语音大模型,能根据上下文预测情绪、语调、韵律等,具有" +"更好的拟人效果" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 +msgid "" +"Universal text vector is Tongyi Lab's multi-language text unified vector " +"model based on the LLM base. It provides high-level vector services for " +"multiple mainstream languages around the world and helps developers quickly " +"convert text data into high-quality vector data." 
+msgstr "" +"通用文本向量,是通义实验室基于LLM底座的多语言文本统一向量模型,面向全球多个主" +"流语种,提供高水准的向量服务,帮助开发者将文本数据快速转换为高质量的向量数" +"据。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40 +msgid "" +"Tongyi Wanxiang - a large image model for text generation, supports " +"bilingual input in Chinese and English, and supports the input of reference " +"pictures for reference content or reference style migration. Key styles " +"include but are not limited to watercolor, oil painting, Chinese painting, " +"sketch, flat illustration, two-dimensional, and 3D. Cartoon." +msgstr "" +"通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容" +"或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二" +"次元、3D卡通。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 +msgid "Alibaba Cloud Bailian" +msgstr "阿里云百炼" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:53 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:50 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:74 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:61 +#: models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 +#: models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 +#: models_provider/impl/anthropic_model_provider/credential/image.py:33 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:57 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 +#: models_provider/impl/azure_model_provider/credential/embedding.py:37 +#: models_provider/impl/azure_model_provider/credential/image.py:55 +#: models_provider/impl/azure_model_provider/credential/llm.py:69 +#: 
models_provider/impl/deepseek_model_provider/credential/llm.py:57 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:36 +#: models_provider/impl/gemini_model_provider/credential/image.py:51 +#: models_provider/impl/gemini_model_provider/credential/llm.py:57 +#: models_provider/impl/gemini_model_provider/model/stt.py:43 +#: models_provider/impl/kimi_model_provider/credential/llm.py:57 +#: models_provider/impl/local_model_provider/credential/embedding.py:36 +#: models_provider/impl/local_model_provider/credential/reranker.py:37 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:37 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:44 +#: models_provider/impl/openai_model_provider/credential/embedding.py:36 +#: models_provider/impl/openai_model_provider/credential/image.py:54 +#: models_provider/impl/openai_model_provider/credential/llm.py:59 +#: models_provider/impl/qwen_model_provider/credential/image.py:56 +#: models_provider/impl/qwen_model_provider/credential/llm.py:56 +#: models_provider/impl/qwen_model_provider/model/tti.py:43 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:54 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:23 +#: models_provider/impl/tencent_model_provider/credential/image.py:56 +#: models_provider/impl/tencent_model_provider/credential/llm.py:51 +#: models_provider/impl/tencent_model_provider/model/tti.py:54 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:36 +#: models_provider/impl/vllm_model_provider/credential/llm.py:50 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/image.py:52 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 +#: models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:31 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:60 +#: models_provider/impl/xf_model_provider/credential/embedding.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:76 +#: models_provider/impl/xf_model_provider/model/tts.py:101 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:31 +#: models_provider/impl/xinference_model_provider/credential/image.py:51 +#: models_provider/impl/xinference_model_provider/credential/llm.py:50 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:34 +#: models_provider/impl/xinference_model_provider/model/tts.py:44 +#: models_provider/impl/zhipu_model_provider/credential/image.py:51 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:56 +#: models_provider/impl/zhipu_model_provider/model/tti.py:49 +msgid "Hello" +msgstr "你好" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:36 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:60 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:46 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:44 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:96 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:89 +#: models_provider/impl/anthropic_model_provider/credential/image.py:23 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:47 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 +#: models_provider/impl/azure_model_provider/credential/embedding.py:27 +#: 
models_provider/impl/azure_model_provider/credential/image.py:45 +#: models_provider/impl/azure_model_provider/credential/llm.py:59 +#: models_provider/impl/azure_model_provider/credential/stt.py:23 +#: models_provider/impl/azure_model_provider/credential/tti.py:58 +#: models_provider/impl/azure_model_provider/credential/tts.py:41 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:47 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:26 +#: models_provider/impl/gemini_model_provider/credential/image.py:41 +#: models_provider/impl/gemini_model_provider/credential/llm.py:47 +#: models_provider/impl/gemini_model_provider/credential/stt.py:21 +#: models_provider/impl/kimi_model_provider/credential/llm.py:47 +#: models_provider/impl/local_model_provider/credential/embedding.py:27 +#: models_provider/impl/local_model_provider/credential/reranker.py:28 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:26 +#: models_provider/impl/ollama_model_provider/credential/image.py:39 +#: models_provider/impl/ollama_model_provider/credential/llm.py:44 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:27 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:31 +#: models_provider/impl/openai_model_provider/credential/embedding.py:26 +#: models_provider/impl/openai_model_provider/credential/image.py:44 +#: models_provider/impl/openai_model_provider/credential/llm.py:48 +#: models_provider/impl/openai_model_provider/credential/stt.py:22 +#: models_provider/impl/openai_model_provider/credential/tti.py:61 +#: models_provider/impl/openai_model_provider/credential/tts.py:40 +#: models_provider/impl/qwen_model_provider/credential/image.py:47 +#: models_provider/impl/qwen_model_provider/credential/llm.py:47 +#: models_provider/impl/qwen_model_provider/credential/tti.py:68 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 +#: 
models_provider/impl/siliconCloud_model_provider/credential/image.py:44 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:22 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:19 +#: models_provider/impl/tencent_model_provider/credential/image.py:47 +#: models_provider/impl/tencent_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_model_provider/credential/tti.py:78 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:26 +#: models_provider/impl/vllm_model_provider/credential/image.py:42 +#: models_provider/impl/vllm_model_provider/credential/llm.py:39 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:51 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:27 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:46 +#: models_provider/impl/xf_model_provider/credential/embedding.py:27 +#: models_provider/impl/xf_model_provider/credential/image.py:29 +#: models_provider/impl/xf_model_provider/credential/llm.py:66 +#: models_provider/impl/xf_model_provider/credential/stt.py:24 +#: models_provider/impl/xf_model_provider/credential/tts.py:47 +#: 
models_provider/impl/xinference_model_provider/credential/embedding.py:19 +#: models_provider/impl/xinference_model_provider/credential/image.py:41 +#: models_provider/impl/xinference_model_provider/credential/llm.py:39 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:25 +#: models_provider/impl/xinference_model_provider/credential/stt.py:21 +#: models_provider/impl/xinference_model_provider/credential/tti.py:59 +#: models_provider/impl/xinference_model_provider/credential/tts.py:39 +#: models_provider/impl/zhipu_model_provider/credential/image.py:41 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:47 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:40 +#, python-brace-format +msgid "{model_type} Model type is not supported" +msgstr "{model_type} 模型类型不支持" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:44 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:68 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:55 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:53 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:105 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:98 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填项" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:60 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:82 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:69 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:67 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:121 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:113 +#: models_provider/impl/anthropic_model_provider/credential/image.py:43 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:65 +#: 
models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 +#: models_provider/impl/azure_model_provider/credential/image.py:65 +#: models_provider/impl/azure_model_provider/credential/stt.py:40 +#: models_provider/impl/azure_model_provider/credential/tti.py:77 +#: models_provider/impl/azure_model_provider/credential/tts.py:58 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:65 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:43 +#: models_provider/impl/gemini_model_provider/credential/image.py:61 +#: models_provider/impl/gemini_model_provider/credential/llm.py:66 +#: models_provider/impl/gemini_model_provider/credential/stt.py:38 +#: models_provider/impl/kimi_model_provider/credential/llm.py:64 +#: models_provider/impl/local_model_provider/credential/embedding.py:44 +#: models_provider/impl/local_model_provider/credential/reranker.py:45 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:51 +#: models_provider/impl/openai_model_provider/credential/embedding.py:43 +#: models_provider/impl/openai_model_provider/credential/image.py:64 +#: models_provider/impl/openai_model_provider/credential/llm.py:67 +#: models_provider/impl/openai_model_provider/credential/stt.py:39 +#: models_provider/impl/openai_model_provider/credential/tti.py:80 +#: models_provider/impl/openai_model_provider/credential/tts.py:58 +#: models_provider/impl/qwen_model_provider/credential/image.py:66 +#: models_provider/impl/qwen_model_provider/credential/llm.py:64 +#: models_provider/impl/qwen_model_provider/credential/tti.py:86 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:64 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 +#: 
models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:30 +#: models_provider/impl/tencent_model_provider/credential/image.py:66 +#: models_provider/impl/tencent_model_provider/credential/llm.py:57 +#: models_provider/impl/tencent_model_provider/credential/tti.py:104 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:43 +#: models_provider/impl/vllm_model_provider/credential/image.py:62 +#: models_provider/impl/vllm_model_provider/credential/llm.py:55 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:62 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:68 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:38 +#: models_provider/impl/xf_model_provider/credential/embedding.py:38 +#: models_provider/impl/xf_model_provider/credential/image.py:50 +#: models_provider/impl/xf_model_provider/credential/llm.py:84 +#: models_provider/impl/xf_model_provider/credential/stt.py:41 +#: models_provider/impl/xf_model_provider/credential/tts.py:65 +#: models_provider/impl/xinference_model_provider/credential/image.py:60 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:40 +#: models_provider/impl/xinference_model_provider/credential/stt.py:37 +#: models_provider/impl/xinference_model_provider/credential/tti.py:77 +#: 
models_provider/impl/xinference_model_provider/credential/tts.py:56 +#: models_provider/impl/zhipu_model_provider/credential/image.py:61 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:64 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:59 +#, python-brace-format +msgid "" +"Verification failed, please check whether the parameters are correct: {error}" +msgstr "认证失败,请检查参数是否正确:{error}" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:17 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:22 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 +#: models_provider/impl/azure_model_provider/credential/image.py:17 +#: models_provider/impl/azure_model_provider/credential/llm.py:23 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:22 +#: models_provider/impl/gemini_model_provider/credential/image.py:15 +#: models_provider/impl/gemini_model_provider/credential/llm.py:22 +#: models_provider/impl/kimi_model_provider/credential/llm.py:22 +#: models_provider/impl/ollama_model_provider/credential/image.py:12 +#: models_provider/impl/ollama_model_provider/credential/llm.py:20 +#: models_provider/impl/openai_model_provider/credential/image.py:17 +#: models_provider/impl/openai_model_provider/credential/llm.py:23 +#: models_provider/impl/qwen_model_provider/credential/image.py:22 +#: models_provider/impl/qwen_model_provider/credential/llm.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:17 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 +#: models_provider/impl/tencent_model_provider/credential/image.py:22 +#: models_provider/impl/tencent_model_provider/credential/llm.py:14 +#: models_provider/impl/vllm_model_provider/credential/image.py:15 +#: models_provider/impl/vllm_model_provider/credential/llm.py:15 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/image.py:15 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:22 +#: models_provider/impl/xf_model_provider/credential/llm.py:22 +#: models_provider/impl/xf_model_provider/credential/llm.py:41 +#: models_provider/impl/xinference_model_provider/credential/image.py:14 +#: models_provider/impl/xinference_model_provider/credential/llm.py:15 +#: models_provider/impl/zhipu_model_provider/credential/image.py:15 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:22 +msgid "Temperature" +msgstr "温度" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:30 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:31 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 +#: models_provider/impl/azure_model_provider/credential/image.py:26 +#: models_provider/impl/azure_model_provider/credential/llm.py:32 +#: models_provider/impl/azure_model_provider/credential/llm.py:43 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:31 +#: models_provider/impl/gemini_model_provider/credential/image.py:24 +#: models_provider/impl/gemini_model_provider/credential/llm.py:31 +#: models_provider/impl/kimi_model_provider/credential/llm.py:31 +#: models_provider/impl/ollama_model_provider/credential/image.py:21 +#: models_provider/impl/ollama_model_provider/credential/llm.py:29 +#: models_provider/impl/openai_model_provider/credential/image.py:26 +#: models_provider/impl/openai_model_provider/credential/llm.py:32 +#: models_provider/impl/qwen_model_provider/credential/image.py:31 +#: models_provider/impl/qwen_model_provider/credential/llm.py:31 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:26 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 
+#: models_provider/impl/tencent_model_provider/credential/image.py:31 +#: models_provider/impl/vllm_model_provider/credential/image.py:24 +#: models_provider/impl/vllm_model_provider/credential/llm.py:24 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:24 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:50 +#: models_provider/impl/xinference_model_provider/credential/image.py:23 +#: models_provider/impl/xinference_model_provider/credential/llm.py:24 +#: models_provider/impl/zhipu_model_provider/credential/image.py:24 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:31 +msgid "Output the maximum Tokens" +msgstr "输出最大Token数" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:31 +msgid "Specify the maximum number of tokens that the model can generate." 
+msgstr "指定模型可以生成的最大 tokens 数" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:44 +#: models_provider/impl/anthropic_model_provider/credential/image.py:15 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:74 +msgid "API URL" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 +#: models_provider/impl/anthropic_model_provider/credential/image.py:16 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:75 +msgid "API Key" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 +#: models_provider/impl/azure_model_provider/credential/tti.py:15 +#: models_provider/impl/openai_model_provider/credential/tti.py:15 +#: models_provider/impl/qwen_model_provider/credential/tti.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 +#: models_provider/impl/xinference_model_provider/credential/tti.py:14 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:15 +#, fuzzy +#| msgid "page size" +msgid "Image size" +msgstr "每页大小" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 +#: models_provider/impl/azure_model_provider/credential/tti.py:15 +#: models_provider/impl/qwen_model_provider/credential/tti.py:22 +msgid "Specify the size of the generated image, such as: 1024x1024" +msgstr "指定生成图片的尺寸, 如: 1024x1024" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: models_provider/impl/azure_model_provider/credential/tti.py:40 +#: models_provider/impl/openai_model_provider/credential/tti.py:43 +#: models_provider/impl/qwen_model_provider/credential/tti.py:34 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 +#: models_provider/impl/xinference_model_provider/credential/tti.py:41 +msgid "Number of pictures" +msgstr "图片数量" + +#: 
models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: models_provider/impl/azure_model_provider/credential/tti.py:40 +#: models_provider/impl/qwen_model_provider/credential/tti.py:34 +msgid "Specify the number of generated images" +msgstr "指定生成图片的数量" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Style" +msgstr "风格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Specify the style of generated images" +msgstr "指定生成图片的风格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45 +msgid "Default value, the image style is randomly output by the model" +msgstr "默认值,图片风格由模型随机输出" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46 +msgid "photography" +msgstr "摄影" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47 +msgid "Portraits" +msgstr "人像写真" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48 +msgid "3D cartoon" +msgstr "3D卡通" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49 +msgid "animation" +msgstr "动画" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50 +msgid "painting" +msgstr "油画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51 +msgid "watercolor" +msgstr "水彩" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52 +msgid "sketch" +msgstr "素描" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53 +msgid "Chinese painting" +msgstr "中国画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54 +msgid "flat illustration" +msgstr "扁平插画" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15 +msgid "timbre" +msgstr "音色" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +msgid "Chinese sounds can support mixed scenes of Chinese and English" +msgstr "中文音色支持中英文混合场景" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +msgid "Long Xiaochun" +msgstr "龙小淳" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21 +msgid 
"Long Xiaoxia" +msgstr "龙小夏" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22 +msgid "Long Xiaochen" +msgstr "龙小诚" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23 +msgid "Long Xiaobai" +msgstr "龙小白" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24 +msgid "Long laotie" +msgstr "龙老铁" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25 +msgid "Long Shu" +msgstr "龙书" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 +msgid "Long Shuo" +msgstr "龙硕" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 +msgid "Long Jing" +msgstr "龙婧" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 +msgid "Long Miao" +msgstr "龙妙" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 +msgid "Long Yue" +msgstr "龙悦" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 +msgid "Long Yuan" +msgstr "龙媛" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 +msgid "Long Fei" +msgstr "龙飞" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 +msgid "Long Jielidou" +msgstr "龙杰力豆" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 +msgid "Long Tong" +msgstr "龙彤" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 +msgid "Long Xiang" +msgstr "龙祥" + + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 +msgid "Speaking speed" +msgstr "语速" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 +msgid "[0.5, 
2], the default is 1, usually one decimal place is enough" +msgstr "[0.5,2],默认为1,通常一位小数就足够了" + +#: models_provider/impl/anthropic_model_provider/credential/image.py:28 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:52 +#: models_provider/impl/azure_model_provider/credential/embedding.py:32 +#: models_provider/impl/azure_model_provider/credential/image.py:50 +#: models_provider/impl/azure_model_provider/credential/llm.py:64 +#: models_provider/impl/azure_model_provider/credential/stt.py:28 +#: models_provider/impl/azure_model_provider/credential/tti.py:63 +#: models_provider/impl/azure_model_provider/credential/tts.py:46 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:52 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:31 +#: models_provider/impl/gemini_model_provider/credential/image.py:46 +#: models_provider/impl/gemini_model_provider/credential/llm.py:52 +#: models_provider/impl/gemini_model_provider/credential/stt.py:26 +#: models_provider/impl/kimi_model_provider/credential/llm.py:52 +#: models_provider/impl/local_model_provider/credential/embedding.py:31 +#: models_provider/impl/local_model_provider/credential/reranker.py:32 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:46 +#: models_provider/impl/ollama_model_provider/credential/llm.py:62 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:63 +#: models_provider/impl/openai_model_provider/credential/embedding.py:31 +#: models_provider/impl/openai_model_provider/credential/image.py:49 +#: models_provider/impl/openai_model_provider/credential/llm.py:53 +#: models_provider/impl/openai_model_provider/credential/stt.py:27 +#: models_provider/impl/openai_model_provider/credential/tti.py:66 +#: models_provider/impl/openai_model_provider/credential/tts.py:45 +#: models_provider/impl/qwen_model_provider/credential/image.py:51 +#: models_provider/impl/qwen_model_provider/credential/llm.py:51 +#: 
models_provider/impl/qwen_model_provider/credential/tti.py:72 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:49 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:27 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 +#: models_provider/impl/tencent_model_provider/credential/image.py:51 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:31 +#: models_provider/impl/vllm_model_provider/credential/image.py:47 +#: models_provider/impl/vllm_model_provider/credential/llm.py:65 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:47 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:56 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:55 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:72 +#: models_provider/impl/xf_model_provider/credential/image.py:34 +#: models_provider/impl/xf_model_provider/credential/llm.py:71 +#: models_provider/impl/xf_model_provider/credential/stt.py:29 +#: models_provider/impl/xf_model_provider/credential/tts.py:52 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:40 +#: models_provider/impl/xinference_model_provider/credential/image.py:46 +#: 
models_provider/impl/xinference_model_provider/credential/llm.py:59 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:29 +#: models_provider/impl/xinference_model_provider/credential/stt.py:26 +#: models_provider/impl/xinference_model_provider/credential/tti.py:64 +#: models_provider/impl/xinference_model_provider/credential/tts.py:44 +#: models_provider/impl/zhipu_model_provider/credential/image.py:46 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:51 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:45 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填项" + +#: models_provider/impl/anthropic_model_provider/credential/llm.py:23 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 +#: models_provider/impl/azure_model_provider/credential/image.py:18 +#: models_provider/impl/azure_model_provider/credential/llm.py:24 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:23 +#: models_provider/impl/gemini_model_provider/credential/image.py:16 +#: models_provider/impl/gemini_model_provider/credential/llm.py:23 +#: models_provider/impl/kimi_model_provider/credential/llm.py:23 +#: models_provider/impl/ollama_model_provider/credential/image.py:13 +#: models_provider/impl/ollama_model_provider/credential/llm.py:21 +#: models_provider/impl/openai_model_provider/credential/image.py:18 +#: models_provider/impl/openai_model_provider/credential/llm.py:24 +#: models_provider/impl/qwen_model_provider/credential/image.py:23 +#: models_provider/impl/qwen_model_provider/credential/llm.py:23 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:18 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 +#: models_provider/impl/tencent_model_provider/credential/image.py:23 +#: models_provider/impl/tencent_model_provider/credential/llm.py:15 +#: 
models_provider/impl/vllm_model_provider/credential/image.py:16 +#: models_provider/impl/vllm_model_provider/credential/llm.py:16 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:16 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:23 +#: models_provider/impl/xf_model_provider/credential/llm.py:23 +#: models_provider/impl/xf_model_provider/credential/llm.py:42 +#: models_provider/impl/xinference_model_provider/credential/image.py:15 +#: models_provider/impl/xinference_model_provider/credential/llm.py:16 +#: models_provider/impl/zhipu_model_provider/credential/image.py:16 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:23 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic" +msgstr "较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定" + +#: models_provider/impl/anthropic_model_provider/credential/llm.py:32 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 +#: models_provider/impl/azure_model_provider/credential/image.py:27 +#: models_provider/impl/azure_model_provider/credential/llm.py:33 +#: models_provider/impl/azure_model_provider/credential/llm.py:44 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:32 +#: models_provider/impl/gemini_model_provider/credential/image.py:25 +#: models_provider/impl/gemini_model_provider/credential/llm.py:32 +#: models_provider/impl/kimi_model_provider/credential/llm.py:32 +#: models_provider/impl/ollama_model_provider/credential/image.py:22 +#: models_provider/impl/ollama_model_provider/credential/llm.py:30 +#: models_provider/impl/openai_model_provider/credential/image.py:27 +#: models_provider/impl/openai_model_provider/credential/llm.py:33 +#: models_provider/impl/qwen_model_provider/credential/image.py:32 +#: models_provider/impl/qwen_model_provider/credential/llm.py:32 +#: 
models_provider/impl/siliconCloud_model_provider/credential/image.py:27 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 +#: models_provider/impl/tencent_model_provider/credential/image.py:32 +#: models_provider/impl/vllm_model_provider/credential/image.py:25 +#: models_provider/impl/vllm_model_provider/credential/llm.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:32 +#: models_provider/impl/xf_model_provider/credential/llm.py:32 +#: models_provider/impl/xf_model_provider/credential/llm.py:51 +#: models_provider/impl/xinference_model_provider/credential/image.py:24 +#: models_provider/impl/xinference_model_provider/credential/llm.py:25 +#: models_provider/impl/zhipu_model_provider/credential/image.py:25 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:32 +msgid "Specify the maximum number of tokens that the model can generate" +msgstr "指定模型可以生成的最大 tokens 数" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 +msgid "" +"An update to Claude 2 that doubles the context window and improves " +"reliability, hallucination rates, and evidence-based accuracy in long " +"documents and RAG contexts." +msgstr "" +"Claude 2 的更新,采用双倍的上下文窗口,并在长文档和 RAG 上下文中提高可靠性、" +"幻觉率和循证准确性。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 +msgid "" +"Anthropic is a powerful model that can handle a variety of tasks, from " +"complex dialogue and creative content generation to detailed command " +"obedience." 
+msgstr "" +"Anthropic 功能强大的模型,可处理各种任务,从复杂的对话和创意内容生成到详细的" +"指令服从。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 +msgid "" +"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" +"instant responsiveness. The model can answer simple queries and requests " +"quickly. Customers will be able to build seamless AI experiences that mimic " +"human interactions. Claude 3 Haiku can process images and return text " +"output, and provides 200K context windows." +msgstr "" +"Claude 3 Haiku 是 Anthropic 最快速、最紧凑的模型,具有近乎即时的响应能力。该" +"模型可以快速回答简单的查询和请求。客户将能够构建模仿人类交互的无缝人工智能体" +"验。 Claude 3 Haiku 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 +msgid "" +"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " +"intelligence and speed, especially when it comes to handling enterprise " +"workloads. This model offers maximum utility while being priced lower than " +"competing products, and it's been engineered to be a solid choice for " +"deploying AI at scale." +msgstr "" +"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在" +"处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过" +"精心设计,是大规模部署人工智能的可靠选择。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 +msgid "" +"The Claude 3.5 Sonnet raises the industry standard for intelligence, " +"outperforming competing models and the Claude 3 Opus in extensive " +"evaluations, with the speed and cost-effectiveness of our mid-range models." 
+msgstr "" +"Claude 3.5 Sonnet提高了智能的行业标准,在广泛的评估中超越了竞争对手的型号和" +"Claude 3 Opus,具有我们中端型号的速度和成本效益。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 +msgid "" +"A faster, more affordable but still very powerful model that can handle a " +"range of tasks including casual conversation, text analysis, summarization " +"and document question answering." +msgstr "" +"一种更快速、更实惠但仍然非常强大的模型,它可以处理一系列任务,包括随意对话、" +"文本分析、摘要和文档问题回答。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 +msgid "" +"Titan Text Premier is the most powerful and advanced model in the Titan Text " +"series, designed to deliver exceptional performance for a variety of " +"enterprise applications. With its cutting-edge features, it delivers greater " +"accuracy and outstanding results, making it an excellent choice for " +"organizations looking for a top-notch text processing solution." +msgstr "" +"Titan Text Premier 是 Titan Text 系列中功能强大且先进的型号,旨在为各种企业应" +"用程序提供卓越的性能。凭借其尖端功能,它提供了更高的准确性和出色的结果,使其" +"成为寻求一流文本处理解决方案的组织的绝佳选择。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 +msgid "" +"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" +"tuning English-language tasks, including summarization and copywriting, " +"where customers require smaller, more cost-effective, and highly " +"customizable models." +msgstr "" +"Amazon Titan Text Lite 是一种轻量级的高效模型,非常适合英语任务的微调,包括摘" +"要和文案写作等,在这种场景下,客户需要更小、更经济高效且高度可定制的模型" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 +msgid "" +"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " +"it ideal for a variety of high-level general language tasks, such as open-" +"ended text generation and conversational chat, as well as support in " +"retrieval-augmented generation (RAG). 
At launch, the model is optimized for " +"English, but other languages are supported." +msgstr "" +"Amazon Titan Text Express 的上下文长度长达 8000 个 tokens,因而非常适合各种高" +"级常规语言任务,例如开放式文本生成和对话式聊天,以及检索增强生成(RAG)中的支" +"持。在发布时,该模型针对英语进行了优化,但也支持其他语言。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 +msgid "" +"7B dense converter for rapid deployment and easy customization. Small in " +"size yet powerful in a variety of use cases. Supports English and code, as " +"well as 32k context windows." +msgstr "" +"7B 密集型转换器,可快速部署,易于定制。体积虽小,但功能强大,适用于各种用例。" +"支持英语和代码,以及 32k 的上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 +msgid "" +"Advanced Mistral AI large-scale language model capable of handling any " +"language task, including complex multilingual reasoning, text understanding, " +"transformation, and code generation." +msgstr "" +"先进的 Mistral AI 大型语言模型,能够处理任何语言任务,包括复杂的多语言推理、" +"文本理解、转换和代码生成。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 +msgid "" +"Ideal for content creation, conversational AI, language understanding, R&D, " +"and enterprise applications" +msgstr "非常适合内容创作、会话式人工智能、语言理解、研发和企业应用" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 +msgid "" +"Ideal for limited computing power and resources, edge devices, and faster " +"training times." +msgstr "非常适合有限的计算能力和资源、边缘设备和更快的训练时间。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 +msgid "" +"Titan Embed Text is the largest embedding model in the Amazon Titan Embed " +"series and can handle various text embedding tasks, such as text " +"classification, text similarity calculation, etc." 
+msgstr "" +"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以处理各种文本" +"嵌入任务,如文本分类、文本相似度计算等。" + +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 +#, python-brace-format +msgid "The following fields are required: {keys}" +msgstr "以下字段是必填项: {keys}" + +#: models_provider/impl/azure_model_provider/credential/embedding.py:44 +#: models_provider/impl/azure_model_provider/credential/llm.py:76 +msgid "Verification failed, please check whether the parameters are correct" +msgstr "认证失败,请检查参数是否正确" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28 +msgid "Picture quality" +msgstr "图片质量" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17 +msgid "" +"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " +"to find one that suits your desired tone and audience. The current voiceover " +"is optimized for English." 
+msgstr "" +"尝试不同的声音(合金、回声、寓言、缟玛瑙、新星和闪光),找到一种适合您所需的" +"音调和听众的声音。当前的语音针对英语进行了优化。" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24 +msgid "Good at common conversational tasks, supports 32K contexts" +msgstr "擅长通用对话任务,支持 32K 上下文" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29 +msgid "Good at handling programming tasks, supports 16K contexts" +msgstr "擅长处理编程任务,支持 16K 上下文" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32 +msgid "Latest Gemini 1.0 Pro model, updated with Google update" +msgstr "最新的 Gemini 1.0 Pro 模型,随Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36 +msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update" +msgstr "最新的Gemini 1.0 Pro Vision模型,随Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58 +msgid "Latest Gemini 1.5 Flash model, updated with Google updates" +msgstr "最新的Gemini 1.5 Flash模型,随Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53 +msgid "convert audio to text" +msgstr "将音频转换为文本" + +#: models_provider/impl/local_model_provider/credential/embedding.py:53 +#: models_provider/impl/local_model_provider/credential/reranker.py:54 +msgid "Model catalog" +msgstr "模型目录" + +#: models_provider/impl/local_model_provider/local_model_provider.py:39 +msgid "local model" +msgstr "本地模型" + +#: models_provider/impl/ollama_model_provider/credential/embedding.py:30 +#: 
models_provider/impl/ollama_model_provider/credential/image.py:43 +#: models_provider/impl/ollama_model_provider/credential/llm.py:48 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:35 +#: models_provider/impl/vllm_model_provider/credential/llm.py:43 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:24 +#: models_provider/impl/xinference_model_provider/credential/llm.py:44 +msgid "API domain name is invalid" +msgstr "API 域名无效" + +#: models_provider/impl/ollama_model_provider/credential/embedding.py:35 +#: models_provider/impl/ollama_model_provider/credential/image.py:48 +#: models_provider/impl/ollama_model_provider/credential/llm.py:53 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:40 +#: models_provider/impl/vllm_model_provider/credential/llm.py:47 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:30 +#: models_provider/impl/xinference_model_provider/credential/llm.py:48 +msgid "The model does not exist, please download the model first" +msgstr "模型不存在,请先下载模型" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 7B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。" +"这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 13B pretrained " +"models. Links to other models can be found in the index at the bottom." 
+msgstr "" +"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。" +"这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 70B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。" +"这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 +msgid "" +"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " +"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " +"that it has strong Chinese conversation capabilities." +msgstr "" +"由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-" +"chat-hf进行LoRA微调,使其具备较强的中文对话能力。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 8 billion " +"parameters." +msgstr "Meta Llama 3:迄今为止最有能力的公开产品LLM。80亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 70 billion " +"parameters." +msgstr "Meta Llama 3:迄今为止最有能力的公开产品LLM。700亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 +msgid "" +"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 500 million parameters." 
+msgstr "" +"qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有" +"显著增强。所有规模的模型都支持32768个tokens的上下文长度。5亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 +msgid "" +"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 1.8 billion parameters." +msgstr "" +"qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有" +"显著增强。所有规模的模型都支持32768个tokens的上下文长度。18亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 +msgid "" +"Compared with previous versions, qwen 1.5 4b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"4 billion parameters." +msgstr "" +"qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。40亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 +msgid "" +"Compared with previous versions, qwen 1.5 7b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"7 billion parameters." +msgstr "" +"qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。70亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 +msgid "" +"Compared with previous versions, qwen 1.5 14b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"14 billion parameters." 
+msgstr "" +"qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。140亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 +msgid "" +"Compared with previous versions, qwen 1.5 32b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"32 billion parameters." +msgstr "" +"qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。320亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 +msgid "" +"Compared with previous versions, qwen 1.5 72b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"72 billion parameters." +msgstr "" +"qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显" +"著增强。所有规模的模型都支持32768个tokens的上下文长度。720亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 +msgid "" +"Compared with previous versions, qwen 1.5 110b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 110 billion parameters." +msgstr "" +"qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有" +"显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 +msgid "" +"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " +"model." 
+msgstr "Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 +msgid "" +"A high-performance open embedding model with a large token context window." +msgstr "一个具有大 tokens上下文窗口的高性能开放嵌入模型。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " +"or 1792x1024 pixels." +msgstr "" +"图像生成端点允许您根据文本提示创建原始图像。使用 DALL·E 3 时,图像的尺寸可以" +"为 1024x1024、1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +msgid "" +" \n" +"By default, images are produced in standard quality, but with DALL·E 3 you " +"can set quality: \"hd\" to enhance detail. Square, standard quality images " +"are generated fastest.\n" +" " +msgstr "" +"默认情况下,图像以标准质量生成,但使用 DALL·E 3 时,您可以设置质量:“hd”以增" +"强细节。方形、标准质量的图像生成速度最快。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 +msgid "" +"You can use DALL·E 3 to request 1 image at a time (requesting more images by " +"issuing parallel requests), or use DALL·E 2 with the n parameter to request " +"up to 10 images at a time." 
+msgstr "" +"您可以使用 DALL·E 3 一次请求 1 个图像(通过发出并行请求来请求更多图像),或者" +"使用带有 n 参数的 DALL·E 2 一次最多请求 10 个图像。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111 +msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-3.5-turbo,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38 +msgid "Latest gpt-4, updated with OpenAI adjustments" +msgstr "最新的gpt-4,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99 +msgid "" +"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " +"adjustments" +msgstr "最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102 +msgid "" +"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " +"adjustments" +msgstr "最新的gpt-4o-mini,比gpt-4o更便宜、更快,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46 +msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49 +msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo-preview,随OpenAI调整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53 +msgid "" +"gpt-3.5-turbo snapshot on January 25, 2024, 
supporting context length 16,385 " +"tokens" +msgstr "2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57 +msgid "" +"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " +"tokens" +msgstr "2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61 +msgid "" +"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " +"13, 2024" +msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65 +msgid "" +"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens" +msgstr "2024年5月13日的gpt-4o快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69 +msgid "" +"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72 +msgid "" +"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75 +msgid "" +"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 " +"tokens" +msgstr "2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens" + +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63 +msgid "Tongyi Qianwen" +msgstr "通义千问" + +#: models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58 +msgid "Tencent Cloud" +msgstr "腾讯云" + +#: models_provider/impl/tencent_model_provider/credential/llm.py:41 +#: 
models_provider/impl/tencent_model_provider/credential/tti.py:88 +#, python-brace-format +msgid "{keys} is required" +msgstr "{keys} 是必填项" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "painting style" +msgstr "绘画风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "If not passed, the default value is 201 (Japanese anime style)" +msgstr "如果未传递,则默认值为201(日本动漫风格)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18 +msgid "Not limited to style" +msgstr "不限于风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19 +msgid "ink painting" +msgstr "水墨画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20 +msgid "concept art" +msgstr "概念艺术" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21 +msgid "Oil painting 1" +msgstr "油画1" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22 +msgid "Oil Painting 2 (Van Gogh)" +msgstr "油画2(梵高)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23 +msgid "watercolor painting" +msgstr "水彩画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24 +msgid "pixel art" +msgstr "像素画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25 +msgid "impasto style" +msgstr "厚涂风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26 +msgid "illustration" +msgstr "插图" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27 +msgid "paper cut style" +msgstr "剪纸风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28 +msgid "Impressionism 1 (Monet)" +msgstr "印象派1(莫奈)" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29 +msgid "Impressionism 2" +msgstr "印象派2" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31 +msgid "classical portraiture" +msgstr "古典肖像画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32 +msgid "black and white sketch" +msgstr "黑白素描画" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33 +msgid "cyberpunk" +msgstr "赛博朋克" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34 +msgid "science fiction style" +msgstr "科幻风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35 +msgid "dark style" +msgstr "暗黑风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37 +msgid "vaporwave" +msgstr "蒸汽波" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38 +msgid "Japanese animation" +msgstr "日系动漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39 +msgid "monster style" +msgstr "怪兽风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40 +msgid "Beautiful ancient style" +msgstr "唯美古风" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41 +msgid "retro anime" +msgstr "复古动漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42 +msgid "Game cartoon hand drawing" +msgstr "游戏卡通手绘" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43 +msgid "Universal realistic style" +msgstr "通用写实风格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "Generate image resolution" +msgstr "生成图像分辨率" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "If not transmitted, the default value is 768:768." +msgstr "不传默认使用768:768。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 +msgid "" +"The most effective version of the current hybrid model, the trillion-level " +"parameter scale MOE-32K long article model. Reaching the absolute leading " +"level on various benchmarks, with complex instructions and reasoning, " +"complex mathematical capabilities, support for function call, and " +"application focus optimization in fields such as multi-language translation, " +"finance, law, and medical care" +msgstr "" +"当前混元模型中效果最优版本,万亿级参数规模 MOE-32K 长文模型。在各种 " +"benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 " +"functioncall,在多语言翻译、金融法律医疗等领域应用重点优化" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 +msgid "" +"A better routing strategy is adopted to simultaneously alleviate the " +"problems of load balancing and expert convergence. For long articles, the " +"needle-in-a-haystack index reaches 99.9%" +msgstr "" +"采用更优的路由策略,同时缓解了负载均衡和专家趋同的问题。长文方面,大海捞针指" +"标达到99.9%" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 +msgid "" +"Upgraded to MOE structure, the context window is 256k, leading many open " +"source models in multiple evaluation sets such as NLP, code, mathematics, " +"industry, etc." +msgstr "" +"升级为 MOE 结构,上下文窗口为 256k ,在 NLP,代码,数学,行业等多项评测集上领" +"先众多开源模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 +msgid "" +"Hunyuan's latest version of the role-playing model, a role-playing model " +"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan " +"model combined with the role-playing scene data set for additional training, " +"and has better basic effects in role-playing scenes." 
+msgstr "" +"混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合" +"角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 +msgid "" +"Hunyuan's latest MOE architecture FunctionCall model has been trained with " +"high-quality FunctionCall data and has a context window of 32K, leading in " +"multiple dimensions of evaluation indicators." +msgstr "" +"混元最新 MOE 架构 FunctionCall 模型,经过高质量的 FunctionCall 数据训练,上下" +"文窗口达 32K,在多个维度的评测指标上处于领先。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 +msgid "" +"Hunyuan's latest code generation model, after training the base model with " +"200B high-quality code data, and iterating on high-quality SFT data for half " +"a year, the context long window length has been increased to 8K, and it " +"ranks among the top in the automatic evaluation indicators of code " +"generation in the five major languages; the five major languages In the " +"manual high-quality evaluation of 10 comprehensive code tasks that consider " +"all aspects, the performance is in the first echelon." +msgstr "" +"混元最新代码生成模型,经过 200B 高质量代码数据增训基座模型,迭代半年高质量 " +"SFT 数据训练,上下文长窗口长度增大到 8K,五大语言代码生成自动评测指标上位居前" +"列;五大语言10项考量各方面综合代码任务人工高质量评测上,性能处于第一梯队" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 +msgid "" +"Tencent's Hunyuan Embedding interface can convert text into high-quality " +"vector data. The vector dimension is 1024 dimensions." 
+msgstr "" +"腾讯混元 Embedding 接口,可以将文本转化为高质量的向量数据。向量维度为1024维。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 +msgid "Mixed element visual model" +msgstr "混元视觉模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 +msgid "Hunyuan graph model" +msgstr "混元生图模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 +msgid "Tencent Hunyuan" +msgstr "腾讯混元" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 +msgid "Facebook’s 125M parameter model" +msgstr "Facebook的125M参数模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 +msgid "BAAI’s 7B parameter model" +msgstr "BAAI的7B参数模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 +msgid "BAAI’s 13B parameter mode" +msgstr "BAAI的13B参数模型" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 +msgid "" +"If the gap between width, height and 512 is too large, the picture rendering " +"effect will be poor and the probability of excessive delay will increase " +"significantly. 
Recommended ratio and corresponding width and height before " +"super score: width*height" +msgstr "" +"宽、高与512差距过大,则出图效果不佳、延迟过长概率显著增加。超分前建议比例及对" +"应宽高:width*height" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29 +msgid "Universal female voice" +msgstr "通用女声" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25 +msgid "Supernatural timbre-ZiZi 2.0" +msgstr "超自然音色-梓梓2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26 +msgid "Supernatural timbre-ZiZi" +msgstr "超自然音色-梓梓" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27 +msgid "Supernatural sound-Ranran 2.0" +msgstr "超自然音色-燃燃2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28 +msgid "Supernatural sound-Ranran" +msgstr "超自然音色-燃燃" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30 +msgid "Universal male voice" +msgstr "通用男声" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +msgid "[0.2,3], the default is 1, usually one decimal place is enough" +msgstr "[0.2,3],默认为1,通常保留一位小数即可" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 +msgid "" +"The user goes to the model inference page of Volcano Ark to create an " +"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call " +"it." 
+msgstr "" +"用户前往火山方舟的模型推理页面创建推理接入点,这里需要输入ep-xxxxxxxxxx-yyyy" +"进行调用" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 +msgid "Universal 2.0-Vincent Diagram" +msgstr "通用2.0-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 +msgid "Universal 2.0Pro-Vincent Chart" +msgstr "通用2.0Pro-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 +msgid "Universal 1.4-Vincent Chart" +msgstr "通用1.4-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 +msgid "Animation 1.3.0-Vincent Picture" +msgstr "动漫1.3.0-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 +msgid "Animation 1.3.1-Vincent Picture" +msgstr "动漫1.3.1-文生图" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 +msgid "volcano engine" +msgstr "火山引擎" + + +#: models_provider/impl/wenxin_model_provider/credential/llm.py:51 +#, python-brace-format +msgid "{model_name} The model does not support" +msgstr "{model_name} 模型不支持" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 +msgid "" +"ERNIE-Bot-4 is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、" +"内容创作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 +msgid "" +"ERNIE-Bot is a large language model independently developed by Baidu. 
It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内" +"容创作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 +msgid "" +"ERNIE-Bot-turbo is a large language model independently developed by Baidu. " +"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, " +"content creation and generation, and has a faster response speed." +msgstr "" +"ERNIE-Bot-turbo是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问" +"答、内容创作生成等能力,响应速度更快。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 +msgid "" +"BLOOMZ-7B is a well-known large language model in the industry. It was " +"developed and open sourced by BigScience and can output text in 46 languages " +"and 13 programming languages." +msgstr "" +"BLOOMZ-7B是业内知名的大语言模型,由BigScience研发并开源,能够以46种语言和13种" +"编程语言输出文本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 +msgid "" +"Llama-2-13b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning and knowledge application. " +"Llama-2-13b-chat is a native open source version with balanced performance " +"and effect, suitable for conversation scenarios." +msgstr "" +"Llama-2-13b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀," +"Llama-2-13b-chat是性能与效果均衡的原生开源版本,适用于对话场景。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 +msgid "" +"Llama-2-70b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning, and knowledge application. " +"Llama-2-70b-chat is a native open source version with high-precision effects." 
+msgstr "" +"Llama-2-70b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀," +"Llama-2-70b-chat是高精度效果的原生开源版本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 +msgid "" +"The Chinese enhanced version developed by the Qianfan team based on " +"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-" +"EVAL." +msgstr "" +"千帆团队在Llama-2-7b基础上的中文增强版本,在CMMLU、C-EVAL等中文知识库上表现优" +"异。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 +msgid "" +"Embedding-V1 is a text representation model based on Baidu Wenxin large " +"model technology. It can convert text into a vector form represented by " +"numerical values and can be used in text retrieval, information " +"recommendation, knowledge mining and other scenarios. Embedding-V1 provides " +"the Embeddings interface, which can generate corresponding vector " +"representations based on input content. You can call this interface to input " +"text into the model and obtain the corresponding vector representation for " +"subsequent text processing and analysis." +msgstr "" +"Embedding-V1是一个基于百度文心大模型技术的文本表示模型,可以将文本转化为用数" +"值表示的向量形式,用于文本检索、信息推荐、知识挖掘等场景。 Embedding-V1提供了" +"Embeddings接口,可以根据输入内容生成对应的向量表示。您可以通过调用该接口,将" +"文本输入到模型中,获取到对应的向量表示,从而进行后续的文本处理和分析。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 +msgid "Thousand sails large model" +msgstr "千帆大模型" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42 +msgid "Please outline this picture" +msgstr "请描述这张图片" + + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15 +msgid "Speaker" +msgstr "发音人" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16 +msgid "" +"Speaker, optional value: Please go to the console to add a trial or purchase " +"speaker. After adding, the speaker parameter value will be displayed." 
+msgstr "" +"发音人,可选值:请到控制台添加试用或购买发音人,添加后即显示发音人参数值" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21 +msgid "iFlytek Xiaoyan" +msgstr "讯飞小燕" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22 +msgid "iFlytek Xujiu" +msgstr "讯飞许久" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23 +msgid "iFlytek Xiaoping" +msgstr "讯飞小萍" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24 +msgid "iFlytek Xiaojing" +msgstr "讯飞小婧" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25 +msgid "iFlytek Xuxiaobao" +msgstr "讯飞许小宝" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "Speech speed, optional value: [0-100], default is 50" +msgstr "语速,可选值:[0-100],默认为50" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39 +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50 +msgid "Chinese and English recognition" +msgstr "中英文识别" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66 +msgid "iFlytek Spark" +msgstr "讯飞星火" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " +"1792x1024 pixels." +msgstr "" +"图像生成端点允许您根据文本提示创建原始图像。图像的尺寸可以为 1024x1024、" +"1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29 +msgid "" +"By default, images are generated in standard quality, you can set quality: " +"\"hd\" to enhance detail. Square, standard quality images are generated " +"fastest." 
+msgstr "" +"默认情况下,图像以标准质量生成,您可以设置质量:“hd”以增强细节。方形、标准质" +"量的图像生成速度最快。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42 +msgid "" +"You can request 1 image at a time (requesting more images by making parallel " +"requests), or up to 10 images at a time using the n parameter." +msgstr "" +"您可以一次请求 1 个图像(通过发出并行请求来请求更多图像),或者使用 n 参数一" +"次最多请求 10 个图像。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20 +msgid "Chinese female" +msgstr "中文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21 +msgid "Chinese male" +msgstr "中文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22 +msgid "Japanese male" +msgstr "日语男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23 +msgid "Cantonese female" +msgstr "粤语女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24 +msgid "English female" +msgstr "英文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25 +msgid "English male" +msgstr "英文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26 +msgid "Korean female" +msgstr "韩语女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 +msgid "" +"Code Llama is a language model specifically designed for code generation." 
+msgstr "Code Llama 是一个专门用于代码生成的语言模型。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44 +msgid "" +" \n" +"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, " +"designed to perform specific tasks.\n" +" " +msgstr "" +"Code Llama Instruct 是 Code Llama 的指令微调版本,专为执行特定任务而设计。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53 +msgid "" +"Code Llama Python is a language model specifically designed for Python code " +"generation." +msgstr "Code Llama Python 是一个专门用于 Python 代码生成的语言模型。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60 +msgid "" +"CodeQwen 1.5 is a language model for code generation with high performance." +msgstr "CodeQwen 1.5 是一个用于代码生成的语言模型,具有较高的性能。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67 +msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5." +msgstr "CodeQwen 1.5 Chat 是一个聊天模型版本的 CodeQwen 1.5。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74 +msgid "Deepseek is a large-scale language model with 13 billion parameters." +msgstr "Deepseek Chat 是一个聊天模型版本的 Deepseek。" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16 +msgid "" +"Image size, only cogview-3-plus supports this parameter. Optional range: " +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the " +"default is 1024x1024." +msgstr "" +"图片尺寸,仅 cogview-3-plus 支持该参数。可选范围:" +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默认是" +"1024x1024。" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34 +msgid "" +"Have strong multi-modal understanding capabilities. 
Able to understand up to " +"five images simultaneously and supports video content understanding" +msgstr "具有强大的多模态理解能力。能够同时理解多达五张图像,并支持视频内容理解" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis" +msgstr "专注于单图理解。适用于需要高效图像解析的场景" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis (free)" +msgstr "专注于单图理解。适用于需要高效图像解析的场景(免费)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 +msgid "" +"Quickly and accurately generate images based on user text descriptions. " +"Resolution supports 1024x1024" +msgstr "根据用户文字描述快速、精准生成图像。分辨率支持1024x1024" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes" +msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes (free)" +msgstr "根据用户文字描述生成高质量图像,支持多图片尺寸(免费)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 +msgid "zhipu AI" +msgstr "智谱 AI" + +#: models_provider/serializers/model_serializer.py:43 +#: models_provider/serializers/model_serializer.py:222 +#: models_provider/serializers/model_serializer.py:259 +#: models_provider/serializers/model_serializer.py:323 +msgid "base model" +msgstr "基础模型" + +#: models_provider/serializers/model_serializer.py:44 +#: models_provider/serializers/model_serializer.py:260 +msgid "parameter configuration" +msgstr "参数配置" + +#: 
models_provider/serializers/model_serializer.py:45 +#: models_provider/serializers/model_serializer.py:225 +#: models_provider/serializers/model_serializer.py:261 +#, fuzzy +#| msgid "Get current user information" +msgid "certification information" +msgstr "获取当前用户信息" + +#: models_provider/serializers/model_serializer.py:108 +#: models_provider/serializers/model_serializer.py:215 +#: models_provider/serializers/model_serializer.py:255 +#: modules/serializers/module.py:35 tools/serializers/tool.py:52 +msgid "user id" +msgstr "用户ID" + +#: models_provider/serializers/model_serializer.py:116 +#: models_provider/serializers/model_serializer.py:132 +#: models_provider/serializers/model_serializer.py:151 +#: models_provider/serializers/model_serializer.py:178 +#: models_provider/serializers/model_serializer.py:371 +#: models_provider/tools.py:111 +msgid "Model does not exist" +msgstr "模型不存在" + +#: models_provider/serializers/model_serializer.py:233 +#: models_provider/serializers/model_serializer.py:272 +#, python-brace-format +msgid "base model【{model_name}】already exists" +msgstr "基础模型【{model_name}】已存在" + +#: models_provider/serializers/model_serializer.py:312 +msgid "Model saving failed" +msgstr "模型保存失败" + +#: models_provider/serializers/model_serializer.py:325 +msgid "create user" +msgstr "创建者" + +#: models_provider/views/model.py:28 models_provider/views/model.py:29 +msgid "Create model" +msgstr "创建模型" + +#: models_provider/views/model.py:30 models_provider/views/model.py:57 +#: models_provider/views/model.py:74 models_provider/views/model.py:85 +#: models_provider/views/model.py:96 models_provider/views/model.py:110 +#: models_provider/views/model.py:121 models_provider/views/model.py:137 +#: models_provider/views/model.py:150 models_provider/views/provide.py:24 +#: models_provider/views/provide.py:47 models_provider/views/provide.py:61 +#: models_provider/views/provide.py:79 models_provider/views/provide.py:96 +msgid "Model" +msgstr "模型" + +#: 
models_provider/views/model.py:53 models_provider/views/model.py:54 +msgid "Query model list" +msgstr "查询模型列表" + +#: models_provider/views/model.py:69 models_provider/views/model.py:70 +msgid "Update model" +msgstr "更新模型" + +#: models_provider/views/model.py:82 models_provider/views/model.py:83 +msgid "Delete model" +msgstr "删除模型" + +#: models_provider/views/model.py:92 models_provider/views/model.py:93 +msgid "Query model details" +msgstr "查询模型详情" + +#: models_provider/views/model.py:106 models_provider/views/model.py:107 +msgid "Get model parameter form" +msgstr "获取模型参数表单" + +#: models_provider/views/model.py:117 models_provider/views/model.py:118 +msgid "Save model parameter form" +msgstr "保存模型参数表单" + +#: models_provider/views/model.py:132 models_provider/views/model.py:134 +msgid "" +"Query model meta information, this interface does not carry authentication " +"information" +msgstr "查询模型元信息,该接口不携带认证信息" + +#: models_provider/views/model.py:147 models_provider/views/model.py:148 +msgid "Pause model download" +msgstr "下载模型暂停" + +#: models_provider/views/provide.py:21 models_provider/views/provide.py:22 +msgid "Get a list of model suppliers" +msgstr "获取模型供应商列表" + +#: models_provider/views/provide.py:43 models_provider/views/provide.py:44 +msgid "Get a list of model types" +msgstr "获取模型类型列表" + +#: models_provider/views/provide.py:57 models_provider/views/provide.py:58 +msgid "Example of obtaining model list" +msgstr "获取模型列表示例" + +#: models_provider/views/provide.py:75 +msgid "Get model default parameters" +msgstr "获取模型默认参数" + +#: models_provider/views/provide.py:76 models_provider/views/provide.py:92 +#: models_provider/views/provide.py:93 +msgid "Get the model creation form" +msgstr "获取模型创建表单" + +#: modules/models/module.py:6 modules/models/module.py:13 +#: modules/serializers/module.py:29 +msgid "module name" +msgstr "模块名称" + +#: modules/models/module.py:9 modules/serializers/module.py:32 +msgid "parent id" +msgstr "父级 ID" + +#: modules/serializers/module.py:28 
modules/serializers/module.py:62 +msgid "module id" +msgstr "模块 ID" + +#: modules/serializers/module.py:30 +msgid "module user id" +msgstr "模块用户 ID" + +#: modules/serializers/module.py:36 modules/serializers/module.py:64 +#: modules/serializers/module.py:96 tools/serializers/tool.py:27 +msgid "source" +msgstr "来源" + +#: modules/serializers/module.py:49 +msgid "Module name already exists" +msgstr "模块名称已存在" + +#: modules/serializers/module.py:70 +msgid "Module does not exist" +msgstr "模块不存在" + +#: modules/serializers/module.py:89 +msgid "Cannot delete root module" +msgstr "无法删除根模块" + +#: modules/views/module.py:19 modules/views/module.py:20 +msgid "Create module" +msgstr "创建模块" + +#: modules/views/module.py:24 modules/views/module.py:43 +#: modules/views/module.py:56 modules/views/module.py:68 +#: modules/views/module.py:85 +msgid "Module" +msgstr "模块" + +#: modules/views/module.py:38 modules/views/module.py:39 +msgid "Update module" +msgstr "更新模块" + +#: modules/views/module.py:52 modules/views/module.py:53 +msgid "Get module" +msgstr "获取模块" + +#: modules/views/module.py:65 modules/views/module.py:66 +msgid "Delete module" +msgstr "删除模块" + +#: modules/views/module.py:81 modules/views/module.py:82 +msgid "Get module tree" +msgstr "获取模块树" + +#: tools/serializers/tool.py:21 +msgid "variable name" +msgstr "变量名称" + +#: tools/serializers/tool.py:23 +msgid "type" +msgstr "类型" + +#: tools/serializers/tool.py:25 +msgid "fields only support string|int|dict|array|float" +msgstr "字段仅支持字符串|整数|字典|数组|浮点数" + +#: tools/serializers/tool.py:29 +msgid "The field only supports custom|reference" +msgstr "字段仅支持自定义|引用" + +#: tools/serializers/tool.py:34 +msgid "tool name" +msgstr "工具名称" + +#: tools/serializers/tool.py:37 +msgid "tool description" +msgstr "工具描述" + +#: tools/serializers/tool.py:39 +msgid "tool content" +msgstr "工具内容" + +#: tools/serializers/tool.py:41 +msgid "input field list" +msgstr "输入字段列表" + +#: tools/serializers/tool.py:43 +msgid "init field list" +msgstr "内置字段列表" + +#: 
tools/serializers/tool.py:45 +msgid "Is active" +msgstr "是否启用" + +#: tools/views/tool.py:18 tools/views/tool.py:19 +msgid "Create tool" +msgstr "创建工具" + +#: tools/views/tool.py:22 +msgid "Tool" +msgstr "工具" + +#: users/serializers/login.py:27 msgid "Username" msgstr "用户名" -#: .\apps\users\serializers\login.py:24 +#: users/serializers/login.py:28 msgid "Password" msgstr "密码" -#: .\apps\users\serializers\login.py:31 -msgid "token" -msgstr "令牌" - -#: .\apps\users\serializers\login.py:43 -msgid "The username or password is incorrect" -msgstr "用户名或密码不正确" - -#: .\apps\users\serializers\login.py:45 -msgid "The user has been disabled, please contact the administrator!" -msgstr "用户已被禁用,请联系管理员!" - -#: .\apps\users\views\login.py:21 .\apps\users\views\login.py:22 -msgid "Log in" -msgstr "登录" - -#: .\apps\users\views\login.py:23 .\apps\users\views\user.py:26 -msgid "User management" -msgstr "用户管理" - -#: .\apps\users\views\user.py:24 .\apps\users\views\user.py:25 -msgid "Get current user information" -msgstr "获取当前用户信息" - -msgid "Get captcha" -msgstr "获取验证码" - +#: users/serializers/login.py:29 users/serializers/login.py:69 msgid "captcha" msgstr "验证码" +#: users/serializers/login.py:36 +msgid "token" +msgstr "令牌" + +#: users/serializers/login.py:50 msgid "Captcha code error or expiration" msgstr "验证码错误或过期" + +#: users/serializers/login.py:53 +msgid "The username or password is incorrect" +msgstr "用户名或密码不正确" + +#: users/serializers/login.py:55 +msgid "The user has been disabled, please contact the administrator!" +msgstr "用户已被禁用,请联系管理员!" 
+ +#: users/views/login.py:21 users/views/login.py:22 +msgid "Log in" +msgstr "登录" + +#: users/views/login.py:23 users/views/login.py:34 users/views/user.py:28 +#: users/views/user.py:40 users/views/user.py:53 +msgid "User management" +msgstr "用户管理" + +#: users/views/login.py:32 users/views/login.py:33 +msgid "Get captcha" +msgstr "获取验证码" + +#: users/views/user.py:26 users/views/user.py:27 users/views/user.py:38 +msgid "Get current user information" +msgstr "获取当前用户信息" + +#~ msgid "ADMIN" +#~ msgstr "管理员" + +#~ msgid "Super administrator" +#~ msgstr "超级管理员" diff --git a/apps/locales/zh_Hant/LC_MESSAGES/django.po b/apps/locales/zh_Hant/LC_MESSAGES/django.po index 43014ac07..cc8144120 100644 --- a/apps/locales/zh_Hant/LC_MESSAGES/django.po +++ b/apps/locales/zh_Hant/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2025-04-14 20:02+0800\n" +"POT-Creation-Date: 2025-04-18 17:04+0800\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -16,98 +16,2195 @@ msgstr "" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" -"Plural-Forms: nplurals=1; plural=0;\n" -#: .\apps\common\auth\authenticate.py:63 .\apps\common\auth\authenticate.py:84 + +#: common/auth/authenticate.py:80 msgid "Not logged in, please log in first" msgstr "未登錄,請先登錄" -#: .\apps\common\auth\authenticate.py:69 .\apps\common\auth\authenticate.py:75 -#: .\apps\common\auth\authenticate.py:90 .\apps\common\auth\authenticate.py:96 +#: common/auth/authenticate.py:82 common/auth/authenticate.py:89 +#: common/auth/authenticate.py:95 msgid "Authentication information is incorrect! illegal user" -msgstr "身份驗證資訊不正確! 
非法用戶" +msgstr "身份驗證信息不正確!非法用戶" -#: .\apps\common\auth\handle\impl\user_token.py:30 +#: common/auth/authentication.py:96 +msgid "No permission to access" +msgstr "無權限訪問" + +#: common/auth/handle/impl/user_token.py:157 msgid "Login expired" -msgstr "登入已過期" +msgstr "登錄已過期" -#: .\apps\common\constants\permission_constants.py:46 -msgid "ADMIN" -msgstr "管理員" - -#: .\apps\common\constants\permission_constants.py:46 -msgid "Super administrator" -msgstr "超級管理員" - -#: .\apps\common\exception\handle_exception.py:32 +#: common/exception/handle_exception.py:32 msgid "Unknown exception" msgstr "未知錯誤" -#: .\apps\common\result\api.py:17 .\apps\common\result\api.py:27 +#: common/forms/base_field.py:64 +#, python-brace-format +msgid "The field {field_label} is required" +msgstr "{field_label} 欄位是必填項" + +#: common/forms/slider_field.py:56 +#, python-brace-format +msgid "The {field_label} cannot be less than {min}" +msgstr "{field_label} 不能小於{min}" + +#: common/forms/slider_field.py:62 +#, python-brace-format +msgid "The {field_label} cannot be greater than {max}" +msgstr "{field_label} 不能大於{max}" + +#: common/result/api.py:17 common/result/api.py:27 msgid "response code" msgstr "響應碼" -#: .\apps\common\result\api.py:18 .\apps\common\result\api.py:19 -#: .\apps\common\result\api.py:28 .\apps\common\result\api.py:29 +#: common/result/api.py:18 common/result/api.py:19 common/result/api.py:28 +#: common/result/api.py:29 msgid "error prompt" msgstr "錯誤提示" -#: .\apps\common\result\api.py:43 +#: common/result/api.py:43 msgid "total number of data" msgstr "總數據" -#: .\apps\common\result\api.py:44 +#: common/result/api.py:44 msgid "current page" msgstr "當前頁" -#: .\apps\common\result\api.py:45 +#: common/result/api.py:45 msgid "page size" msgstr "每頁大小" -#: .\apps\common\result\result.py:31 +#: common/result/result.py:31 msgid "Success" msgstr "成功" -#: .\apps\maxkb\settings\base.py:80 -msgid "Intelligent customer service platform" -msgstr "智慧客服平臺" +#: common/utils/common.py:83 +msgid 
"Text-to-speech node, the text content must be of string type" +msgstr "文本轉語音節點,文本內容必須是字符串類型" -#: .\apps\users\serializers\login.py:23 +#: common/utils/common.py:85 +msgid "Text-to-speech node, the text content cannot be empty" +msgstr "文本轉語音節點,文本內容不能為空" + +#: maxkb/settings/base.py:83 +msgid "Intelligent customer service platform" +msgstr "智能客服平臺" + +#: models_provider/api/model.py:36 models_provider/api/model.py:49 +#: models_provider/serializers/model_serializer.py:262 +#: models_provider/serializers/model_serializer.py:326 +#: modules/serializers/module.py:31 modules/serializers/module.py:63 +#: modules/serializers/module.py:95 +msgid "workspace id" +msgstr "工作空間ID" + +#: models_provider/api/model.py:55 +#: models_provider/serializers/model_serializer.py:107 +#: models_provider/serializers/model_serializer.py:365 +msgid "model id" +msgstr "模型ID" + +#: models_provider/api/provide.py:17 models_provider/api/provide.py:23 +#: models_provider/api/provide.py:28 models_provider/api/provide.py:30 +#: models_provider/api/provide.py:67 +#: models_provider/serializers/model_serializer.py:40 +#: models_provider/serializers/model_serializer.py:218 +#: models_provider/serializers/model_serializer.py:256 +#: models_provider/serializers/model_serializer.py:321 +msgid "model name" +msgstr "模型名稱" + +#: models_provider/api/provide.py:18 models_provider/api/provide.py:38 +#: models_provider/api/provide.py:61 models_provider/api/provide.py:89 +#: models_provider/api/provide.py:111 +#: models_provider/serializers/model_serializer.py:41 +#: models_provider/serializers/model_serializer.py:257 +#: models_provider/serializers/model_serializer.py:324 +msgid "provider" +msgstr "供應商" + +#: models_provider/api/provide.py:19 +msgid "icon" +msgstr "" + +#: models_provider/api/provide.py:24 +msgid "value" +msgstr "值" + +#: models_provider/api/provide.py:29 models_provider/api/provide.py:55 +#: models_provider/api/provide.py:83 +#: models_provider/serializers/model_serializer.py:42 +#: 
models_provider/serializers/model_serializer.py:220 +#: models_provider/serializers/model_serializer.py:258 +#: models_provider/serializers/model_serializer.py:322 +msgid "model type" +msgstr "模型類型" + +#: models_provider/api/provide.py:34 +msgid "input type" +msgstr "輸入類型" + +#: models_provider/api/provide.py:35 +msgid "label" +msgstr "標籤" + +#: models_provider/api/provide.py:36 +msgid "text field" +msgstr "文本欄位" + +#: models_provider/api/provide.py:37 +msgid "value field" +msgstr "值" + +#: models_provider/api/provide.py:39 +msgid "method" +msgstr "方法" + +#: models_provider/api/provide.py:40 tools/serializers/tool.py:22 +msgid "required" +msgstr "必填" + +#: models_provider/api/provide.py:41 +msgid "default value" +msgstr "默認值" + +#: models_provider/api/provide.py:42 +msgid "relation show field dict" +msgstr "關係顯示欄位" + +#: models_provider/api/provide.py:43 +msgid "relation trigger field dict" +msgstr "關係觸發欄位" + +#: models_provider/api/provide.py:44 +msgid "trigger type" +msgstr "觸發類型" + +#: models_provider/api/provide.py:45 +msgid "attrs" +msgstr "屬性" + +#: models_provider/api/provide.py:46 +msgid "props info" +msgstr "props 信息" + +#: models_provider/base_model_provider.py:60 +msgid "Model type cannot be empty" +msgstr "模型類型不能為空" + +#: models_provider/base_model_provider.py:85 +msgid "The current platform does not support downloading models" +msgstr "當前平臺不支持下載模型" + +#: models_provider/base_model_provider.py:140 +msgid "LLM" +msgstr "大語言模型" + +#: models_provider/base_model_provider.py:141 +msgid "Embedding Model" +msgstr "向量模型" + +#: models_provider/base_model_provider.py:142 +msgid "Speech2Text" +msgstr "語音識別" + +#: models_provider/base_model_provider.py:143 +msgid "TTS" +msgstr "語音合成" + +#: models_provider/base_model_provider.py:144 +msgid "Vision Model" +msgstr "視覺模型" + +#: models_provider/base_model_provider.py:145 +msgid "Image Generation" +msgstr "圖片生成" + +#: models_provider/base_model_provider.py:146 +msgid "Rerank" +msgstr "重排模型" + +#: 
models_provider/base_model_provider.py:220 +msgid "The model does not support" +msgstr "模型不支持" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:42 +msgid "" +"With the GTE-Rerank text sorting series model developed by Alibaba Tongyi " +"Lab, developers can integrate high-quality text retrieval and sorting " +"through the LlamaIndex framework." +msgstr "" +"阿里巴巴通義實驗室開發的GTE-Rerank文本排序系列模型,開發者可以通過LlamaIndex" +"框架進行集成高質量文本檢索、排序。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:45 +msgid "" +"Chinese (including various dialects such as Cantonese), English, Japanese, " +"and Korean support free switching between multiple languages." +msgstr "中文(含粵語等各種方言)、英文、日語、韓語支持多個語種自由切換" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:48 +msgid "" +"CosyVoice is based on a new generation of large generative speech models, " +"which can predict emotions, intonation, rhythm, etc. based on context, and " +"has better anthropomorphic effects." +msgstr "" +"CosyVoice基於新一代生成式語音大模型,能根據上下文預測情緒、語調、韻律等,具有" +"更好的擬人效果" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:51 +msgid "" +"Universal text vector is Tongyi Lab's multi-language text unified vector " +"model based on the LLM base. It provides high-level vector services for " +"multiple mainstream languages around the world and helps developers quickly " +"convert text data into high-quality vector data." 
+msgstr "" +"通用文本向量,是通義實驗室基於LLM底座的多語言文本統一向量模型,面向全球多個主" +"流語種,提供高水準的向量服務,幫助開發者將文本數據快速轉換為高質量的向量數" +"據。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:69 +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:40 +msgid "" +"Tongyi Wanxiang - a large image model for text generation, supports " +"bilingual input in Chinese and English, and supports the input of reference " +"pictures for reference content or reference style migration. Key styles " +"include but are not limited to watercolor, oil painting, Chinese painting, " +"sketch, flat illustration, two-dimensional, and 3D. Cartoon." +msgstr "" +"通義萬相-文本生成圖像大模型,支持中英文雙語輸入,支持輸入參考圖片進行參考內容" +"或者參考風格遷移,重點風格包括但不限於水彩、油畫、中國畫、素描、扁平插畫、二" +"次元、3D卡通。" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py:95 +msgid "Alibaba Cloud Bailian" +msgstr "阿里雲百鍊" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py:53 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:50 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:74 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:61 +#: models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py:43 +#: models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py:37 +#: models_provider/impl/anthropic_model_provider/credential/image.py:33 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:57 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:34 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:53 +#: models_provider/impl/azure_model_provider/credential/embedding.py:37 +#: models_provider/impl/azure_model_provider/credential/image.py:55 +#: models_provider/impl/azure_model_provider/credential/llm.py:69 +#: 
models_provider/impl/deepseek_model_provider/credential/llm.py:57 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:36 +#: models_provider/impl/gemini_model_provider/credential/image.py:51 +#: models_provider/impl/gemini_model_provider/credential/llm.py:57 +#: models_provider/impl/gemini_model_provider/model/stt.py:43 +#: models_provider/impl/kimi_model_provider/credential/llm.py:57 +#: models_provider/impl/local_model_provider/credential/embedding.py:36 +#: models_provider/impl/local_model_provider/credential/reranker.py:37 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:37 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:44 +#: models_provider/impl/openai_model_provider/credential/embedding.py:36 +#: models_provider/impl/openai_model_provider/credential/image.py:54 +#: models_provider/impl/openai_model_provider/credential/llm.py:59 +#: models_provider/impl/qwen_model_provider/credential/image.py:56 +#: models_provider/impl/qwen_model_provider/credential/llm.py:56 +#: models_provider/impl/qwen_model_provider/model/tti.py:43 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:36 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:54 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:58 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:37 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:58 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:23 +#: models_provider/impl/tencent_model_provider/credential/image.py:56 +#: models_provider/impl/tencent_model_provider/credential/llm.py:51 +#: models_provider/impl/tencent_model_provider/model/tti.py:54 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:36 +#: models_provider/impl/vllm_model_provider/credential/llm.py:50 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:36 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/image.py:52 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:57 +#: models_provider/impl/volcanic_engine_model_provider/model/tts.py:77 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:31 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:60 +#: models_provider/impl/xf_model_provider/credential/embedding.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:76 +#: models_provider/impl/xf_model_provider/model/tts.py:101 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:31 +#: models_provider/impl/xinference_model_provider/credential/image.py:51 +#: models_provider/impl/xinference_model_provider/credential/llm.py:50 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:34 +#: models_provider/impl/xinference_model_provider/model/tts.py:44 +#: models_provider/impl/zhipu_model_provider/credential/image.py:51 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:56 +#: models_provider/impl/zhipu_model_provider/model/tti.py:49 +msgid "Hello" +msgstr "你好" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:36 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:60 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:46 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:44 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:96 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:89 +#: models_provider/impl/anthropic_model_provider/credential/image.py:23 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:47 +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:21 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:40 +#: models_provider/impl/azure_model_provider/credential/embedding.py:27 +#: 
models_provider/impl/azure_model_provider/credential/image.py:45 +#: models_provider/impl/azure_model_provider/credential/llm.py:59 +#: models_provider/impl/azure_model_provider/credential/stt.py:23 +#: models_provider/impl/azure_model_provider/credential/tti.py:58 +#: models_provider/impl/azure_model_provider/credential/tts.py:41 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:47 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:26 +#: models_provider/impl/gemini_model_provider/credential/image.py:41 +#: models_provider/impl/gemini_model_provider/credential/llm.py:47 +#: models_provider/impl/gemini_model_provider/credential/stt.py:21 +#: models_provider/impl/kimi_model_provider/credential/llm.py:47 +#: models_provider/impl/local_model_provider/credential/embedding.py:27 +#: models_provider/impl/local_model_provider/credential/reranker.py:28 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:26 +#: models_provider/impl/ollama_model_provider/credential/image.py:39 +#: models_provider/impl/ollama_model_provider/credential/llm.py:44 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:27 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:31 +#: models_provider/impl/openai_model_provider/credential/embedding.py:26 +#: models_provider/impl/openai_model_provider/credential/image.py:44 +#: models_provider/impl/openai_model_provider/credential/llm.py:48 +#: models_provider/impl/openai_model_provider/credential/stt.py:22 +#: models_provider/impl/openai_model_provider/credential/tti.py:61 +#: models_provider/impl/openai_model_provider/credential/tts.py:40 +#: models_provider/impl/qwen_model_provider/credential/image.py:47 +#: models_provider/impl/qwen_model_provider/credential/llm.py:47 +#: models_provider/impl/qwen_model_provider/credential/tti.py:68 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:26 +#: 
models_provider/impl/siliconCloud_model_provider/credential/image.py:44 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:47 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:28 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:61 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:22 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:47 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:19 +#: models_provider/impl/tencent_model_provider/credential/image.py:47 +#: models_provider/impl/tencent_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_model_provider/credential/tti.py:78 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:26 +#: models_provider/impl/vllm_model_provider/credential/image.py:42 +#: models_provider/impl/vllm_model_provider/credential/llm.py:39 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:26 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:42 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:47 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:41 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:51 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:27 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:46 +#: models_provider/impl/xf_model_provider/credential/embedding.py:27 +#: models_provider/impl/xf_model_provider/credential/image.py:29 +#: models_provider/impl/xf_model_provider/credential/llm.py:66 +#: models_provider/impl/xf_model_provider/credential/stt.py:24 +#: models_provider/impl/xf_model_provider/credential/tts.py:47 +#: 
models_provider/impl/xinference_model_provider/credential/embedding.py:19 +#: models_provider/impl/xinference_model_provider/credential/image.py:41 +#: models_provider/impl/xinference_model_provider/credential/llm.py:39 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:25 +#: models_provider/impl/xinference_model_provider/credential/stt.py:21 +#: models_provider/impl/xinference_model_provider/credential/tti.py:59 +#: models_provider/impl/xinference_model_provider/credential/tts.py:39 +#: models_provider/impl/zhipu_model_provider/credential/image.py:41 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:47 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:40 +#, python-brace-format +msgid "{model_type} Model type is not supported" +msgstr "{model_type} 模型類型不支持" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:44 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:68 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:55 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:53 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:105 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:98 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填項" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py:60 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:82 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py:69 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py:67 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:121 +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:113 +#: models_provider/impl/anthropic_model_provider/credential/image.py:43 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:65 +#: 
models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:42 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:61 +#: models_provider/impl/azure_model_provider/credential/image.py:65 +#: models_provider/impl/azure_model_provider/credential/stt.py:40 +#: models_provider/impl/azure_model_provider/credential/tti.py:77 +#: models_provider/impl/azure_model_provider/credential/tts.py:58 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:65 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:43 +#: models_provider/impl/gemini_model_provider/credential/image.py:61 +#: models_provider/impl/gemini_model_provider/credential/llm.py:66 +#: models_provider/impl/gemini_model_provider/credential/stt.py:38 +#: models_provider/impl/kimi_model_provider/credential/llm.py:64 +#: models_provider/impl/local_model_provider/credential/embedding.py:44 +#: models_provider/impl/local_model_provider/credential/reranker.py:45 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:51 +#: models_provider/impl/openai_model_provider/credential/embedding.py:43 +#: models_provider/impl/openai_model_provider/credential/image.py:64 +#: models_provider/impl/openai_model_provider/credential/llm.py:67 +#: models_provider/impl/openai_model_provider/credential/stt.py:39 +#: models_provider/impl/openai_model_provider/credential/tti.py:80 +#: models_provider/impl/openai_model_provider/credential/tts.py:58 +#: models_provider/impl/qwen_model_provider/credential/image.py:66 +#: models_provider/impl/qwen_model_provider/credential/llm.py:64 +#: models_provider/impl/qwen_model_provider/credential/tti.py:86 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:43 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:64 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:66 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:44 +#: 
models_provider/impl/siliconCloud_model_provider/credential/stt.py:39 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:80 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:40 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:66 +#: models_provider/impl/tencent_model_provider/credential/embedding.py:30 +#: models_provider/impl/tencent_model_provider/credential/image.py:66 +#: models_provider/impl/tencent_model_provider/credential/llm.py:57 +#: models_provider/impl/tencent_model_provider/credential/tti.py:104 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:43 +#: models_provider/impl/vllm_model_provider/credential/image.py:62 +#: models_provider/impl/vllm_model_provider/credential/llm.py:55 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:43 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:62 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:66 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:42 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:58 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:68 +#: models_provider/impl/wenxin_model_provider/credential/embedding.py:38 +#: models_provider/impl/xf_model_provider/credential/embedding.py:38 +#: models_provider/impl/xf_model_provider/credential/image.py:50 +#: models_provider/impl/xf_model_provider/credential/llm.py:84 +#: models_provider/impl/xf_model_provider/credential/stt.py:41 +#: models_provider/impl/xf_model_provider/credential/tts.py:65 +#: models_provider/impl/xinference_model_provider/credential/image.py:60 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:40 +#: models_provider/impl/xinference_model_provider/credential/stt.py:37 +#: models_provider/impl/xinference_model_provider/credential/tti.py:77 +#: 
models_provider/impl/xinference_model_provider/credential/tts.py:56 +#: models_provider/impl/zhipu_model_provider/credential/image.py:61 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:64 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:59 +#, python-brace-format +msgid "" +"Verification failed, please check whether the parameters are correct: {error}" +msgstr "認證失敗,請檢查參數是否正確:{error}" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:17 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:22 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:14 +#: models_provider/impl/azure_model_provider/credential/image.py:17 +#: models_provider/impl/azure_model_provider/credential/llm.py:23 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:22 +#: models_provider/impl/gemini_model_provider/credential/image.py:15 +#: models_provider/impl/gemini_model_provider/credential/llm.py:22 +#: models_provider/impl/kimi_model_provider/credential/llm.py:22 +#: models_provider/impl/ollama_model_provider/credential/image.py:12 +#: models_provider/impl/ollama_model_provider/credential/llm.py:20 +#: models_provider/impl/openai_model_provider/credential/image.py:17 +#: models_provider/impl/openai_model_provider/credential/llm.py:23 +#: models_provider/impl/qwen_model_provider/credential/image.py:22 +#: models_provider/impl/qwen_model_provider/credential/llm.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:17 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:22 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:22 +#: models_provider/impl/tencent_model_provider/credential/image.py:22 +#: models_provider/impl/tencent_model_provider/credential/llm.py:14 +#: models_provider/impl/vllm_model_provider/credential/image.py:15 +#: models_provider/impl/vllm_model_provider/credential/llm.py:15 +#: 
models_provider/impl/volcanic_engine_model_provider/credential/image.py:15 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:22 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:22 +#: models_provider/impl/xf_model_provider/credential/llm.py:22 +#: models_provider/impl/xf_model_provider/credential/llm.py:41 +#: models_provider/impl/xinference_model_provider/credential/image.py:14 +#: models_provider/impl/xinference_model_provider/credential/llm.py:15 +#: models_provider/impl/zhipu_model_provider/credential/image.py:15 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:22 +msgid "Temperature" +msgstr "溫度" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:30 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:31 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:23 +#: models_provider/impl/azure_model_provider/credential/image.py:26 +#: models_provider/impl/azure_model_provider/credential/llm.py:32 +#: models_provider/impl/azure_model_provider/credential/llm.py:43 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:31 +#: models_provider/impl/gemini_model_provider/credential/image.py:24 +#: models_provider/impl/gemini_model_provider/credential/llm.py:31 +#: models_provider/impl/kimi_model_provider/credential/llm.py:31 +#: models_provider/impl/ollama_model_provider/credential/image.py:21 +#: models_provider/impl/ollama_model_provider/credential/llm.py:29 +#: models_provider/impl/openai_model_provider/credential/image.py:26 +#: models_provider/impl/openai_model_provider/credential/llm.py:32 +#: models_provider/impl/qwen_model_provider/credential/image.py:31 +#: models_provider/impl/qwen_model_provider/credential/llm.py:31 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:26 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:31 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:31 
+#: models_provider/impl/tencent_model_provider/credential/image.py:31 +#: models_provider/impl/vllm_model_provider/credential/image.py:24 +#: models_provider/impl/vllm_model_provider/credential/llm.py:24 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:24 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:31 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:31 +#: models_provider/impl/xf_model_provider/credential/llm.py:50 +#: models_provider/impl/xinference_model_provider/credential/image.py:23 +#: models_provider/impl/xinference_model_provider/credential/llm.py:24 +#: models_provider/impl/zhipu_model_provider/credential/image.py:24 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:31 +msgid "Output the maximum Tokens" +msgstr "輸出最大Token數" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:31 +msgid "Specify the maximum number of tokens that the model can generate." 
+msgstr "指定模型可以生成的最大 tokens 數" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:44 +#: models_provider/impl/anthropic_model_provider/credential/image.py:15 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:74 +msgid "API URL" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py:45 +#: models_provider/impl/anthropic_model_provider/credential/image.py:16 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:75 +msgid "API Key" +msgstr "" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 +#: models_provider/impl/azure_model_provider/credential/tti.py:15 +#: models_provider/impl/openai_model_provider/credential/tti.py:15 +#: models_provider/impl/qwen_model_provider/credential/tti.py:22 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:15 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:15 +#: models_provider/impl/xinference_model_provider/credential/tti.py:14 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:15 +# reviewed translation (previously fuzzy) +# previous msgid: "page size" +msgid "Image size" +msgstr "圖片尺寸" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:20 +#: models_provider/impl/azure_model_provider/credential/tti.py:15 +#: models_provider/impl/qwen_model_provider/credential/tti.py:22 +msgid "Specify the size of the generated image, such as: 1024x1024" +msgstr "指定生成圖片的尺寸, 如: 1024x1024" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: models_provider/impl/azure_model_provider/credential/tti.py:40 +#: models_provider/impl/openai_model_provider/credential/tti.py:43 +#: models_provider/impl/qwen_model_provider/credential/tti.py:34 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:43 +#: models_provider/impl/xinference_model_provider/credential/tti.py:41 +msgid "Number of pictures" +msgstr "圖片數量" + +#:
models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:34 +#: models_provider/impl/azure_model_provider/credential/tti.py:40 +#: models_provider/impl/qwen_model_provider/credential/tti.py:34 +msgid "Specify the number of generated images" +msgstr "指定生成圖片的數量" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Style" +msgstr "風格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:41 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:41 +msgid "Specify the style of generated images" +msgstr "指定生成圖片的風格" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:45 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:45 +msgid "Default value, the image style is randomly output by the model" +msgstr "默認值,圖片風格由模型隨機輸出" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:46 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:46 +msgid "photography" +msgstr "攝影" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:47 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:47 +msgid "Portraits" +msgstr "人像寫真" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:48 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:48 +msgid "3D cartoon" +msgstr "3D卡通" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:49 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:49 +msgid "animation" +msgstr "動畫" + +#: 
community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:50 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:50 +msgid "painting" +msgstr "油畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:51 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:51 +msgid "watercolor" +msgstr "水彩" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:52 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:52 +msgid "sketch" +msgstr "素描" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:53 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:53 +msgid "Chinese painting" +msgstr "中國畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py:54 +#: community/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py:54 +msgid "flat illustration" +msgstr "扁平插畫" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:15 +msgid "timbre" +msgstr "音色" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:15 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:15 +msgid "Chinese sounds can support mixed scenes of Chinese and English" +msgstr "中文音色支持中英文混合場景" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:20 +msgid "Long Xiaochun" +msgstr "龍小淳" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:21 +msgid 
"Long Xiaoxia" +msgstr "龍小夏" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:22 +msgid "Long Xiaochen" +msgstr "龍小誠" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:23 +msgid "Long Xiaobai" +msgstr "龍小白" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:24 +msgid "Long laotie" +msgstr "龍老鐵" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:25 +msgid "Long Shu" +msgstr "龍書" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:26 +msgid "Long Shuo" +msgstr "龍碩" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:27 +msgid "Long Jing" +msgstr "龍婧" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:28 +msgid "Long Miao" +msgstr "龍妙" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:29 +msgid "Long Yue" +msgstr "龍悅" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:30 +msgid "Long Yuan" +msgstr "龍媛" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:31 +msgid "Long Fei" +msgstr "龍飛" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:32 +msgid "Long Jielidou" +msgstr "龍傑力豆" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:33 +msgid "Long Tong" +msgstr "龍彤" + +#: community/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:34 +msgid "Long Xiang" +msgstr "龍祥" + + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 +msgid "Speaking speed" +msgstr "語速" + +#: models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py:47 +msgid "[0.5, 
2], the default is 1, usually one decimal place is enough" +msgstr "[0.5,2],默認為1,通常一位小數就足夠了" + +#: models_provider/impl/anthropic_model_provider/credential/image.py:28 +#: models_provider/impl/anthropic_model_provider/credential/llm.py:52 +#: models_provider/impl/azure_model_provider/credential/embedding.py:32 +#: models_provider/impl/azure_model_provider/credential/image.py:50 +#: models_provider/impl/azure_model_provider/credential/llm.py:64 +#: models_provider/impl/azure_model_provider/credential/stt.py:28 +#: models_provider/impl/azure_model_provider/credential/tti.py:63 +#: models_provider/impl/azure_model_provider/credential/tts.py:46 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:52 +#: models_provider/impl/gemini_model_provider/credential/embedding.py:31 +#: models_provider/impl/gemini_model_provider/credential/image.py:46 +#: models_provider/impl/gemini_model_provider/credential/llm.py:52 +#: models_provider/impl/gemini_model_provider/credential/stt.py:26 +#: models_provider/impl/kimi_model_provider/credential/llm.py:52 +#: models_provider/impl/local_model_provider/credential/embedding.py:31 +#: models_provider/impl/local_model_provider/credential/reranker.py:32 +#: models_provider/impl/ollama_model_provider/credential/embedding.py:46 +#: models_provider/impl/ollama_model_provider/credential/llm.py:62 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:63 +#: models_provider/impl/openai_model_provider/credential/embedding.py:31 +#: models_provider/impl/openai_model_provider/credential/image.py:49 +#: models_provider/impl/openai_model_provider/credential/llm.py:53 +#: models_provider/impl/openai_model_provider/credential/stt.py:27 +#: models_provider/impl/openai_model_provider/credential/tti.py:66 +#: models_provider/impl/openai_model_provider/credential/tts.py:45 +#: models_provider/impl/qwen_model_provider/credential/image.py:51 +#: models_provider/impl/qwen_model_provider/credential/llm.py:51 +#: 
models_provider/impl/qwen_model_provider/credential/tti.py:72 +#: models_provider/impl/siliconCloud_model_provider/credential/embedding.py:31 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:49 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:52 +#: models_provider/impl/siliconCloud_model_provider/credential/reranker.py:32 +#: models_provider/impl/siliconCloud_model_provider/credential/stt.py:27 +#: models_provider/impl/siliconCloud_model_provider/credential/tti.py:66 +#: models_provider/impl/siliconCloud_model_provider/credential/tts.py:27 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:52 +#: models_provider/impl/tencent_model_provider/credential/image.py:51 +#: models_provider/impl/vllm_model_provider/credential/embedding.py:31 +#: models_provider/impl/vllm_model_provider/credential/image.py:47 +#: models_provider/impl/vllm_model_provider/credential/llm.py:65 +#: models_provider/impl/volcanic_engine_model_provider/credential/embedding.py:31 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:47 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:52 +#: models_provider/impl/volcanic_engine_model_provider/credential/stt.py:30 +#: models_provider/impl/volcanic_engine_model_provider/credential/tti.py:46 +#: models_provider/impl/volcanic_engine_model_provider/credential/tts.py:56 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:55 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:72 +#: models_provider/impl/xf_model_provider/credential/image.py:34 +#: models_provider/impl/xf_model_provider/credential/llm.py:71 +#: models_provider/impl/xf_model_provider/credential/stt.py:29 +#: models_provider/impl/xf_model_provider/credential/tts.py:52 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:40 +#: models_provider/impl/xinference_model_provider/credential/image.py:46 +#: 
models_provider/impl/xinference_model_provider/credential/llm.py:59 +#: models_provider/impl/xinference_model_provider/credential/reranker.py:29 +#: models_provider/impl/xinference_model_provider/credential/stt.py:26 +#: models_provider/impl/xinference_model_provider/credential/tti.py:64 +#: models_provider/impl/xinference_model_provider/credential/tts.py:44 +#: models_provider/impl/zhipu_model_provider/credential/image.py:46 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:51 +#: models_provider/impl/zhipu_model_provider/credential/tti.py:45 +#, python-brace-format +msgid "{key} is required" +msgstr "{key} 是必填項" + +#: models_provider/impl/anthropic_model_provider/credential/llm.py:23 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:15 +#: models_provider/impl/azure_model_provider/credential/image.py:18 +#: models_provider/impl/azure_model_provider/credential/llm.py:24 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:23 +#: models_provider/impl/gemini_model_provider/credential/image.py:16 +#: models_provider/impl/gemini_model_provider/credential/llm.py:23 +#: models_provider/impl/kimi_model_provider/credential/llm.py:23 +#: models_provider/impl/ollama_model_provider/credential/image.py:13 +#: models_provider/impl/ollama_model_provider/credential/llm.py:21 +#: models_provider/impl/openai_model_provider/credential/image.py:18 +#: models_provider/impl/openai_model_provider/credential/llm.py:24 +#: models_provider/impl/qwen_model_provider/credential/image.py:23 +#: models_provider/impl/qwen_model_provider/credential/llm.py:23 +#: models_provider/impl/siliconCloud_model_provider/credential/image.py:18 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:23 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:23 +#: models_provider/impl/tencent_model_provider/credential/image.py:23 +#: models_provider/impl/tencent_model_provider/credential/llm.py:15 +#: 
models_provider/impl/vllm_model_provider/credential/image.py:16 +#: models_provider/impl/vllm_model_provider/credential/llm.py:16 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:16 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:23 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:23 +#: models_provider/impl/xf_model_provider/credential/llm.py:23 +#: models_provider/impl/xf_model_provider/credential/llm.py:42 +#: models_provider/impl/xinference_model_provider/credential/image.py:15 +#: models_provider/impl/xinference_model_provider/credential/llm.py:16 +#: models_provider/impl/zhipu_model_provider/credential/image.py:16 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:23 +msgid "" +"Higher values make the output more random, while lower values make it more " +"focused and deterministic" +msgstr "較高的數值會使輸出更加隨機,而較低的數值會使其更加集中和確定" + +#: models_provider/impl/anthropic_model_provider/credential/llm.py:32 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:24 +#: models_provider/impl/azure_model_provider/credential/image.py:27 +#: models_provider/impl/azure_model_provider/credential/llm.py:33 +#: models_provider/impl/azure_model_provider/credential/llm.py:44 +#: models_provider/impl/deepseek_model_provider/credential/llm.py:32 +#: models_provider/impl/gemini_model_provider/credential/image.py:25 +#: models_provider/impl/gemini_model_provider/credential/llm.py:32 +#: models_provider/impl/kimi_model_provider/credential/llm.py:32 +#: models_provider/impl/ollama_model_provider/credential/image.py:22 +#: models_provider/impl/ollama_model_provider/credential/llm.py:30 +#: models_provider/impl/openai_model_provider/credential/image.py:27 +#: models_provider/impl/openai_model_provider/credential/llm.py:33 +#: models_provider/impl/qwen_model_provider/credential/image.py:32 +#: models_provider/impl/qwen_model_provider/credential/llm.py:32 +#: 
models_provider/impl/siliconCloud_model_provider/credential/image.py:27 +#: models_provider/impl/siliconCloud_model_provider/credential/llm.py:32 +#: models_provider/impl/tencent_cloud_model_provider/credential/llm.py:32 +#: models_provider/impl/tencent_model_provider/credential/image.py:32 +#: models_provider/impl/vllm_model_provider/credential/image.py:25 +#: models_provider/impl/vllm_model_provider/credential/llm.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/image.py:25 +#: models_provider/impl/volcanic_engine_model_provider/credential/llm.py:32 +#: models_provider/impl/wenxin_model_provider/credential/llm.py:32 +#: models_provider/impl/xf_model_provider/credential/llm.py:32 +#: models_provider/impl/xf_model_provider/credential/llm.py:51 +#: models_provider/impl/xinference_model_provider/credential/image.py:24 +#: models_provider/impl/xinference_model_provider/credential/llm.py:25 +#: models_provider/impl/zhipu_model_provider/credential/image.py:25 +#: models_provider/impl/zhipu_model_provider/credential/llm.py:32 +msgid "Specify the maximum number of tokens that the model can generate" +msgstr "指定模型可以生成的最大 tokens 數" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:36 +msgid "" +"An update to Claude 2 that doubles the context window and improves " +"reliability, hallucination rates, and evidence-based accuracy in long " +"documents and RAG contexts." +msgstr "" +"Claude 2 的更新,採用雙倍的上下文窗口,並在長文檔和 RAG 上下文中提高可靠性、" +"幻覺率和循證準確性。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:43 +msgid "" +"Anthropic is a powerful model that can handle a variety of tasks, from " +"complex dialogue and creative content generation to detailed command " +"obedience." 
+msgstr "" +"Anthropic 功能強大的模型,可處理各種任務,從複雜的對話和創意內容生成到詳細的" +"指令服從。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:50 +msgid "" +"The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-" +"instant responsiveness. The model can answer simple queries and requests " +"quickly. Customers will be able to build seamless AI experiences that mimic " +"human interactions. Claude 3 Haiku can process images and return text " +"output, and provides 200K context windows." +msgstr "" +"Claude 3 Haiku 是 Anthropic 最快速、最緊湊的模型,具有近乎即時的響應能力。該" +"模型可以快速回答簡單的查詢和請求。客戶將能夠構建模仿人類交互的無縫人工智慧體" +"驗。 Claude 3 Haiku 可以處理圖像和返回文本輸出,並且提供 200K 上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:57 +msgid "" +"The Claude 3 Sonnet model from Anthropic strikes the ideal balance between " +"intelligence and speed, especially when it comes to handling enterprise " +"workloads. This model offers maximum utility while being priced lower than " +"competing products, and it's been engineered to be a solid choice for " +"deploying AI at scale." +msgstr "" +"Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之間取得理想的平衡,尤其是在" +"處理企業工作負載方面。該模型提供最大的效用,同時價格低於競爭產品,並且其經過" +"精心設計,是大規模部署人工智慧的可靠選擇。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:64 +msgid "" +"The Claude 3.5 Sonnet raises the industry standard for intelligence, " +"outperforming competing models and the Claude 3 Opus in extensive " +"evaluations, with the speed and cost-effectiveness of our mid-range models." 
+msgstr "" +"Claude 3.5 Sonnet提高了智能的行業標準,在廣泛的評估中超越了競爭對手的型號和" +"Claude 3 Opus,具有我們中端型號的速度和成本效益。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:71 +msgid "" +"A faster, more affordable but still very powerful model that can handle a " +"range of tasks including casual conversation, text analysis, summarization " +"and document question answering." +msgstr "" +"一種更快速、更實惠但仍然非常強大的模型,它可以處理一系列任務,包括隨意對話、" +"文本分析、摘要和文檔問題回答。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:78 +msgid "" +"Titan Text Premier is the most powerful and advanced model in the Titan Text " +"series, designed to deliver exceptional performance for a variety of " +"enterprise applications. With its cutting-edge features, it delivers greater " +"accuracy and outstanding results, making it an excellent choice for " +"organizations looking for a top-notch text processing solution." +msgstr "" +"Titan Text Premier 是 Titan Text 系列中功能強大且先進的型號,旨在為各種企業應" +"用程序提供卓越的性能。憑藉其尖端功能,它提供了更高的準確性和出色的結果,使其" +"成為尋求一流文本處理解決方案的組織的絕佳選擇。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:85 +msgid "" +"Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-" +"tuning English-language tasks, including summarization and copywriting, " +"where customers require smaller, more cost-effective, and highly " +"customizable models." +msgstr "" +"Amazon Titan Text Lite 是一種輕量級的高效模型,非常適合英語任務的微調,包括摘" +"要和文案寫作等,在這種場景下,客戶需要更小、更經濟高效且高度可定製的模型" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:91 +msgid "" +"Amazon Titan Text Express has context lengths of up to 8,000 tokens, making " +"it ideal for a variety of high-level general language tasks, such as open-" +"ended text generation and conversational chat, as well as support in " +"retrieval-augmented generation (RAG). 
At launch, the model is optimized for " +"English, but other languages are supported." +msgstr "" +"Amazon Titan Text Express 的上下文長度長達 8000 個 tokens,因而非常適合各種高" +"級常規語言任務,例如開放式文本生成和對話式聊天,以及檢索增強生成(RAG)中的支" +"持。在發布時,該模型針對英語進行了優化,但也支持其他語言。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:97 +msgid "" +"7B dense converter for rapid deployment and easy customization. Small in " +"size yet powerful in a variety of use cases. Supports English and code, as " +"well as 32k context windows." +msgstr "" +"7B 密集型轉換器,可快速部署,易於定製。體積雖小,但功能強大,適用於各種用例。" +"支持英語和代碼,以及 32k 的上下文窗口。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:103 +msgid "" +"Advanced Mistral AI large-scale language model capable of handling any " +"language task, including complex multilingual reasoning, text understanding, " +"transformation, and code generation." +msgstr "" +"先進的 Mistral AI 大型語言模型,能夠處理任何語言任務,包括複雜的多語言推理、" +"文本理解、轉換和代碼生成。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:109 +msgid "" +"Ideal for content creation, conversational AI, language understanding, R&D, " +"and enterprise applications" +msgstr "非常適合內容創作、會話式人工智慧、語言理解、研發和企業應用" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:115 +msgid "" +"Ideal for limited computing power and resources, edge devices, and faster " +"training times." +msgstr "非常適合有限的計算能力和資源、邊緣設備和更快的訓練時間。" + +#: community/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py:123 +msgid "" +"Titan Embed Text is the largest embedding model in the Amazon Titan Embed " +"series and can handle various text embedding tasks, such as text " +"classification, text similarity calculation, etc." 
+msgstr "" +"Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以處理各種文本" +"嵌入任務,如文本分類、文本相似度計算等。" + +#: models_provider/impl/aws_bedrock_model_provider/credential/embedding.py:28 +#: models_provider/impl/aws_bedrock_model_provider/credential/llm.py:47 +#, python-brace-format +msgid "The following fields are required: {keys}" +msgstr "以下欄位是必填項: {keys}" + +#: models_provider/impl/azure_model_provider/credential/embedding.py:44 +#: models_provider/impl/azure_model_provider/credential/llm.py:76 +msgid "Verification failed, please check whether the parameters are correct" +msgstr "認證失敗,請檢查參數是否正確" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py:28 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:28 +msgid "Picture quality" +msgstr "圖片質量" + +#: community/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py:17 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tts.py:17 +msgid "" +"Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) " +"to find one that suits your desired tone and audience. The current voiceover " +"is optimized for English." 
+msgstr "" +"嘗試不同的聲音(合金、回聲、寓言、縞瑪瑙、新星和閃光),找到一種適合您所需的" +"音調和聽眾的聲音。當前的語音針對英語進行了優化。" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:24 +msgid "Good at common conversational tasks, supports 32K contexts" +msgstr "擅長通用對話任務,支持 32K 上下文" + +#: community/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py:29 +msgid "Good at handling programming tasks, supports 16K contexts" +msgstr "擅長處理編程任務,支持 16K 上下文" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:32 +msgid "Latest Gemini 1.0 Pro model, updated with Google update" +msgstr "最新的 Gemini 1.0 Pro 模型,隨 Google 更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:36 +msgid "Latest Gemini 1.0 Pro Vision model, updated with Google update" +msgstr "最新的Gemini 1.0 Pro Vision模型,隨Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:43 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:47 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:54 +#: community/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py:58 +msgid "Latest Gemini 1.5 Flash model, updated with Google updates" +msgstr "最新的Gemini 1.5 Flash模型,隨Google更新而更新" + +#: community/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py:53 +msgid "convert audio to text" +msgstr "將音頻轉換為文本" + +#: models_provider/impl/local_model_provider/credential/embedding.py:53 +#: models_provider/impl/local_model_provider/credential/reranker.py:54 +msgid "Model catalog" +msgstr "模型目錄" + +#: models_provider/impl/local_model_provider/local_model_provider.py:39 +msgid "local model" +msgstr "本地模型" + +#: models_provider/impl/ollama_model_provider/credential/embedding.py:30 +#: 
models_provider/impl/ollama_model_provider/credential/image.py:43 +#: models_provider/impl/ollama_model_provider/credential/llm.py:48 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:35 +#: models_provider/impl/vllm_model_provider/credential/llm.py:43 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:24 +#: models_provider/impl/xinference_model_provider/credential/llm.py:44 +msgid "API domain name is invalid" +msgstr "API 域名無效" + +#: models_provider/impl/ollama_model_provider/credential/embedding.py:35 +#: models_provider/impl/ollama_model_provider/credential/image.py:48 +#: models_provider/impl/ollama_model_provider/credential/llm.py:53 +#: models_provider/impl/ollama_model_provider/credential/reranker.py:40 +#: models_provider/impl/vllm_model_provider/credential/llm.py:47 +#: models_provider/impl/xinference_model_provider/credential/embedding.py:30 +#: models_provider/impl/xinference_model_provider/credential/llm.py:48 +msgid "The model does not exist, please download the model first" +msgstr "模型不存在,請先下載模型" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:56 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 7B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。" +"這是 7B 預訓練模型的存儲庫。其他模型的連結可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:60 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 13B pretrained " +"models. Links to other models can be found in the index at the bottom." 
+msgstr "" +"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。" +"這是 13B 預訓練模型的存儲庫。其他模型的連結可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:64 +msgid "" +"Llama 2 is a set of pretrained and fine-tuned generative text models ranging " +"in size from 7 billion to 70 billion. This is a repository of 70B pretrained " +"models. Links to other models can be found in the index at the bottom." +msgstr "" +"Llama 2 是一組經過預訓練和微調的生成文本模型,其規模從 70 億到 700 億個不等。" +"這是 70B 預訓練模型的存儲庫。其他模型的連結可以在底部的索引中找到。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:68 +msgid "" +"Since the Chinese alignment of Llama2 itself is weak, we use the Chinese " +"instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so " +"that it has strong Chinese conversation capabilities." +msgstr "" +"由於Llama2本身的中文對齊較弱,我們採用中文指令集,對meta-llama/Llama-2-13b-" +"chat-hf進行LoRA微調,使其具備較強的中文對話能力。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:72 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 8 billion " +"parameters." +msgstr "Meta Llama 3:迄今為止最有能力的公開產品LLM。80億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:76 +msgid "" +"Meta Llama 3: The most capable public product LLM to date. 70 billion " +"parameters." +msgstr "Meta Llama 3:迄今為止最有能力的公開產品LLM。700億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:80 +msgid "" +"Compared with previous versions, qwen 1.5 0.5b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 500 million parameters." 
+msgstr "" +"qwen 1.5 0.5b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有" +"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。5億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:84 +msgid "" +"Compared with previous versions, qwen 1.5 1.8b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 1.8 billion parameters." +msgstr "" +"qwen 1.5 1.8b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有" +"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。18億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:88 +msgid "" +"Compared with previous versions, qwen 1.5 4b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"4 billion parameters." +msgstr "" +"qwen 1.5 4b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。40億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:93 +msgid "" +"Compared with previous versions, qwen 1.5 7b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"7 billion parameters." +msgstr "" +"qwen 1.5 7b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。70億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:97 +msgid "" +"Compared with previous versions, qwen 1.5 14b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"14 billion parameters." 
+msgstr "" +"qwen 1.5 14b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。140億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:101 +msgid "" +"Compared with previous versions, qwen 1.5 32b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"32 billion parameters." +msgstr "" +"qwen 1.5 32b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。320億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:105 +msgid "" +"Compared with previous versions, qwen 1.5 72b has significantly enhanced the " +"model's alignment with human preferences and its multi-language processing " +"capabilities. Models of all sizes support a context length of 32768 tokens. " +"72 billion parameters." +msgstr "" +"qwen 1.5 72b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有顯" +"著增強。所有規模的模型都支持32768個tokens的上下文長度。720億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:109 +msgid "" +"Compared with previous versions, qwen 1.5 110b has significantly enhanced " +"the model's alignment with human preferences and its multi-language " +"processing capabilities. Models of all sizes support a context length of " +"32768 tokens. 110 billion parameters." +msgstr "" +"qwen 1.5 110b 相較於以往版本,模型與人類偏好的對齊程度以及多語言處理能力上有" +"顯著增強。所有規模的模型都支持32768個tokens的上下文長度。1100億參數。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:153 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:193 +msgid "" +"Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open " +"model." 
+msgstr "Phi-3 Mini是Microsoft的3.8B參數,輕量級,最先進的開放模型。" + +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:162 +#: community/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py:197 +msgid "" +"A high-performance open embedding model with a large token context window." +msgstr "一個具有大 tokens上下文窗口的高性能開放嵌入模型。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:16 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:16 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 " +"or 1792x1024 pixels." +msgstr "" +"圖像生成端點允許您根據文本提示創建原始圖像。使用 DALL·E 3 時,圖像的尺寸可以" +"為 1024x1024、1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:29 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:29 +msgid "" +" \n" +"By default, images are produced in standard quality, but with DALL·E 3 you " +"can set quality: \"hd\" to enhance detail. Square, standard quality images " +"are generated fastest.\n" +" " +msgstr "" +"默認情況下,圖像以標準質量生成,但使用 DALL·E 3 時,您可以設置質量:「hd」以增" +"強細節。方形、標準質量的圖像生成速度最快。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py:44 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/credential/tti.py:44 +msgid "" +"You can use DALL·E 3 to request 1 image at a time (requesting more images by " +"issuing parallel requests), or use DALL·E 2 with the n parameter to request " +"up to 10 images at a time." 
+msgstr "" +"您可以使用 DALL·E 3 一次請求 1 個圖像(通過發出並行請求來請求更多圖像),或者" +"使用帶有 n 參數的 DALL·E 2 一次最多請求 10 個圖像。" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:35 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:119 +#: community/apps/setting/models_provider/impl/siliconCloud_model_provider/siliconCloud_model_provider.py:111 +msgid "The latest gpt-3.5-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-3.5-turbo,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:38 +msgid "Latest gpt-4, updated with OpenAI adjustments" +msgstr "最新的gpt-4,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:40 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:99 +msgid "" +"The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI " +"adjustments" +msgstr "最新的GPT-4o,比gpt-4-turbo更便宜、更快,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:43 +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:102 +msgid "" +"The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI " +"adjustments" +msgstr "最新的gpt-4o-mini,比gpt-4o更便宜、更快,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:46 +msgid "The latest gpt-4-turbo, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:49 +msgid "The latest gpt-4-turbo-preview, updated with OpenAI adjustments" +msgstr "最新的gpt-4-turbo-preview,隨OpenAI調整而更新" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:53 +msgid "" +"gpt-3.5-turbo snapshot on January 25, 2024, 
supporting context length 16,385 " +"tokens" +msgstr "2024年1月25日的gpt-3.5-turbo快照,支持上下文長度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:57 +msgid "" +"gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 " +"tokens" +msgstr "2023年11月6日的gpt-3.5-turbo快照,支持上下文長度16,385 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:61 +msgid "" +"[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June " +"13, 2024" +msgstr "[Legacy] 2023年6月13日的gpt-3.5-turbo快照,將於2024年6月13日棄用" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:65 +msgid "" +"gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens" +msgstr "2024年5月13日的gpt-4o快照,支持上下文長度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:69 +msgid "" +"gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年4月9日的gpt-4-turbo快照,支持上下文長度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:72 +msgid "" +"gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 " +"tokens" +msgstr "2024年1月25日的gpt-4-turbo快照,支持上下文長度128,000 tokens" + +#: community/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py:75 +msgid "" +"gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 " +"tokens" +msgstr "2023年11月6日的gpt-4-turbo快照,支持上下文長度128,000 tokens" + +#: community/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py:63 +msgid "Tongyi Qianwen" +msgstr "通義千問" + +#: models_provider/impl/tencent_cloud_model_provider/tencent_cloud_model_provider.py:58 +msgid "Tencent Cloud" +msgstr "騰訊雲" + +#: models_provider/impl/tencent_model_provider/credential/llm.py:41 +#: 
models_provider/impl/tencent_model_provider/credential/tti.py:88 +#, python-brace-format +msgid "{keys} is required" +msgstr "{keys} 是必填項" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "painting style" +msgstr "繪畫風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:14 +msgid "If not passed, the default value is 201 (Japanese anime style)" +msgstr "如果未傳遞,則默認值為201(日本動漫風格)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:18 +msgid "Not limited to style" +msgstr "不限於風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:19 +msgid "ink painting" +msgstr "水墨畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:20 +msgid "concept art" +msgstr "概念藝術" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:21 +msgid "Oil painting 1" +msgstr "油畫1" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:22 +msgid "Oil Painting 2 (Van Gogh)" +msgstr "油畫2(梵谷)" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:23 +msgid "watercolor painting" +msgstr "水彩畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:24 +msgid "pixel art" +msgstr "像素畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:25 +msgid "impasto style" +msgstr "厚塗風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:26 +msgid "illustration" +msgstr "插圖" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:27 +msgid "paper cut style" +msgstr "剪紙風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:28 +msgid "Impressionism 1 (Monet)" +msgstr "印象派1(莫奈)" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:29 +msgid "Impressionism 2" +msgstr "印象派2" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:31 +msgid "classical portraiture" +msgstr "古典肖像畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:32 +msgid "black and white sketch" +msgstr "黑白素描畫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:33 +msgid "cyberpunk" +msgstr "賽博朋克" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:34 +msgid "science fiction style" +msgstr "科幻風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:35 +msgid "dark style" +msgstr "暗黑風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:37 +msgid "vaporwave" +msgstr "蒸汽波" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:38 +msgid "Japanese animation" +msgstr "日系動漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:39 +msgid "monster style" +msgstr "怪獸風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:40 +msgid "Beautiful ancient style" +msgstr "唯美古風" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:41 +msgid "retro anime" +msgstr "復古動漫" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:42 +msgid "Game cartoon hand drawing" +msgstr "遊戲卡通手繪" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:43 +msgid "Universal realistic style" +msgstr "通用寫實風格" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "Generate image resolution" +msgstr "生成圖像解析度" + +#: 
community/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py:50 +msgid "If not transmitted, the default value is 768:768." +msgstr "不傳默認使用768:768。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:38 +msgid "" +"The most effective version of the current hybrid model, the trillion-level " +"parameter scale MOE-32K long article model. Reaching the absolute leading " +"level on various benchmarks, with complex instructions and reasoning, " +"complex mathematical capabilities, support for function call, and " +"application focus optimization in fields such as multi-language translation, " +"finance, law, and medical care" +msgstr "" +"當前混元模型中效果最優版本,萬億級參數規模 MOE-32K 長文模型。在各種 " +"benchmark 上達到絕對領先的水平,複雜指令和推理,具備複雜數學能力,支持 " +"functioncall,在多語言翻譯、金融法律醫療等領域應用重點優化" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:45 +msgid "" +"A better routing strategy is adopted to simultaneously alleviate the " +"problems of load balancing and expert convergence. For long articles, the " +"needle-in-a-haystack index reaches 99.9%" +msgstr "" +"採用更優的路由策略,同時緩解了負載均衡和專家趨同的問題。長文方面,大海撈針指" +"標達到99.9%" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:51 +msgid "" +"Upgraded to MOE structure, the context window is 256k, leading many open " +"source models in multiple evaluation sets such as NLP, code, mathematics, " +"industry, etc." +msgstr "" +"升級為 MOE 結構,上下文窗口為 256k ,在 NLP,代碼,數學,行業等多項評測集上領" +"先眾多開源模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:57 +msgid "" +"Hunyuan's latest version of the role-playing model, a role-playing model " +"launched by Hunyuan's official fine-tuning training, is based on the Hunyuan " +"model combined with the role-playing scene data set for additional training, " +"and has better basic effects in role-playing scenes." 
+msgstr "" +"混元最新版角色扮演模型,混元官方精調訓練推出的角色扮演模型,基於混元模型結合" +"角色扮演場景數據集進行增訓,在角色扮演場景具有更好的基礎效果" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:63 +msgid "" +"Hunyuan's latest MOE architecture FunctionCall model has been trained with " +"high-quality FunctionCall data and has a context window of 32K, leading in " +"multiple dimensions of evaluation indicators." +msgstr "" +"混元最新 MOE 架構 FunctionCall 模型,經過高質量的 FunctionCall 數據訓練,上下" +"文窗口達 32K,在多個維度的評測指標上處於領先。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:69 +msgid "" +"Hunyuan's latest code generation model, after training the base model with " +"200B high-quality code data, and iterating on high-quality SFT data for half " +"a year, the context long window length has been increased to 8K, and it " +"ranks among the top in the automatic evaluation indicators of code " +"generation in the five major languages; the five major languages In the " +"manual high-quality evaluation of 10 comprehensive code tasks that consider " +"all aspects, the performance is in the first echelon." +msgstr "" +"混元最新代碼生成模型,經過 200B 高質量代碼數據增訓基座模型,迭代半年高質量 " +"SFT 數據訓練,上下文長窗口長度增大到 8K,五大語言代碼生成自動評測指標上位居前" +"列;五大語言10項考量各方面綜合代碼任務人工高質量評測上,性能處於第一梯隊" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:77 +msgid "" +"Tencent's Hunyuan Embedding interface can convert text into high-quality " +"vector data. The vector dimension is 1024 dimensions." 
+msgstr "" +"騰訊混元 Embedding 接口,可以將文本轉化為高質量的向量數據。向量維度為1024維。" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:87 +msgid "Mixed element visual model" +msgstr "混元視覺模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:94 +msgid "Hunyuan graph model" +msgstr "混元生圖模型" + +#: community/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py:125 +msgid "Tencent Hunyuan" +msgstr "騰訊混元" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:24 +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:42 +msgid "Facebook’s 125M parameter model" +msgstr "Facebook的125M參數模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:25 +msgid "BAAI’s 7B parameter model" +msgstr "BAAI的7B參數模型" + +#: community/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py:26 +msgid "BAAI’s 13B parameter mode" +msgstr "BAAI的13B參數模型" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py:16 +msgid "" +"If the gap between width, height and 512 is too large, the picture rendering " +"effect will be poor and the probability of excessive delay will increase " +"significantly. 
Recommended ratio and corresponding width and height before " +"super score: width*height" +msgstr "" +"寬、高與512差距過大,則出圖效果不佳、延遲過長概率顯著增加。超分前建議比例及對" +"應寬高:width*height" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:23 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:29 +msgid "Universal female voice" +msgstr "通用女聲" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:25 +msgid "Supernatural timbre-ZiZi 2.0" +msgstr "超自然音色-梓梓2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:26 +msgid "Supernatural timbre-ZiZi" +msgstr "超自然音色-梓梓" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:27 +msgid "Supernatural sound-Ranran 2.0" +msgstr "超自然音色-燃燃2.0" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:28 +msgid "Supernatural sound-Ranran" +msgstr "超自然音色-燃燃" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:30 +msgid "Universal male voice" +msgstr "通用男聲" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py:33 +msgid "[0.2,3], the default is 1, usually one decimal place is enough" +msgstr "[0.2,3],默認為1,通常保留一位小數即可" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:39 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:44 +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:88 +msgid "" +"The user goes to the model inference page of Volcano Ark to create an " +"inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call " +"it." 
+msgstr "" +"用戶前往火山方舟的模型推理頁面創建推理接入點,這裡需要輸入ep-xxxxxxxxxx-yyyy" +"進行調用" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:59 +msgid "Universal 2.0-Vincent Diagram" +msgstr "通用2.0-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:64 +msgid "Universal 2.0Pro-Vincent Chart" +msgstr "通用2.0Pro-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:69 +msgid "Universal 1.4-Vincent Chart" +msgstr "通用1.4-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:74 +msgid "Animation 1.3.0-Vincent Picture" +msgstr "動漫1.3.0-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:79 +msgid "Animation 1.3.1-Vincent Picture" +msgstr "動漫1.3.1-文生圖" + +#: community/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py:113 +msgid "volcano engine" +msgstr "火山引擎" + + +#: models_provider/impl/wenxin_model_provider/credential/llm.py:51 +#, python-brace-format +msgid "{model_name} The model does not support" +msgstr "{model_name} 模型不支持" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:24 +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:53 +msgid "" +"ERNIE-Bot-4 is a large language model independently developed by Baidu. It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot-4是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問答、" +"內容創作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:27 +msgid "" +"ERNIE-Bot is a large language model independently developed by Baidu. 
It " +"covers massive Chinese data and has stronger capabilities in dialogue Q&A, " +"content creation and generation." +msgstr "" +"ERNIE-Bot是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問答、內" +"容創作生成等能力。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:30 +msgid "" +"ERNIE-Bot-turbo is a large language model independently developed by Baidu. " +"It covers massive Chinese data, has stronger capabilities in dialogue Q&A, " +"content creation and generation, and has a faster response speed." +msgstr "" +"ERNIE-Bot-turbo是百度自行研發的大語言模型,覆蓋海量中文數據,具有更強的對話問" +"答、內容創作生成等能力,響應速度更快。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:33 +msgid "" +"BLOOMZ-7B is a well-known large language model in the industry. It was " +"developed and open sourced by BigScience and can output text in 46 languages " +"and 13 programming languages." +msgstr "" +"BLOOMZ-7B是業內知名的大語言模型,由BigScience研發並開源,能夠以46種語言和13種" +"程式語言輸出文本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:39 +msgid "" +"Llama-2-13b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning and knowledge application. " +"Llama-2-13b-chat is a native open source version with balanced performance " +"and effect, suitable for conversation scenarios." +msgstr "" +"Llama-2-13b-chat由Meta AI研發並開源,在編碼、推理及知識應用等場景表現優秀," +"Llama-2-13b-chat是性能與效果均衡的原生開源版本,適用於對話場景。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:42 +msgid "" +"Llama-2-70b-chat was developed by Meta AI and is open source. It performs " +"well in scenarios such as coding, reasoning, and knowledge application. " +"Llama-2-70b-chat is a native open source version with high-precision effects." 
+msgstr "" +"Llama-2-70b-chat由Meta AI研發並開源,在編碼、推理及知識應用等場景表現優秀," +"Llama-2-70b-chat是高精度效果的原生開源版本。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:45 +msgid "" +"The Chinese enhanced version developed by the Qianfan team based on " +"Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-" +"EVAL." +msgstr "" +"千帆團隊在Llama-2-7b基礎上的中文增強版本,在CMMLU、C-EVAL等中文知識庫上表現優" +"異。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:49 +msgid "" +"Embedding-V1 is a text representation model based on Baidu Wenxin large " +"model technology. It can convert text into a vector form represented by " +"numerical values and can be used in text retrieval, information " +"recommendation, knowledge mining and other scenarios. Embedding-V1 provides " +"the Embeddings interface, which can generate corresponding vector " +"representations based on input content. You can call this interface to input " +"text into the model and obtain the corresponding vector representation for " +"subsequent text processing and analysis." +msgstr "" +"Embedding-V1是一個基於百度文心大模型技術的文本表示模型,可以將文本轉化為用數" +"值表示的向量形式,用於文本檢索、信息推薦、知識挖掘等場景。 Embedding-V1提供了" +"Embeddings接口,可以根據輸入內容生成對應的向量表示。您可以通過調用該接口,將" +"文本輸入到模型中,獲取到對應的向量表示,從而進行後續的文本處理和分析。" + +#: community/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py:66 +msgid "Thousand sails large model" +msgstr "千帆大模型" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/image.py:42 +msgid "Please outline this picture" +msgstr "請描述這張圖片" + + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:15 +msgid "Speaker" +msgstr "發音人" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:16 +msgid "" +"Speaker, optional value: Please go to the console to add a trial or purchase " +"speaker. After adding, the speaker parameter value will be displayed." 
+msgstr "" +"發音人,可選值:請到控制臺添加試用或購買發音人,添加後即顯示發音人參數值" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:21 +msgid "iFlytek Xiaoyan" +msgstr "訊飛小燕" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:22 +msgid "iFlytek Xujiu" +msgstr "訊飛許久" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:23 +msgid "iFlytek Xiaoping" +msgstr "訊飛小萍" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:24 +msgid "iFlytek Xiaojing" +msgstr "訊飛小婧" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:25 +msgid "iFlytek Xuxiaobao" +msgstr "訊飛許小寶" + +#: community/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py:28 +msgid "Speech speed, optional value: [0-100], default is 50" +msgstr "語速,可選值:[0-100],默認為50" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:39 +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:50 +msgid "Chinese and English recognition" +msgstr "中英文識別" + +#: community/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py:66 +msgid "iFlytek Spark" +msgstr "訊飛星火" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:15 +msgid "" +"The image generation endpoint allows you to create raw images based on text " +"prompts. The dimensions of the image can be 1024x1024, 1024x1792, or " +"1792x1024 pixels." +msgstr "" +"圖像生成端點允許您根據文本提示創建原始圖像。圖像的尺寸可以為 1024x1024、" +"1024x1792 或 1792x1024 像素。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:29 +msgid "" +"By default, images are generated in standard quality, you can set quality: " +"\"hd\" to enhance detail. Square, standard quality images are generated " +"fastest." 
+msgstr "" +"默認情況下,圖像以標準質量生成,您可以設置質量:「hd」以增強細節。方形、標準質" +"量的圖像生成速度最快。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py:42 +msgid "" +"You can request 1 image at a time (requesting more images by making parallel " +"requests), or up to 10 images at a time using the n parameter." +msgstr "" +"您可以一次請求 1 個圖像(通過發出並行請求來請求更多圖像),或者使用 n 參數一" +"次最多請求 10 個圖像。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:20 +msgid "Chinese female" +msgstr "中文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:21 +msgid "Chinese male" +msgstr "中文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:22 +msgid "Japanese male" +msgstr "日語男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:23 +msgid "Cantonese female" +msgstr "粵語女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:24 +msgid "English female" +msgstr "英文女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:25 +msgid "English male" +msgstr "英文男" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py:26 +msgid "Korean female" +msgstr "韓語女" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:37 +msgid "" +"Code Llama is a language model specifically designed for code generation." 
+msgstr "Code Llama 是一個專門用於代碼生成的語言模型。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:44 +msgid "" +" \n" +"Code Llama Instruct is a fine-tuned version of Code Llama's instructions, " +"designed to perform specific tasks.\n" +" " +msgstr "" +"Code Llama Instruct 是 Code Llama 的指令微調版本,專為執行特定任務而設計。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:53 +msgid "" +"Code Llama Python is a language model specifically designed for Python code " +"generation." +msgstr "Code Llama Python 是一個專門用於 Python 代碼生成的語言模型。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:60 +msgid "" +"CodeQwen 1.5 is a language model for code generation with high performance." +msgstr "CodeQwen 1.5 是一個用於代碼生成的語言模型,具有較高的性能。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:67 +msgid "CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5." +msgstr "CodeQwen 1.5 Chat 是一個聊天模型版本的 CodeQwen 1.5。" + +#: community/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py:74 +msgid "Deepseek is a large-scale language model with 13 billion parameters." +msgstr "Deepseek 是一個擁有 130 億參數的大規模語言模型。" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py:16 +msgid "" +"Image size, only cogview-3-plus supports this parameter. Optional range: " +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the " +"default is 1024x1024." +msgstr "" +"圖片尺寸,僅 cogview-3-plus 支持該參數。可選範圍:" +"[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默認是" +"1024x1024。" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:34 +msgid "" +"Have strong multi-modal understanding capabilities. 
Able to understand up to " +"five images simultaneously and supports video content understanding" +msgstr "具有強大的多模態理解能力。能夠同時理解多達五張圖像,並支持視頻內容理解" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:37 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis" +msgstr "專注於單圖理解。適用於需要高效圖像解析的場景" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:40 +msgid "" +"Focus on single picture understanding. Suitable for scenarios requiring " +"efficient image analysis (free)" +msgstr "專注於單圖理解。適用於需要高效圖像解析的場景(免費)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:46 +msgid "" +"Quickly and accurately generate images based on user text descriptions. " +"Resolution supports 1024x1024" +msgstr "根據用戶文字描述快速、精準生成圖像。解析度支持1024x1024" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:49 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes" +msgstr "根據用戶文字描述生成高質量圖像,支持多圖片尺寸" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:52 +msgid "" +"Generate high-quality images based on user text descriptions, supporting " +"multiple image sizes (free)" +msgstr "根據用戶文字描述生成高質量圖像,支持多圖片尺寸(免費)" + +#: community/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py:75 +msgid "zhipu AI" +msgstr "智譜 AI" + +#: models_provider/serializers/model_serializer.py:43 +#: models_provider/serializers/model_serializer.py:222 +#: models_provider/serializers/model_serializer.py:259 +#: models_provider/serializers/model_serializer.py:323 +msgid "base model" +msgstr "基礎模型" + +#: models_provider/serializers/model_serializer.py:44 +#: models_provider/serializers/model_serializer.py:260 +msgid "parameter configuration" +msgstr "參數配置" + +#: 
models_provider/serializers/model_serializer.py:45 +#: models_provider/serializers/model_serializer.py:225 +#: models_provider/serializers/model_serializer.py:261 +#, fuzzy +#| msgid "Get current user information" +msgid "certification information" +msgstr "獲取當前用戶信息" + +#: models_provider/serializers/model_serializer.py:108 +#: models_provider/serializers/model_serializer.py:215 +#: models_provider/serializers/model_serializer.py:255 +#: modules/serializers/module.py:35 tools/serializers/tool.py:52 +msgid "user id" +msgstr "用戶ID" + +#: models_provider/serializers/model_serializer.py:116 +#: models_provider/serializers/model_serializer.py:132 +#: models_provider/serializers/model_serializer.py:151 +#: models_provider/serializers/model_serializer.py:178 +#: models_provider/serializers/model_serializer.py:371 +#: models_provider/tools.py:111 +msgid "Model does not exist" +msgstr "模型不存在" + +#: models_provider/serializers/model_serializer.py:233 +#: models_provider/serializers/model_serializer.py:272 +#, python-brace-format +msgid "base model【{model_name}】already exists" +msgstr "模型【{model_name}】已存在" + +#: models_provider/serializers/model_serializer.py:312 +msgid "Model saving failed" +msgstr "模型保存失敗" + +#: models_provider/serializers/model_serializer.py:325 +msgid "create user" +msgstr "創建者" + +#: models_provider/views/model.py:28 models_provider/views/model.py:29 +msgid "Create model" +msgstr "創建模型" + +#: models_provider/views/model.py:30 models_provider/views/model.py:57 +#: models_provider/views/model.py:74 models_provider/views/model.py:85 +#: models_provider/views/model.py:96 models_provider/views/model.py:110 +#: models_provider/views/model.py:121 models_provider/views/model.py:137 +#: models_provider/views/model.py:150 models_provider/views/provide.py:24 +#: models_provider/views/provide.py:47 models_provider/views/provide.py:61 +#: models_provider/views/provide.py:79 models_provider/views/provide.py:96 +msgid "Model" +msgstr "模型" + +#: 
models_provider/views/model.py:53 models_provider/views/model.py:54 +msgid "Query model list" +msgstr "查詢模型列表" + +#: models_provider/views/model.py:69 models_provider/views/model.py:70 +msgid "Update model" +msgstr "更新模型" + +#: models_provider/views/model.py:82 models_provider/views/model.py:83 +msgid "Delete model" +msgstr "刪除模型" + +#: models_provider/views/model.py:92 models_provider/views/model.py:93 +msgid "Query model details" +msgstr "查詢模型詳情" + +#: models_provider/views/model.py:106 models_provider/views/model.py:107 +msgid "Get model parameter form" +msgstr "獲取模型參數表單" + +#: models_provider/views/model.py:117 models_provider/views/model.py:118 +msgid "Save model parameter form" +msgstr "保存模型參數表單" + +#: models_provider/views/model.py:132 models_provider/views/model.py:134 +msgid "" +"Query model meta information, this interface does not carry authentication " +"information" +msgstr "查詢模型元信息,該接口不攜帶認證信息" + +#: models_provider/views/model.py:147 models_provider/views/model.py:148 +msgid "Pause model download" +msgstr "下載模型暫停" + +#: models_provider/views/provide.py:21 models_provider/views/provide.py:22 +msgid "Get a list of model suppliers" +msgstr "獲取模型供應商列表" + +#: models_provider/views/provide.py:43 models_provider/views/provide.py:44 +msgid "Get a list of model types" +msgstr "獲取模型類型列表" + +#: models_provider/views/provide.py:57 models_provider/views/provide.py:58 +msgid "Example of obtaining model list" +msgstr "獲取模型列表示例" + +#: models_provider/views/provide.py:75 +msgid "Get model default parameters" +msgstr "獲取模型默認參數" + +#: models_provider/views/provide.py:76 models_provider/views/provide.py:92 +#: models_provider/views/provide.py:93 +msgid "Get the model creation form" +msgstr "獲取模型創建表單" + +#: modules/models/module.py:6 modules/models/module.py:13 +#: modules/serializers/module.py:29 +msgid "module name" +msgstr "模塊名稱" + +#: modules/models/module.py:9 modules/serializers/module.py:32 +msgid "parent id" +msgstr "父級 ID" + +#: modules/serializers/module.py:28 
modules/serializers/module.py:62 +msgid "module id" +msgstr "模塊 ID" + +#: modules/serializers/module.py:30 +msgid "module user id" +msgstr "模塊用戶 ID" + +#: modules/serializers/module.py:36 modules/serializers/module.py:64 +#: modules/serializers/module.py:96 tools/serializers/tool.py:27 +msgid "source" +msgstr "來源" + +#: modules/serializers/module.py:49 +msgid "Module name already exists" +msgstr "模塊名稱已存在" + +#: modules/serializers/module.py:70 +msgid "Module does not exist" +msgstr "模塊不存在" + +#: modules/serializers/module.py:89 +msgid "Cannot delete root module" +msgstr "無法刪除根模塊" + +#: modules/views/module.py:19 modules/views/module.py:20 +msgid "Create module" +msgstr "創建模塊" + +#: modules/views/module.py:24 modules/views/module.py:43 +#: modules/views/module.py:56 modules/views/module.py:68 +#: modules/views/module.py:85 +msgid "Module" +msgstr "模塊" + +#: modules/views/module.py:38 modules/views/module.py:39 +msgid "Update module" +msgstr "更新模塊" + +#: modules/views/module.py:52 modules/views/module.py:53 +msgid "Get module" +msgstr "獲取模塊" + +#: modules/views/module.py:65 modules/views/module.py:66 +msgid "Delete module" +msgstr "刪除模塊" + +#: modules/views/module.py:81 modules/views/module.py:82 +msgid "Get module tree" +msgstr "獲取模塊樹" + +#: tools/serializers/tool.py:21 +msgid "variable name" +msgstr "變量名稱" + +#: tools/serializers/tool.py:23 +msgid "type" +msgstr "類型" + +#: tools/serializers/tool.py:25 +msgid "fields only support string|int|dict|array|float" +msgstr "欄位僅支持字符串|整數|字典|數組|浮點數" + +#: tools/serializers/tool.py:29 +msgid "The field only supports custom|reference" +msgstr "欄位僅支持自定義|引用" + +#: tools/serializers/tool.py:34 +msgid "tool name" +msgstr "工具名稱" + +#: tools/serializers/tool.py:37 +msgid "tool description" +msgstr "工具描述" + +#: tools/serializers/tool.py:39 +msgid "tool content" +msgstr "工具內容" + +#: tools/serializers/tool.py:41 +msgid "input field list" +msgstr "輸入欄位列表" + +#: tools/serializers/tool.py:43 +msgid "init field list" +msgstr "內置欄位列表" + +#: 
tools/serializers/tool.py:45 +msgid "Is active" +msgstr "是否啟用" + +#: tools/views/tool.py:18 tools/views/tool.py:19 +msgid "Create tool" +msgstr "創建工具" + +#: tools/views/tool.py:22 +msgid "Tool" +msgstr "工具" + +#: users/serializers/login.py:27 msgid "Username" msgstr "用戶名" -#: .\apps\users\serializers\login.py:24 +#: users/serializers/login.py:28 msgid "Password" msgstr "密碼" -#: .\apps\users\serializers\login.py:31 -msgid "token" -msgstr "權杖" - -#: .\apps\users\serializers\login.py:43 -msgid "The username or password is incorrect" -msgstr "用戶名或密碼不正確" - -#: .\apps\users\serializers\login.py:45 -msgid "The user has been disabled, please contact the administrator!" -msgstr "用戶已被禁用,請聯系管理員!" - -#: .\apps\users\views\login.py:21 .\apps\users\views\login.py:22 -msgid "Log in" -msgstr "登入" - -#: .\apps\users\views\login.py:23 .\apps\users\views\user.py:26 -msgid "User management" -msgstr "用戶管理" - -#: .\apps\users\views\user.py:24 .\apps\users\views\user.py:25 -msgid "Get current user information" -msgstr "獲取當前用戶資訊" - -msgid "Get captcha" -msgstr "獲取驗證碼" - +#: users/serializers/login.py:29 users/serializers/login.py:69 msgid "captcha" msgstr "驗證碼" +#: users/serializers/login.py:36 +msgid "token" +msgstr "令牌" + +#: users/serializers/login.py:50 msgid "Captcha code error or expiration" -msgstr "驗證碼錯誤或過期" \ No newline at end of file +msgstr "驗證碼錯誤或過期" + +#: users/serializers/login.py:53 +msgid "The username or password is incorrect" +msgstr "用戶名或密碼不正確" + +#: users/serializers/login.py:55 +msgid "The user has been disabled, please contact the administrator!" +msgstr "用戶已被禁用,請聯繫管理員!" 
+ +#: users/views/login.py:21 users/views/login.py:22 +msgid "Log in" +msgstr "登錄" + +#: users/views/login.py:23 users/views/login.py:34 users/views/user.py:28 +#: users/views/user.py:40 users/views/user.py:53 +msgid "User management" +msgstr "用戶管理" + +#: users/views/login.py:32 users/views/login.py:33 +msgid "Get captcha" +msgstr "獲取驗證碼" + +#: users/views/user.py:26 users/views/user.py:27 users/views/user.py:38 +msgid "Get current user information" +msgstr "獲取當前用戶信息" + +#~ msgid "ADMIN" +#~ msgstr "管理員" + +#~ msgid "Super administrator" +#~ msgstr "超級管理員" diff --git a/apps/models_provider/api/model.py b/apps/models_provider/api/model.py index 870324a09..363a82b32 100644 --- a/apps/models_provider/api/model.py +++ b/apps/models_provider/api/model.py @@ -1,8 +1,12 @@ # coding=utf-8 +from drf_spectacular.types import OpenApiTypes +from drf_spectacular.utils import OpenApiParameter +from rest_framework import serializers from common.mixins.api_mixin import APIMixin from common.result import ResultSerializer -from models_provider.serializers.model import ModelCreateRequest, ModelModelSerializer +from models_provider.serializers.model_serializer import ModelModelSerializer, ModelCreateRequest +from django.utils.translation import gettext_lazy as _ class ModelCreateResponse(ResultSerializer): @@ -10,6 +14,12 @@ class ModelCreateResponse(ResultSerializer): return ModelModelSerializer() +class ModelListResponse(APIMixin): + @staticmethod + def get_response(): + return serializers.ListSerializer(child=ModelModelSerializer()) + + class ModelCreateAPI(APIMixin): @staticmethod def get_request(): @@ -18,3 +28,47 @@ class ModelCreateAPI(APIMixin): @staticmethod def get_response(): return ModelCreateResponse + + @classmethod + def get_query_params_api(cls): + return [OpenApiParameter( + name="workspace_id", + description=_("workspace id"), + type=OpenApiTypes.STR, + location=OpenApiParameter.PATH, + required=True, + )] + + +class GetModelApi(APIMixin): + + @staticmethod + def 
get_query_params_api(): + return [OpenApiParameter( + name="workspace_id", + description=_("workspace id"), + type=OpenApiTypes.STR, + location=OpenApiParameter.PATH, + required=True, + ), OpenApiParameter( + name="model_id", + description=_("model id"), + type=OpenApiTypes.STR, + location=OpenApiParameter.PATH, + required=True, + ) + ] + + @staticmethod + def get_response(): + return ModelModelSerializer + + +class ModelEditApi(APIMixin): + @staticmethod + def get_request(): + return ModelCreateRequest + + @staticmethod + def get_response(): + return ModelModelSerializer diff --git a/apps/models_provider/api/provide.py b/apps/models_provider/api/provide.py index 5b036386d..8c2ddcc2c 100644 --- a/apps/models_provider/api/provide.py +++ b/apps/models_provider/api/provide.py @@ -30,29 +30,65 @@ class ModelListSerializer(serializers.Serializer): desc = serializers.CharField(required=True, label=_("model name")) +class ModelParamsFormSerializer(serializers.Serializer): + input_type = serializers.CharField(required=False, label=_("input type")) + label = serializers.CharField(required=False, label=_("label")) + text_field = serializers.CharField(required=False, label=_("text field")) + value_field = serializers.CharField(required=False, label=_("value field")) + provider = serializers.CharField(required=False, label=_("provider")) + method = serializers.CharField(required=False, label=_("method")) + required = serializers.BooleanField(required=False, label=_("required")) + default_value = serializers.CharField(required=False, label=_("default value")) + relation_show_field_dict = serializers.DictField(required=False, label=_("relation show field dict")) + relation_trigger_field_dict = serializers.DictField(required=False, label=_("relation trigger field dict")) + trigger_type = serializers.CharField(required=False, label=_("trigger type")) + attrs = serializers.DictField(required=False, label=_("attrs")) + props_info = serializers.DictField(required=False, 
label=_("props info")) + + class ProvideApi(APIMixin): + class ModelParamsForm(APIMixin): + @staticmethod + def get_query_params_api(): + return [OpenApiParameter( + name="model_type", + description=_("model type"), + type=OpenApiTypes.STR, + location=OpenApiParameter.QUERY, + required=True, + ), OpenApiParameter( + name="provider", + description=_("provider"), + type=OpenApiTypes.STR, + location=OpenApiParameter.QUERY, + required=True, + ), OpenApiParameter( + name="model_name", + description=_("model name"), + type=OpenApiTypes.STR, + location=OpenApiParameter.QUERY, + required=True, + ) + ] + + @staticmethod + def get_response(): + return serializers.ListSerializer(child=ModelParamsFormSerializer()) + class ModelList(APIMixin): @staticmethod def get_query_params_api(): return [OpenApiParameter( - # 参数的名称是done name="model_type", - # 对参数的备注 - description="model_type", - # 指定参数的类型 + description=_("model type"), type=OpenApiTypes.STR, location=OpenApiParameter.QUERY, - # 指定必须给 - required=False, + required=True, ), OpenApiParameter( - # 参数的名称是done name="provider", - # 对参数的备注 - description="provider", - # 指定参数的类型 + description=_("provider"), type=OpenApiTypes.STR, location=OpenApiParameter.QUERY, - # 指定必须给 required=True, ) ] @@ -72,7 +108,7 @@ class ProvideApi(APIMixin): # 参数的名称是done name="provider", # 对参数的备注 - description="provider", + description=_("provider"), # 指定参数的类型 type=OpenApiTypes.STR, location=OpenApiParameter.QUERY, diff --git a/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py b/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py index 409833b05..686081c03 100644 --- a/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py +++ b/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py @@ -15,7 +15,7 @@ class BaiLianLLMModelParams(BaseForm): temperature = forms.SliderField( TooltipLabel( _('Temperature'), - _('Higher values make the output more random, while lower values 
make it more focused and deterministic.') + _('Higher values make the output more random, while lower values make it more focused and deterministic') ), required=True, default_value=0.7, diff --git a/apps/models_provider/serializers/model.py b/apps/models_provider/serializers/model.py deleted file mode 100644 index 2a6b10778..000000000 --- a/apps/models_provider/serializers/model.py +++ /dev/null @@ -1,181 +0,0 @@ -# -*- coding: utf-8 -*- -import json -import threading -import time -from typing import Dict - -import uuid_utils.compat as uuid -from django.db.models import QuerySet -from django.utils.translation import gettext_lazy as _ -from rest_framework import serializers - -from common.exception.app_exception import AppApiException -from common.utils.rsa_util import rsa_long_encrypt, rsa_long_decrypt -from models_provider.base_model_provider import ValidCode, DownModelChunkStatus -from models_provider.constants.model_provider_constants import ModelProvideConstants -from models_provider.models import Model, Status - - -class ModelModelSerializer(serializers.ModelSerializer): - class Meta: - model = Model - fields = [ - 'id', 'name', 'status', 'model_type', 'model_name', - 'user', 'provider', 'credential', 'meta', - 'model_params_form', 'workspace_id' - ] - - -class ModelCreateRequest(serializers.Serializer): - name = serializers.CharField(required=True, max_length=64, label=_("model name")) - provider = serializers.CharField(required=True, label=_("provider")) - model_type = serializers.CharField(required=True, label=_("model type")) - model_name = serializers.CharField(required=True, label=_("model name")) - model_params_form = serializers.ListField(required=False, default=list, label=_("parameter configuration")) - credential = serializers.DictField(required=True, label=_("certification information")) - - -class ModelPullManage: - @staticmethod - def pull(model: Model, credential: Dict): - try: - response = 
ModelProvideConstants[model.provider].value.down_model( - model.model_type, model.model_name, credential - ) - down_model_chunk = {} - last_update_time = time.time() - - for chunk in response: - down_model_chunk[chunk.digest] = chunk.to_dict() - if time.time() - last_update_time > 5: - current_model = QuerySet(Model).filter(id=model.id).first() - if current_model and current_model.status == Status.PAUSE_DOWNLOAD: - return - QuerySet(Model).filter(id=model.id).update( - meta={"down_model_chunk": list(down_model_chunk.values())} - ) - last_update_time = time.time() - - status = Status.ERROR - message = "" - for chunk in down_model_chunk.values(): - if chunk.get('status') == DownModelChunkStatus.success.value: - status = Status.SUCCESS - elif chunk.get('status') == DownModelChunkStatus.error.value: - message = chunk.get("digest") - - QuerySet(Model).filter(id=model.id).update( - meta={"down_model_chunk": [], "message": message}, - status=status - ) - except Exception as e: - QuerySet(Model).filter(id=model.id).update( - meta={"down_model_chunk": [], "message": str(e)}, - status=Status.ERROR - ) - - -class ModelSerializer(serializers.Serializer): - @staticmethod - def model_to_dict(model: Model): - credential = json.loads(rsa_long_decrypt(model.credential)) - return { - 'id': str(model.id), - 'provider': model.provider, - 'name': model.name, - 'model_type': model.model_type, - 'model_name': model.model_name, - 'status': model.status, - 'meta': model.meta, - 'credential': ModelProvideConstants[model.provider].value.get_model_credential( - model.model_type, model.model_name - ).encryption_dict(credential), - 'workspace_id': model.workspace_id - } - - class Operate(serializers.Serializer): - id = serializers.UUIDField(required=True, label=_("模型id")) - user_id = serializers.UUIDField(required=True, label=_("user id")) - - def is_valid(self, *, raise_exception=False): - super().is_valid(raise_exception=True) - model = QuerySet(Model).filter( - id=self.data.get("id"), 
user_id=self.data.get("user_id") - ).first() - if model is None: - raise AppApiException(500, _('模型不存在')) - - def one(self, with_valid=False): - if with_valid: - self.is_valid(raise_exception=True) - model = QuerySet(Model).get( - id=self.data.get('id'), user_id=self.data.get('user_id') - ) - return ModelSerializer.model_to_dict(model) - - class Create(serializers.Serializer): - user_id = serializers.UUIDField(required=True, label=_('user id')) - name = serializers.CharField(required=True, max_length=64, label=_("model name")) - provider = serializers.CharField(required=True, label=_("provider")) - model_type = serializers.CharField(required=True, label=_("model type")) - model_name = serializers.CharField(required=True, label=_("model name")) - model_params_form = serializers.ListField(required=False, default=list, label=_("parameter configuration")) - credential = serializers.DictField(required=True, label=_("certification information")) - workspace_id = serializers.CharField(required=False, label=_("workspace id"), max_length=128) - - def is_valid(self, *, raise_exception=False): - super().is_valid(raise_exception=True) - if QuerySet(Model).filter( - user_id=self.data.get('user_id'), - name=self.data.get('name'), - workspace_id=self.data.get('workspace_id') - ).exists(): - raise AppApiException( - 500, - _('Model name【{model_name}】already exists').format(model_name=self.data.get("name")) - ) - default_params = {item['field']: item['default_value'] for item in self.data.get('model_params_form')} - ModelProvideConstants[self.data.get('provider')].value.is_valid_credential( - self.data.get('model_type'), - self.data.get('model_name'), - self.data.get('credential'), - default_params, - raise_exception=True - ) - - def insert(self, workspace_id, with_valid=True): - status = Status.SUCCESS - if with_valid: - try: - self.is_valid(raise_exception=True) - except AppApiException as e: - if e.code == ValidCode.model_not_fount: - status = Status.DOWNLOAD - else: - raise e - 
- credential = self.data.get('credential') - model_data = { - 'id': uuid.uuid1(), - 'status': status, - 'user_id': self.data.get('user_id'), - 'name': self.data.get('name'), - 'credential': rsa_long_encrypt(json.dumps(credential)), - 'provider': self.data.get('provider'), - 'model_type': self.data.get('model_type'), - 'model_name': self.data.get('model_name'), - 'model_params_form': self.data.get('model_params_form'), - 'workspace_id': workspace_id - } - model = Model(**model_data) - try: - model.save() - except Exception as save_error: - # 可添加日志记录 - raise AppApiException(500, _('模型保存失败')) from save_error - - if status == Status.DOWNLOAD: - thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential)) - thread.start() - - return ModelModelSerializer(model).data diff --git a/apps/models_provider/serializers/model_serializer.py b/apps/models_provider/serializers/model_serializer.py new file mode 100644 index 000000000..13b1f20e0 --- /dev/null +++ b/apps/models_provider/serializers/model_serializer.py @@ -0,0 +1,389 @@ +# -*- coding: utf-8 -*- +import json +import threading +import time +from typing import Dict + +import uuid_utils.compat as uuid +from django.db.models import QuerySet +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from common.config.embedding_config import ModelManage +from common.exception.app_exception import AppApiException +from common.utils.rsa_util import rsa_long_encrypt, rsa_long_decrypt +from models_provider.base_model_provider import ValidCode, DownModelChunkStatus +from models_provider.constants.model_provider_constants import ModelProvideConstants +from models_provider.models import Model, Status +from models_provider.tools import get_model_credential + + +def get_default_model_params_setting(provider, model_type, model_name): + credential = get_model_credential(provider, model_type, model_name) + setting_form = credential.get_model_params_setting_form(model_name) + if 
setting_form is not None: + return setting_form.to_form_list() + return [] + + +class ModelModelSerializer(serializers.ModelSerializer): + class Meta: + model = Model + fields = [ + 'id', 'name', 'status', 'model_type', 'model_name', + 'user', 'provider', 'credential', 'meta', + 'model_params_form', 'workspace_id' + ] + + +class ModelCreateRequest(serializers.Serializer): + name = serializers.CharField(required=True, max_length=64, label=_("model name")) + provider = serializers.CharField(required=True, label=_("provider")) + model_type = serializers.CharField(required=True, label=_("model type")) + model_name = serializers.CharField(required=True, label=_("base model")) + model_params_form = serializers.ListField(required=False, default=list, label=_("parameter configuration")) + credential = serializers.DictField(required=True, label=_("certification information")) + + +class ModelPullManage: + @staticmethod + def pull(model: Model, credential: Dict): + try: + response = ModelProvideConstants[model.provider].value.down_model( + model.model_type, model.model_name, credential + ) + down_model_chunk = {} + last_update_time = time.time() + + for chunk in response: + down_model_chunk[chunk.digest] = chunk.to_dict() + if time.time() - last_update_time > 5: + current_model = QuerySet(Model).filter(id=model.id).first() + if current_model and current_model.status == Status.PAUSE_DOWNLOAD: + return + QuerySet(Model).filter(id=model.id).update( + meta={"down_model_chunk": list(down_model_chunk.values())} + ) + last_update_time = time.time() + + status = Status.ERROR + message = "" + for chunk in down_model_chunk.values(): + if chunk.get('status') == DownModelChunkStatus.success.value: + status = Status.SUCCESS + elif chunk.get('status') == DownModelChunkStatus.error.value: + message = chunk.get("digest") + + QuerySet(Model).filter(id=model.id).update( + meta={"down_model_chunk": [], "message": message}, + status=status + ) + except Exception as e: + 
QuerySet(Model).filter(id=model.id).update( + meta={"down_model_chunk": [], "message": str(e)}, + status=Status.ERROR + ) + + +class ModelSerializer(serializers.Serializer): + @staticmethod + def model_to_dict(model: Model): + credential = json.loads(rsa_long_decrypt(model.credential)) + return { + 'id': str(model.id), + 'provider': model.provider, + 'name': model.name, + 'model_type': model.model_type, + 'model_name': model.model_name, + 'status': model.status, + 'meta': model.meta, + 'credential': ModelProvideConstants[model.provider].value.get_model_credential( + model.model_type, model.model_name + ).encryption_dict(credential), + 'workspace_id': model.workspace_id + } + + class Operate(serializers.Serializer): + id = serializers.UUIDField(required=True, label=_("model id")) + user_id = serializers.UUIDField(required=True, label=_("user id")) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + model = QuerySet(Model).filter( + id=self.data.get("id") + ).first() + if model is None: + raise AppApiException(500, _('Model does not exist')) + + def one(self, with_valid=False): + if with_valid: + self.is_valid(raise_exception=True) + model = QuerySet(Model).get( + id=self.data.get('id') + ) + return ModelSerializer.model_to_dict(model) + + def one_meta(self, with_valid=False): + model = None + if with_valid: + super().is_valid(raise_exception=True) + model = QuerySet(Model).filter(id=self.data.get("id")).first() + if model is None: + raise AppApiException(500, _('Model does not exist')) + return {'id': str(model.id), 'provider': model.provider, 'name': model.name, 'model_type': model.model_type, + 'model_name': model.model_name, + 'status': model.status, + 'meta': model.meta + } + + def pause_download(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + QuerySet(Model).filter(id=self.data.get('id')).update(status=Status.PAUSE_DOWNLOAD) + return True + + def delete(self, with_valid=True): + if 
with_valid: + self.is_valid(raise_exception=True) + model_id = self.data.get('id') + model = Model.objects.filter(id=model_id).first() + if not model: + raise AppApiException(500, _("Model does not exist")) + # TODO : 这里可以添加模型删除的逻辑,需要注意删除模型时的权限和关联关系 + # if model.model_type == 'LLM': + # application_count = Application.objects.filter(model_id=model_id).count() + # if application_count > 0: + # raise AppApiException(500, f"该模型关联了{application_count} 个应用,无法删除该模型。") + # elif model.model_type == 'EMBEDDING': + # dataset_count = DataSet.objects.filter(embedding_mode_id=model_id).count() + # if dataset_count > 0: + # raise AppApiException(500, f"该模型关联了{dataset_count} 个知识库,无法删除该模型。") + # elif model.model_type == 'TTS': + # dataset_count = Application.objects.filter(tts_model_id=model_id).count() + # if dataset_count > 0: + # raise AppApiException(500, f"该模型关联了{dataset_count} 个应用,无法删除该模型。") + # elif model.model_type == 'STT': + # dataset_count = Application.objects.filter(stt_model_id=model_id).count() + # if dataset_count > 0: + # raise AppApiException(500, f"该模型关联了{dataset_count} 个应用,无法删除该模型。") + model.delete() + return True + + def edit(self, instance: Dict, user_id: str, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + model = QuerySet(Model).filter(id=self.data.get('id')).first() + + if model is None: + raise AppApiException(500, _('Model does not exist')) + else: + credential, model_credential, provider_handler = ModelSerializer.Edit( + data={**instance}).is_valid( + model=model) + try: + model.status = Status.SUCCESS + default_params = {item['field']: item['default_value'] for item in model.model_params_form} + # 校验模型认证数据 + provider_handler.is_valid_credential(model.model_type, + instance.get("model_name"), + credential, + default_params, + raise_exception=True) + + except AppApiException as e: + if e.code == ValidCode.model_not_fount: + model.status = Status.DOWNLOAD + else: + raise e + update_keys = ['credential', 'name', 'model_type', 
'model_name'] + for update_key in update_keys: + if update_key in instance and instance.get(update_key) is not None: + if update_key == 'credential': + model_credential_str = json.dumps(credential) + model.__setattr__(update_key, rsa_long_encrypt(model_credential_str)) + else: + model.__setattr__(update_key, instance.get(update_key)) + + ModelManage.delete_key(str(model.id)) + model.save() + if model.status == Status.DOWNLOAD: + thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential)) + thread.start() + return self.one(with_valid=False) + + class Edit(serializers.Serializer): + user_id = serializers.CharField(required=False, label=(_('user id'))) + + name = serializers.CharField(required=False, max_length=64, + label=(_("model name"))) + + model_type = serializers.CharField(required=False, label=(_("model type"))) + + model_name = serializers.CharField(required=False, label=(_("base model"))) + + credential = serializers.DictField(required=False, + label=(_("certification information"))) + + def is_valid(self, model=None, raise_exception=False): + super().is_valid(raise_exception=True) + filter_params = {'workspace_id': self.data.get('workspace_id')} + if 'name' in self.data and self.data.get('name') is not None: + filter_params['name'] = self.data.get('name') + if QuerySet(Model).exclude(id=model.id).filter(**filter_params).exists(): + raise AppApiException(500, _('base model【{model_name}】already exists').format( + model_name=self.data.get("name"))) + + ModelSerializer.model_to_dict(model) + + provider = model.provider + model_type = self.data.get('model_type') + model_name = self.data.get( + 'model_name') + credential = self.data.get('credential') + provider_handler = ModelProvideConstants[provider].value + model_credential = ModelProvideConstants[provider].value.get_model_credential(model_type, + model_name) + source_model_credential = json.loads(rsa_long_decrypt(model.credential)) + source_encryption_model_credential = 
model_credential.encryption_dict(source_model_credential) + if credential is not None: + for k in source_encryption_model_credential.keys(): + if k in credential and credential[k] == source_encryption_model_credential[k]: + credential[k] = source_model_credential[k] + return credential, model_credential, provider_handler + + class Create(serializers.Serializer): + user_id = serializers.UUIDField(required=True, label=_('user id')) + name = serializers.CharField(required=True, max_length=64, label=_("model name")) + provider = serializers.CharField(required=True, label=_("provider")) + model_type = serializers.CharField(required=True, label=_("model type")) + model_name = serializers.CharField(required=True, label=_("base model")) + model_params_form = serializers.ListField(required=False, default=list, label=_("parameter configuration")) + credential = serializers.DictField(required=True, label=_("certification information")) + workspace_id = serializers.CharField(required=False, label=_("workspace id"), max_length=128) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + if QuerySet(Model).filter( + name=self.data.get('name'), + workspace_id=self.data.get('workspace_id') + ).exists(): + raise AppApiException( + 500, + _('base model【{model_name}】already exists').format(model_name=self.data.get("name")) + ) + default_params = {item['field']: item['default_value'] for item in self.data.get('model_params_form')} + ModelProvideConstants[self.data.get('provider')].value.is_valid_credential( + self.data.get('model_type'), + self.data.get('model_name'), + self.data.get('credential'), + default_params, + raise_exception=True + ) + + def insert(self, workspace_id, with_valid=True): + status = Status.SUCCESS + if with_valid: + try: + self.is_valid(raise_exception=True) + except AppApiException as e: + if e.code == ValidCode.model_not_fount: + status = Status.DOWNLOAD + else: + raise e + + credential = self.data.get('credential') + 
model_data = { + 'id': uuid.uuid1(), + 'status': status, + 'user_id': self.data.get('user_id'), + 'name': self.data.get('name'), + 'credential': rsa_long_encrypt(json.dumps(credential)), + 'provider': self.data.get('provider'), + 'model_type': self.data.get('model_type'), + 'model_name': self.data.get('model_name'), + 'model_params_form': self.data.get('model_params_form'), + 'workspace_id': workspace_id + } + model = Model(**model_data) + try: + model.save() + except Exception as save_error: + # 可添加日志记录 + raise AppApiException(500, _("Model saving failed")) from save_error + + if status == Status.DOWNLOAD: + thread = threading.Thread(target=ModelPullManage.pull, args=(model, credential)) + thread.start() + + return ModelModelSerializer(model).data + + class Query(serializers.Serializer): + name = serializers.CharField(required=False, max_length=64, label=_('model name')) + model_type = serializers.CharField(required=False, label=_('model type')) + model_name = serializers.CharField(required=False, label=_('base model')) + provider = serializers.CharField(required=False, label=_('provider')) + create_user = serializers.CharField(required=False, label=_('create user')) + workspace_id = serializers.CharField(required=False, label=_('workspace id')) + + def list(self, with_valid): + if with_valid: + self.is_valid(raise_exception=True) + + query_params = self._build_query_params() + return self._fetch_models(query_params) + + def _build_query_params(self): + query_params = {} + for field in ['name', 'model_type', 'model_name', 'provider', 'create_user', 'workspace_id']: + value = self.data.get(field) + if value is not None: + if field == 'name': + query_params[f'{field}__icontains'] = value + elif field == 'create_user': + query_params['user_id'] = value + else: + query_params[field] = value + return query_params + + def _fetch_models(self, query_params): + return [ + { + 'id': str(model.id), + 'provider': model.provider, + 'name': model.name, + 'model_type': 
model.model_type, + 'model_name': model.model_name, + 'status': model.status, + 'meta': model.meta, + 'user_id': model.user_id, + 'username': model.user.username + } + for model in Model.objects.filter(**query_params).order_by("-create_time") + ] + + class ModelParams(serializers.Serializer): + id = serializers.UUIDField(required=True, label=_('model id')) + + def is_valid(self, *, raise_exception=False): + super().is_valid(raise_exception=True) + model = QuerySet(Model).filter(id=self.data.get("id")).first() + if model is None: + raise AppApiException(500, _("Model does not exist")) + + def get_model_params(self, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + model_id = self.data.get('id') + model = QuerySet(Model).filter(id=model_id).first() + return model.model_params_form + + def save_model_params_form(self, model_params_form, with_valid=True): + if with_valid: + self.is_valid(raise_exception=True) + if model_params_form is None: + model_params_form = [] + model_id = self.data.get('id') + model = QuerySet(Model).filter(id=model_id).first() + model.model_params_form = model_params_form + model.save() + return True diff --git a/apps/models_provider/tools.py b/apps/models_provider/tools.py index 08f5780c6..7d58aac76 100644 --- a/apps/models_provider/tools.py +++ b/apps/models_provider/tools.py @@ -109,8 +109,6 @@ def get_model_by_id(_id, user_id): connection.close() if model is None: raise Exception(_('Model does not exist')) - if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id): - raise Exception(_('No permission to use this model') + f"{model.name}") return model diff --git a/apps/models_provider/urls.py b/apps/models_provider/urls.py index 7560b87cd..2f8be7ea5 100644 --- a/apps/models_provider/urls.py +++ b/apps/models_provider/urls.py @@ -4,18 +4,19 @@ from . 
import views app_name = "models_provider" urlpatterns = [ - # path('provider//', views.Provide.Exec.as_view(), name='provide_exec'), path('provider', views.Provide.as_view(), name='provide'), path('provider/model_type_list', views.Provide.ModelTypeList.as_view(), name="provider/model_type_list"), path('provider/model_list', views.Provide.ModelList.as_view(), name="provider/model_name_list"), - # path('provider/model_params_form', views.Provide.ModelParamsForm.as_view(), - # name="provider/model_params_form"), - # path('provider/model_form', views.Provide.ModelForm.as_view(), - # name="provider/model_form"), + path('provider/model_params_form', views.Provide.ModelParamsForm.as_view(), + name="provider/model_params_form"), + path('provider/model_form', views.Provide.ModelForm.as_view(), + name="provider/model_form"), path('workspace//model', views.Model.as_view(), name='model'), - # path('workspace//model//model_params_form', views.Model.ModelParamsForm.as_view(), - # name='model/model_params_form'), - # path('workspace//model/', views.Model.Operate.as_view(), name='model/operate'), - # path('workspace//model//pause_download', views.Model.PauseDownload.as_view(), name='model/operate'), - # path('workspace//model//meta', views.Model.ModelMeta.as_view(), name='model/operate/meta'), + path('workspace//model//model_params_form', views.Model.ModelParamsForm.as_view(), + name='model/model_params_form'), + path('workspace//model/', views.Model.Operate.as_view(), name='model/operate'), + path('workspace//model//pause_download', views.Model.PauseDownload.as_view(), + name='model/pause_download'), + path('workspace//model//meta', views.Model.ModelMeta.as_view(), + name='model/operate/meta'), ] diff --git a/apps/models_provider/views/model.py b/apps/models_provider/views/model.py index 539f4ed9f..b3ea3fd9d 100644 --- a/apps/models_provider/views/model.py +++ b/apps/models_provider/views/model.py @@ -15,8 +15,10 @@ from common.auth import TokenAuth from common.auth.authentication 
import has_permissions from common.constants.permission_constants import PermissionConstants from common.result import result -from models_provider.api.model import ModelCreateAPI -from models_provider.serializers.model import ModelSerializer +from common.utils.common import query_params_to_single_dict +from models_provider.api.model import ModelCreateAPI, GetModelApi, ModelEditApi, ModelListResponse +from models_provider.api.provide import ProvideApi +from models_provider.serializers.model_serializer import ModelSerializer class Model(APIView): @@ -26,10 +28,127 @@ class Model(APIView): description=_("Create model"), operation_id=_("Create model"), tags=[_("Model")], + parameters=ModelCreateAPI.get_query_params_api(), request=ModelCreateAPI.get_request(), responses=ModelCreateAPI.get_response()) - @has_permissions(PermissionConstants.MODEL_CREATE) + @has_permissions(PermissionConstants.MODEL_CREATE.get_workspace_permission()) def post(self, request: Request, workspace_id: str): return result.success( ModelSerializer.Create(data={**request.data, 'user_id': request.user.id}).insert(workspace_id, with_valid=True)) + + # @extend_schema(methods=['PUT'], + # description=_('Update model'), + # operation_id=_('Update model'), + # request=ModelEditApi.get_request(), + # responses=ModelCreateApi.get_response(), + # tags=[_('Model')]) + # @has_permissions(PermissionConstants.MODEL_CREATE) + # def put(self, request: Request): + # return result.success( + # ModelSerializer.Create(data={**request.data, 'user_id': str(request.user.id)}).insert(request.user.id, + # with_valid=True)) + + @extend_schema(methods=['GET'], + description=_('Query model list'), + operation_id=_('Query model list'), + parameters=ModelCreateAPI.get_query_params_api(), + responses=ModelListResponse.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_READ.get_workspace_permission()) + def get(self, request: Request, workspace_id: str): + return result.success( + ModelSerializer.Query( 
data={**query_params_to_single_dict(request.query_params)}).list( + with_valid=True)) + + class Operate(APIView): + authentication_classes = [TokenAuth] + + @extend_schema(methods=['PUT'], + description=_('Update model'), + operation_id=_('Update model'), + request=ModelEditApi.get_request(), + parameters=GetModelApi.get_query_params_api(), + responses=ModelEditApi.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_EDIT.get_workspace_permission()) + def put(self, request: Request, workspace_id, model_id: str): + return result.success( + ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).edit(request.data, + str(request.user.id))) + + @extend_schema(methods=['DELETE'], + description=_('Delete model'), + operation_id=_('Delete model'), + parameters=GetModelApi.get_query_params_api(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_DELETE.get_workspace_permission()) + def delete(self, request: Request, workspace_id: str, model_id: str): + return result.success( + ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).delete()) + + @extend_schema(methods=['GET'], + description=_('Query model details'), + operation_id=_('Query model details'), + parameters=GetModelApi.get_query_params_api(), + responses=GetModelApi.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_READ.get_workspace_permission()) + def get(self, request: Request, workspace_id: str, model_id: str): + return result.success( + ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).one(with_valid=True)) + + class ModelParamsForm(APIView): + authentication_classes = [TokenAuth] + + @extend_schema(methods=['GET'], + description=_('Get model parameter form'), + operation_id=_('Get model parameter form'), + parameters=GetModelApi.get_query_params_api(), + responses=ProvideApi.ModelParamsForm.get_response(), + tags=[_('Model')]) + 
@has_permissions(PermissionConstants.MODEL_READ.get_workspace_permission()) + def get(self, request: Request, workspace_id: str, model_id: str): + return result.success( + ModelSerializer.ModelParams(data={'id': model_id}).get_model_params()) + + @extend_schema(methods=['PUT'], + description=_('Save model parameter form'), + operation_id=_('Save model parameter form'), + parameters=GetModelApi.get_query_params_api(), + responses=ProvideApi.ModelParamsForm.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_READ.get_workspace_permission()) + def put(self, request: Request, workspace_id: str, model_id: str): + return result.success( + ModelSerializer.ModelParams(data={'id': model_id}).save_model_params_form(request.data)) + + class ModelMeta(APIView): + authentication_classes = [TokenAuth] + + @extend_schema(methods=['GET'], + description=_( + 'Query model meta information, this interface does not carry authentication information'), + operation_id=_( + 'Query model meta information, this interface does not carry authentication information'), + parameters=GetModelApi.get_query_params_api(), + responses=GetModelApi.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_READ.get_workspace_permission()) + def get(self, request: Request, workspace_id: str, model_id: str): + return result.success( + ModelSerializer.Operate(data={'id': model_id}).one_meta(with_valid=True)) + + class PauseDownload(APIView): + authentication_classes = [TokenAuth] + + @extend_schema(methods=['PUT'], + description=_('Pause model download'), + operation_id=_('Pause model download'), + parameters=GetModelApi.get_query_params_api(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_CREATE.get_workspace_permission()) + def put(self, request: Request, workspace_id: str, model_id: str): + return result.success( + ModelSerializer.Operate(data={'id': model_id}).pause_download()) diff --git a/apps/models_provider/views/provide.py 
b/apps/models_provider/views/provide.py index 6b142bacb..8f6e232ee 100644 --- a/apps/models_provider/views/provide.py +++ b/apps/models_provider/views/provide.py @@ -11,6 +11,7 @@ from common.auth.authentication import has_permissions from common.constants.permission_constants import PermissionConstants from models_provider.api.provide import ProvideApi from models_provider.constants.model_provider_constants import ModelProvideConstants +from models_provider.serializers.model_serializer import get_default_model_params_setting class Provide(APIView): @@ -66,3 +67,37 @@ class Provide(APIView): return result.success( ModelProvideConstants[provider].value.get_model_list( model_type)) + + class ModelParamsForm(APIView): + authentication_classes = [TokenAuth] + + @extend_schema(methods=['GET'], + description=_('Get model default parameters'), + operation_id=_('Get the model creation form'), + parameters=ProvideApi.ModelParamsForm.get_query_params_api(), + responses=ProvideApi.ModelParamsForm.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_READ) + def get(self, request: Request): + provider = request.query_params.get('provider') + model_type = request.query_params.get('model_type') + model_name = request.query_params.get('model_name') + + return result.success(get_default_model_params_setting(provider, model_type, model_name)) + + class ModelForm(APIView): + authentication_classes = [TokenAuth] + + @extend_schema(methods=['GET'], + description=_('Get the model creation form'), + operation_id=_('Get the model creation form'), + parameters=ProvideApi.ModelParamsForm.get_query_params_api(), + responses=ProvideApi.ModelParamsForm.get_response(), + tags=[_('Model')]) + @has_permissions(PermissionConstants.MODEL_READ) + def get(self, request: Request): + provider = request.query_params.get('provider') + model_type = request.query_params.get('model_type') + model_name = request.query_params.get('model_name') + return result.success( + 
ModelProvideConstants[provider].value.get_model_credential(model_type, model_name).to_form_list())