From a28de6feafdd1d3b97cde958971ebc27b891970d Mon Sep 17 00:00:00 2001 From: shaohuzhang1 <80892890+shaohuzhang1@users.noreply.github.com> Date: Mon, 13 Jan 2025 11:15:51 +0800 Subject: [PATCH] feat: i18n (#2011) --- .../serializers/application_serializers.py | 9 +- apps/common/auth/authenticate.py | 14 +- apps/common/auth/authentication.py | 4 +- .../auth/handle/impl/application_key.py | 5 +- .../auth/handle/impl/public_access_token.py | 10 +- apps/common/auth/handle/impl/user_token.py | 4 +- .../constants/exception_code_constants.py | 24 +- apps/common/constants/permission_constants.py | 10 +- apps/common/event/__init__.py | 6 +- apps/common/event/listener_manage.py | 36 +- apps/common/field/common.py | 8 +- apps/common/forms/base_field.py | 3 +- apps/common/forms/slider_field.py | 11 +- apps/common/handle/handle_exception.py | 4 +- apps/common/handle/impl/doc_split_handle.py | 1 + .../handle/impl/qa/zip_parse_qa_handle.py | 4 +- apps/common/handle/impl/zip_split_handle.py | 4 +- apps/common/init/init_doc.py | 5 +- apps/common/job/clean_chat_job.py | 9 +- apps/common/job/clean_debug_file_job.py | 5 +- apps/common/job/client_access_num_job.py | 5 +- apps/common/mixins/app_model_mixin.py | 6 +- apps/common/response/result.py | 55 +-- apps/common/swagger_api/common_api.py | 98 ++-- apps/common/util/common.py | 6 +- apps/common/util/field_message.py | 97 ++-- apps/embedding/task/embedding.py | 23 +- .../serializers/function_lib_serializer.py | 67 +-- .../serializers/py_lint_serializer.py | 3 +- .../swagger_api/function_lib_api.py | 128 +++--- apps/function_lib/swagger_api/py_lint_api.py | 4 +- apps/function_lib/views/function_lib_views.py | 43 +- apps/function_lib/views/py_lint.py | 7 +- apps/locales/en_US/LC_MESSAGES/django.po | 26 ++ apps/locales/zh_CN/LC_MESSAGES/django.po | 26 ++ apps/locales/zh_Hant/LC_MESSAGES/django.po | 26 ++ .../models_provider/base_model_provider.py | 22 +- .../aliyun_bai_lian_model_provider.py | 23 +- .../credential/embedding.py | 10 +- .../credential/image.py | 18 +- .../credential/llm.py | 20 +- .../credential/reranker.py | 12 +- .../credential/stt.py | 10 +- .../credential/tti.py | 40 +- .../credential/tts.py | 44 +- .../model/tti.py | 4 +- .../model/tts.py | 4 +- .../aws_bedrock_model_provider.py | 36 +- .../credential/embedding.py | 9 +- .../credential/llm.py | 23 +- .../azure_model_provider.py | 5 +- .../credential/embedding.py | 23 +- .../azure_model_provider/credential/image.py | 26 +- .../azure_model_provider/credential/llm.py | 24 +- .../azure_model_provider/credential/stt.py | 13 +- .../azure_model_provider/credential/tti.py | 23 +- .../azure_model_provider/credential/tts.py | 15 +- .../deepseek_model_provider/credential/llm.py | 18 +- .../deepseek_model_provider.py | 6 +- .../credential/embedding.py | 10 +- .../gemini_model_provider/credential/image.py | 17 +- .../gemini_model_provider/credential/llm.py | 14 +- .../gemini_model_provider/credential/stt.py | 8 +- .../gemini_model_provider.py | 14 +- .../impl/gemini_model_provider/model/stt.py | 10 +- .../kimi_model_provider/credential/llm.py | 15 +- .../credential/embedding.py | 11 +- .../credential/reranker.py | 11 +- .../local_model_provider.py | 7 +- .../credential/embedding.py | 13 +- .../ollama_model_provider/credential/image.py | 17 +- .../ollama_model_provider/credential/llm.py | 15 +- .../ollama_model_provider.py | 57 ++- .../credential/embedding.py | 11 +- .../openai_model_provider/credential/image.py | 15 +- .../openai_model_provider/credential/llm.py | 15 +- 
.../openai_model_provider/credential/stt.py | 10 +- .../openai_model_provider/credential/tti.py | 18 +- .../openai_model_provider/credential/tts.py | 11 +- .../openai_model_provider.py | 33 +- .../qwen_model_provider/credential/image.py | 14 +- .../qwen_model_provider/credential/llm.py | 14 +- .../qwen_model_provider/credential/tti.py | 35 +- .../impl/qwen_model_provider/model/tti.py | 4 +- .../qwen_model_provider.py | 5 +- .../credential/embedding.py | 8 +- .../credential/image.py | 14 +- .../tencent_model_provider/credential/llm.py | 12 +- .../tencent_model_provider/credential/tti.py | 60 +-- .../impl/tencent_model_provider/model/tti.py | 4 +- .../tencent_model_provider.py | 30 +- .../vllm_model_provider/credential/llm.py | 17 +- .../vllm_model_provider.py | 9 +- .../credential/embedding.py | 11 +- .../credential/image.py | 15 +- .../credential/llm.py | 13 +- .../credential/stt.py | 9 +- .../credential/tti.py | 11 +- .../credential/tts.py | 35 +- .../model/tts.py | 3 +- .../volcanic_engine_model_provider.py | 19 +- .../credential/embedding.py | 7 +- .../wenxin_model_provider/credential/llm.py | 15 +- .../wenxin_model_provider.py | 23 +- .../xf_model_provider/credential/embedding.py | 10 +- .../xf_model_provider/credential/image.py | 12 +- .../impl/xf_model_provider/credential/llm.py | 20 +- .../impl/xf_model_provider/credential/stt.py | 9 +- .../impl/xf_model_provider/credential/tts.py | 23 +- .../impl/xf_model_provider/model/tts.py | 3 +- .../xf_model_provider/xf_model_provider.py | 7 +- .../credential/embedding.py | 13 +- .../credential/image.py | 22 +- .../credential/llm.py | 25 +- .../credential/reranker.py | 11 +- .../credential/stt.py | 9 +- .../credential/tti.py | 15 +- .../credential/tts.py | 25 +- .../xinference_model_provider/model/tts.py | 3 +- .../xinference_model_provider.py | 122 ++--- .../zhipu_model_provider/credential/image.py | 13 +- .../zhipu_model_provider/credential/llm.py | 13 +- .../zhipu_model_provider/credential/tti.py | 11 +- .../impl/zhipu_model_provider/model/tti.py | 3 +- .../zhipu_model_provider.py | 15 +- apps/setting/models_provider/tools.py | 5 +- .../serializers/model_apply_serializers.py | 16 +- .../serializers/provider_serializers.py | 76 ++-- apps/setting/serializers/system_setting.py | 17 +- apps/setting/serializers/team_serializers.py | 81 ++-- apps/setting/serializers/valid_serializers.py | 14 +- apps/setting/swagger_api/provide_api.py | 98 ++-- apps/setting/swagger_api/system_setting.py | 65 +-- apps/setting/swagger_api/valid_api.py | 5 +- apps/setting/views/Team.py | 37 +- apps/setting/views/model.py | 97 ++-- apps/setting/views/model_apply.py | 19 +- apps/setting/views/system_setting.py | 19 +- apps/setting/views/valid.py | 5 +- apps/smartdoc/conf.py | 1 + apps/smartdoc/settings/base.py | 25 +- apps/users/serializers/user_serializers.py | 295 ++++++------ apps/users/views/user.py | 122 ++--- .../component/chat-input-operate/index.vue | 4 +- ui/src/components/app-table/index.vue | 4 +- .../dynamics-form/items/JsonInput.vue | 2 +- .../items/complex/ArrayObjectCard.vue | 2 +- .../generate-related-dialog/index.vue | 80 +--- ui/src/components/index.ts | 2 + .../components/markdown/MdEditorMagnify.vue | 4 +- ui/src/components/model-select/index.vue | 156 +++++++ ui/src/enums/model.ts | 15 +- ui/src/layout/components/breadcrumb/index.vue | 3 +- .../top-bar/avatar/APIKeyDialog.vue | 45 +- .../top-bar/avatar/ResetPassword.vue | 4 +- .../top-bar/avatar/SettingAPIKeyDialog.vue | 106 ----- ui/src/layout/components/top-bar/index.vue | 5 +- 
ui/src/locales/lang/en_US/common.ts | 44 ++ ui/src/locales/lang/en_US/index.ts | 118 ++--- ui/src/locales/lang/en_US/layout.ts | 8 +- .../lang/en_US/views/application-overview.ts | 55 +-- .../locales/lang/en_US/views/application.ts | 208 ++++++--- ui/src/locales/lang/en_US/views/index.ts | 4 +- ui/src/locales/lang/en_US/views/login.ts | 4 + ui/src/locales/lang/en_US/views/system.ts | 135 ++++++ ui/src/locales/lang/zh_CN/common.ts | 44 ++ ui/src/locales/lang/zh_CN/index.ts | 110 ++--- ui/src/locales/lang/zh_CN/layout.ts | 5 +- .../lang/zh_CN/views/application-overview.ts | 55 +-- .../lang/zh_CN/views/application-workflow.ts | 3 + .../locales/lang/zh_CN/views/application.ts | 201 ++++++--- ui/src/locales/lang/zh_CN/views/dataset.ts | 67 +++ .../locales/lang/zh_CN/views/function-lib.ts | 82 ++++ ui/src/locales/lang/zh_CN/views/index.ts | 14 +- ui/src/locales/lang/zh_CN/views/login.ts | 4 + ui/src/locales/lang/zh_CN/views/system.ts | 133 ++++++ ui/src/locales/lang/zh_CN/views/team.ts | 31 ++ ui/src/locales/lang/zh_CN/views/template.ts | 31 ++ ui/src/locales/lang/zh_CN/views/user.ts | 67 +++ ui/src/locales/lang/zh_TW/common.ts | 44 ++ ui/src/locales/lang/zh_TW/components/index.ts | 1 + ui/src/locales/lang/zh_TW/index.ts | 46 ++ ui/src/locales/lang/zh_TW/layout.ts | 36 ++ ui/src/locales/lang/zh_TW/views/404.ts | 5 + .../lang/zh_TW/views/application-overview.ts | 111 +++++ .../locales/lang/zh_TW/views/application.ts | 191 ++++++++ ui/src/locales/lang/zh_TW/views/index.ts | 10 + ui/src/locales/lang/zh_TW/views/login.ts | 4 + ui/src/locales/lang/zh_TW/views/system.ts | 134 ++++++ ui/src/router/modules/function-lib.ts | 2 +- ui/src/styles/app.scss | 16 - ui/src/utils/clipboard.ts | 5 +- ui/src/utils/message.ts | 7 +- ui/src/utils/theme.ts | 14 +- .../component/APIKeyDialog.vue | 39 +- .../component/DisplaySettingDialog.vue | 17 +- .../component/EditAvatarDialog.vue | 4 +- .../component/LimitDialog.vue | 12 +- .../component/SettingAPIKeyDialog.vue | 22 +- .../component/XPackDisplaySettingDialog.vue | 160 +++++-- ui/src/views/application-overview/index.vue | 10 +- .../component/PublishHistory.vue | 2 +- ui/src/views/application-workflow/index.vue | 6 +- .../views/application/ApplicationAccess.vue | 25 +- .../views/application/ApplicationSetting.vue | 419 ++++++------------ .../component/AIModeParamSettingDialog.vue | 6 +- .../component/AccessSettingDrawer.vue | 203 +++++++-- .../component/AddDatasetDialog.vue | 20 +- .../component/CopyApplicationDialog.vue | 18 +- .../component/CreateApplicationDialog.vue | 50 ++- .../component/ParamSettingDialog.vue | 58 +-- .../component/TTSModeParamSettingDialog.vue | 11 +- ui/src/views/application/index.vue | 62 +-- ui/src/views/authentication/component/CAS.vue | 41 +- .../authentication/component/EditModal.vue | 66 ++- .../views/authentication/component/LDAP.vue | 92 +++- .../views/authentication/component/OAuth2.vue | 111 +++-- .../views/authentication/component/OIDC.vue | 91 ++-- .../views/authentication/component/SCAN.vue | 37 +- ui/src/views/authentication/index.vue | 12 +- ui/src/views/chat/auth/component/password.vue | 4 +- ui/src/views/dataset/DatasetSetting.vue | 60 ++- .../views/dataset/UploadDocumentDataset.vue | 25 +- ui/src/views/dataset/component/BaseForm.vue | 120 ++--- .../dataset/component/CreateDatasetDialog.vue | 56 ++- .../dataset/component/EditParagraphDialog.vue | 4 +- .../views/dataset/component/ParagraphList.vue | 2 +- .../views/dataset/component/SyncWebDialog.vue | 6 +- ui/src/views/dataset/index.vue | 63 +-- 
.../component/ImportDocumentDialog.vue | 6 +- .../component/SelectDatasetDialog.vue | 4 +- ui/src/views/document/index.vue | 30 +- ui/src/views/email/index.vue | 2 +- .../component/FieldFormDialog.vue | 44 +- .../component/FunctionDebugDrawer.vue | 39 +- .../component/FunctionFormDrawer.vue | 88 ++-- ui/src/views/function-lib/index.vue | 41 +- ui/src/views/hit-test/index.vue | 4 +- .../views/log/component/EditContentDialog.vue | 4 +- ui/src/views/log/component/EditMarkDialog.vue | 6 +- ui/src/views/log/index.vue | 20 +- ui/src/views/login/index.vue | 2 +- .../paragraph/component/ParagraphDialog.vue | 8 +- .../component/SelectDocumentDialog.vue | 2 +- ui/src/views/paragraph/index.vue | 16 +- .../problem/component/CreateProblemDialog.vue | 6 +- .../problem/component/RelateProblemDialog.vue | 6 +- ui/src/views/problem/index.vue | 12 +- .../team/component/CreateMemberDialog.vue | 18 +- .../team/component/PermissionSetting.vue | 24 +- ui/src/views/team/index.vue | 30 +- .../template/component/AddParamDrawer.vue | 4 +- .../template/component/CreateModelDialog.vue | 66 +-- ui/src/views/template/component/EditModel.vue | 8 +- ui/src/views/template/component/ModelCard.vue | 30 +- .../template/component/ParamSettingDialog.vue | 10 +- .../component/SelectProviderDialog.vue | 23 +- ui/src/views/template/component/data.ts | 10 + ui/src/views/template/index.vue | 43 +- ui/src/views/theme/index.vue | 138 ++++-- .../user-manage/component/UserDialog.vue | 82 ++-- .../user-manage/component/UserPwdDialog.vue | 30 +- ui/src/views/user-manage/index.vue | 68 +-- ui/src/workflow/common/AddFormCollect.vue | 4 +- ui/src/workflow/common/EditFormCollect.vue | 6 +- ui/src/workflow/common/NodeContainer.vue | 10 +- ui/src/workflow/common/data.ts | 2 +- ui/src/workflow/common/shortcut.ts | 4 +- ui/src/workflow/nodes/ai-chat-node/index.vue | 102 +---- .../component/ApiFieldFormDialog.vue | 4 +- .../component/ApiInputFieldTable.vue | 27 +- .../component/FileUploadSettingDialog.vue | 6 +- .../component/UserFieldFormDialog.vue | 4 +- .../component/UserInputFieldTable.vue | 6 +- ui/src/workflow/nodes/base-node/index.vue | 141 +----- ui/src/workflow/nodes/form-node/index.vue | 6 +- ui/src/workflow/nodes/function-node/index.vue | 8 +- .../workflow/nodes/image-generate/index.vue | 97 +--- .../workflow/nodes/image-understand/index.vue | 75 +--- ui/src/workflow/nodes/question-node/index.vue | 103 +---- .../reranker-node/ParamSettingDialog.vue | 8 +- ui/src/workflow/nodes/reranker-node/index.vue | 100 +---- .../nodes/speech-to-text-node/index.vue | 70 +-- .../nodes/text-to-speech-node/index.vue | 75 +--- 284 files changed, 5456 insertions(+), 4065 deletions(-) create mode 100644 apps/locales/en_US/LC_MESSAGES/django.po create mode 100644 apps/locales/zh_CN/LC_MESSAGES/django.po create mode 100644 apps/locales/zh_Hant/LC_MESSAGES/django.po create mode 100644 ui/src/components/model-select/index.vue delete mode 100644 ui/src/layout/components/top-bar/avatar/SettingAPIKeyDialog.vue create mode 100644 ui/src/locales/lang/en_US/common.ts create mode 100644 ui/src/locales/lang/en_US/views/login.ts create mode 100644 ui/src/locales/lang/en_US/views/system.ts create mode 100644 ui/src/locales/lang/zh_CN/common.ts create mode 100644 ui/src/locales/lang/zh_CN/views/application-workflow.ts create mode 100644 ui/src/locales/lang/zh_CN/views/dataset.ts create mode 100644 ui/src/locales/lang/zh_CN/views/function-lib.ts create mode 100644 ui/src/locales/lang/zh_CN/views/login.ts create mode 100644 ui/src/locales/lang/zh_CN/views/system.ts create 
mode 100644 ui/src/locales/lang/zh_CN/views/team.ts create mode 100644 ui/src/locales/lang/zh_CN/views/template.ts create mode 100644 ui/src/locales/lang/zh_CN/views/user.ts create mode 100644 ui/src/locales/lang/zh_TW/common.ts create mode 100644 ui/src/locales/lang/zh_TW/components/index.ts create mode 100644 ui/src/locales/lang/zh_TW/index.ts create mode 100644 ui/src/locales/lang/zh_TW/layout.ts create mode 100644 ui/src/locales/lang/zh_TW/views/404.ts create mode 100644 ui/src/locales/lang/zh_TW/views/application-overview.ts create mode 100644 ui/src/locales/lang/zh_TW/views/application.ts create mode 100644 ui/src/locales/lang/zh_TW/views/index.ts create mode 100644 ui/src/locales/lang/zh_TW/views/login.ts create mode 100644 ui/src/locales/lang/zh_TW/views/system.ts create mode 100644 ui/src/views/template/component/data.ts diff --git a/apps/application/serializers/application_serializers.py b/apps/application/serializers/application_serializers.py index 00ce499b7..cbd3b105a 100644 --- a/apps/application/serializers/application_serializers.py +++ b/apps/application/serializers/application_serializers.py @@ -15,7 +15,6 @@ import re import uuid from functools import reduce from typing import Dict, List - from django.contrib.postgres.fields import ArrayField from django.core import cache, validators from django.core import signing @@ -54,8 +53,7 @@ from setting.models_provider.tools import get_model_instance_by_model_user_id from setting.serializers.provider_serializers import ModelSerializer from smartdoc.conf import PROJECT_DIR from users.models import User -from django.db.models import Value -from django.db.models.fields.json import KeyTextTransform +from django.utils.translation import gettext_lazy as _ chat_cache = cache.caches['chat_cache'] @@ -194,10 +192,11 @@ def get_base_node_work_flow(work_flow): class ApplicationSerializer(serializers.Serializer): - name = serializers.CharField(required=True, max_length=64, min_length=1, error_messages=ErrMessage.char("应用名称")) + name = serializers.CharField(required=True, max_length=64, min_length=1, + error_messages=ErrMessage.char(_("application name"))) desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=256, min_length=1, - error_messages=ErrMessage.char("应用描述")) + error_messages=ErrMessage.char(_("application describe"))) model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True, error_messages=ErrMessage.char("模型")) dialogue_number = serializers.IntegerField(required=True, diff --git a/apps/common/auth/authenticate.py b/apps/common/auth/authenticate.py index 3d54d47e8..6eddb76b4 100644 --- a/apps/common/auth/authenticate.py +++ b/apps/common/auth/authenticate.py @@ -16,7 +16,7 @@ from rest_framework.authentication import TokenAuthentication from common.exception.app_exception import AppAuthenticationFailed, AppEmbedIdentityFailed, AppChatNumOutOfBoundsFailed, \ ChatException, AppApiException - +from django.utils.translation import gettext_lazy as _ token_cache = cache.caches['token_cache'] @@ -59,19 +59,19 @@ class OpenAIKeyAuth(TokenAuthentication): auth = auth.replace('Bearer ', '') # 未认证 if auth is None: - raise AppAuthenticationFailed(1003, '未登录,请先登录') + raise AppAuthenticationFailed(1003, _('Not logged in, please log in first')) try: token_details = TokenDetails(auth) for handle in handles: if handle.support(request, auth, token_details.get_token_details): return handle.handle(request, auth, token_details.get_token_details) - raise AppAuthenticationFailed(1002, 
"身份验证信息不正确!非法用户") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) except Exception as e: traceback.format_exc() if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed) or isinstance(e, AppApiException): raise e - raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) class TokenAuth(TokenAuthentication): @@ -80,16 +80,16 @@ class TokenAuth(TokenAuthentication): auth = request.META.get('HTTP_AUTHORIZATION') # 未认证 if auth is None: - raise AppAuthenticationFailed(1003, '未登录,请先登录') + raise AppAuthenticationFailed(1003, _('Not logged in, please log in first')) try: token_details = TokenDetails(auth) for handle in handles: if handle.support(request, auth, token_details.get_token_details): return handle.handle(request, auth, token_details.get_token_details) - raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) except Exception as e: traceback.format_exc() if isinstance(e, AppEmbedIdentityFailed) or isinstance(e, AppChatNumOutOfBoundsFailed) or isinstance(e, AppApiException): raise e - raise AppAuthenticationFailed(1002, "身份验证信息不正确!非法用户") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect! illegal user')) diff --git a/apps/common/auth/authentication.py b/apps/common/auth/authentication.py index d692d6182..e11c9d552 100644 --- a/apps/common/auth/authentication.py +++ b/apps/common/auth/authentication.py @@ -11,7 +11,7 @@ from typing import List from common.constants.permission_constants import ViewPermission, CompareConstants, RoleConstants, PermissionConstants, \ Permission from common.exception.app_exception import AppUnauthorizedFailed - +from django.utils.translation import gettext_lazy as _ def exist_permissions_by_permission_constants(user_permission: List[PermissionConstants], permission_list: List[PermissionConstants]): @@ -91,7 +91,7 @@ def has_permissions(*permission, compare=CompareConstants.OR): # 判断是否有权限 if any(exit_list) if compare == CompareConstants.OR else all(exit_list): return func(view, request, **kwargs) - raise AppUnauthorizedFailed(403, "没有权限访问") + raise AppUnauthorizedFailed(403, _('No permission to access')) return run diff --git a/apps/common/auth/handle/impl/application_key.py b/apps/common/auth/handle/impl/application_key.py index b35ef80fc..bddcfd43a 100644 --- a/apps/common/auth/handle/impl/application_key.py +++ b/apps/common/auth/handle/impl/application_key.py @@ -13,15 +13,16 @@ from common.auth.handle.auth_base_handle import AuthBaseHandle from common.constants.authentication_type import AuthenticationType from common.constants.permission_constants import Permission, Group, Operate, RoleConstants, Auth from common.exception.app_exception import AppAuthenticationFailed +from django.utils.translation import gettext_lazy as _ class ApplicationKey(AuthBaseHandle): def handle(self, request, token: str, get_token_details): application_api_key = QuerySet(ApplicationApiKey).filter(secret_key=token).first() if application_api_key is None: - raise AppAuthenticationFailed(500, "secret_key 无效") + raise AppAuthenticationFailed(500, _('Secret key is invalid')) if not application_api_key.is_active: - raise AppAuthenticationFailed(500, "secret_key 无效") + raise AppAuthenticationFailed(500, _('Secret key is invalid')) permission_list = [Permission(group=Group.APPLICATION, 
operate=Operate.USE, dynamic_tag=str( diff --git a/apps/common/auth/handle/impl/public_access_token.py b/apps/common/auth/handle/impl/public_access_token.py index 2b44a9ac9..fdcff4021 100644 --- a/apps/common/auth/handle/impl/public_access_token.py +++ b/apps/common/auth/handle/impl/public_access_token.py @@ -15,7 +15,7 @@ from common.constants.permission_constants import RoleConstants, Permission, Gro from common.exception.app_exception import AppAuthenticationFailed, ChatException from common.models.db_model_manage import DBModelManage from common.util.common import password_encrypt - +from django.utils.translation import gettext_lazy as _ class PublicAccessToken(AuthBaseHandle): def support(self, request, token: str, get_token_details): @@ -45,13 +45,13 @@ class PublicAccessToken(AuthBaseHandle): if application_setting.authentication_value.get('type') != authentication.get( 'type') or password_encrypt( application_setting.authentication_value.get('value')) != authentication.get('value'): - raise ChatException(1002, "身份验证信息不正确") + raise ChatException(1002, _('Authentication information is incorrect')) if application_access_token is None: - raise AppAuthenticationFailed(1002, "身份验证信息不正确") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect')) if not application_access_token.is_active: - raise AppAuthenticationFailed(1002, "身份验证信息不正确") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect')) if not application_access_token.access_token == auth_details.get('access_token'): - raise AppAuthenticationFailed(1002, "身份验证信息不正确") + raise AppAuthenticationFailed(1002, _('Authentication information is incorrect')) return application_access_token.application.user, Auth( role_list=[RoleConstants.APPLICATION_ACCESS_TOKEN], diff --git a/apps/common/auth/handle/impl/user_token.py b/apps/common/auth/handle/impl/user_token.py index 6559797ba..dbb6bd2b5 100644 --- a/apps/common/auth/handle/impl/user_token.py +++ b/apps/common/auth/handle/impl/user_token.py @@ -17,7 +17,7 @@ from users.models import User from django.core import cache from users.models.user import get_user_dynamics_permission - +from django.utils.translation import gettext_lazy as _ token_cache = cache.caches['token_cache'] @@ -31,7 +31,7 @@ class UserToken(AuthBaseHandle): def handle(self, request, token: str, get_token_details): cache_token = token_cache.get(token) if cache_token is None: - raise AppAuthenticationFailed(1002, "登录过期") + raise AppAuthenticationFailed(1002, _('Login expired')) auth_details = get_token_details() user = QuerySet(User).get(id=auth_details['id']) # 续期 diff --git a/apps/common/constants/exception_code_constants.py b/apps/common/constants/exception_code_constants.py index ba7a8105f..821318d23 100644 --- a/apps/common/constants/exception_code_constants.py +++ b/apps/common/constants/exception_code_constants.py @@ -9,6 +9,7 @@ from enum import Enum from common.exception.app_exception import AppApiException +from django.utils.translation import gettext_lazy as _ class ExceptionCodeConstantsValue: @@ -27,13 +28,16 @@ class ExceptionCodeConstantsValue: class ExceptionCodeConstants(Enum): - INCORRECT_USERNAME_AND_PASSWORD = ExceptionCodeConstantsValue(1000, "用户名或者密码不正确") - NOT_AUTHENTICATION = ExceptionCodeConstantsValue(1001, "请先登录,并携带用户Token") - EMAIL_SEND_ERROR = ExceptionCodeConstantsValue(1002, "邮件发送失败") - EMAIL_FORMAT_ERROR = ExceptionCodeConstantsValue(1003, "邮箱格式错误") - EMAIL_IS_EXIST = ExceptionCodeConstantsValue(1004, "邮箱已经被注册,请勿重复注册") - EMAIL_IS_NOT_EXIST = 
ExceptionCodeConstantsValue(1005, "邮箱尚未注册,请先注册") - CODE_ERROR = ExceptionCodeConstantsValue(1005, "验证码不正确,或者验证码过期") - USERNAME_IS_EXIST = ExceptionCodeConstantsValue(1006, "用户名已被使用,请使用其他用户名") - USERNAME_ERROR = ExceptionCodeConstantsValue(1006, "用户名不能为空,并且长度在6-20") - PASSWORD_NOT_EQ_RE_PASSWORD = ExceptionCodeConstantsValue(1007, "密码与确认密码不一致") + INCORRECT_USERNAME_AND_PASSWORD = ExceptionCodeConstantsValue(1000, _('The username or password is incorrect')) + NOT_AUTHENTICATION = ExceptionCodeConstantsValue(1001, _('Please log in first and bring the user Token')) + EMAIL_SEND_ERROR = ExceptionCodeConstantsValue(1002, _('Email sending failed')) + EMAIL_FORMAT_ERROR = ExceptionCodeConstantsValue(1003, _('Email format error')) + EMAIL_IS_EXIST = ExceptionCodeConstantsValue(1004, _('The email has been registered, please log in directly')) + EMAIL_IS_NOT_EXIST = ExceptionCodeConstantsValue(1005, _('The email is not registered, please register first')) + CODE_ERROR = ExceptionCodeConstantsValue(1005, + _('The verification code is incorrect or the verification code has expired')) + USERNAME_IS_EXIST = ExceptionCodeConstantsValue(1006, _('The username is already in use, please use a different username')) + USERNAME_ERROR = ExceptionCodeConstantsValue(1006, + _('The username cannot be empty and must be between 6 and 20 characters long.')) + PASSWORD_NOT_EQ_RE_PASSWORD = ExceptionCodeConstantsValue(1007, + _('Password and confirmation password are inconsistent')) diff --git a/apps/common/constants/permission_constants.py b/apps/common/constants/permission_constants.py index 04f86bbc7..a5c198da7 100644 --- a/apps/common/constants/permission_constants.py +++ b/apps/common/constants/permission_constants.py @@ -7,7 +7,7 @@ """ from enum import Enum from typing import List - +from django.utils.translation import gettext_lazy as _ class Group(Enum): """ @@ -58,10 +58,10 @@ class Role: class RoleConstants(Enum): - ADMIN = Role("管理员", "管理员,预制目前不会使用", RoleGroup.USER) - USER = Role("用户", "用户所有权限", RoleGroup.USER) - APPLICATION_ACCESS_TOKEN = Role("会话", "只拥有应用会话框接口权限", RoleGroup.APPLICATION_ACCESS_TOKEN), - APPLICATION_KEY = Role("应用私钥", "应用私钥", RoleGroup.APPLICATION_KEY) + ADMIN = Role(_("ADMIN"), _('Administrator, reserved and not currently used'), RoleGroup.USER) + USER = Role(_("USER"), _('All user permissions'), RoleGroup.USER) + APPLICATION_ACCESS_TOKEN = Role(_('chat'), _('Only has application dialog interface permissions'), RoleGroup.APPLICATION_ACCESS_TOKEN), + APPLICATION_KEY = Role(_('Application private key'), _('Application private key'), RoleGroup.APPLICATION_KEY) class Permission: diff --git a/apps/common/event/__init__.py b/apps/common/event/__init__.py index fc5a08738..a76500346 100644 --- a/apps/common/event/__init__.py +++ b/apps/common/event/__init__.py @@ -9,7 +9,9 @@ import setting.models from setting.models import Model from .listener_manage import * -from common.db.sql_execute import update_execute +from django.utils.translation import gettext as _ + +from ..db.sql_execute import update_execute update_document_status_sql = """ UPDATE "public"."document" @@ -20,5 +22,5 @@ SET status ="replace"("replace"("replace"(status, '1', '3'), '0', '3'), '4', '3' def run(): # QuerySet(Document).filter(status__in=[Status.embedding, Status.queue_up]).update(**{'status': Status.error}) QuerySet(Model).filter(status=setting.models.Status.DOWNLOAD).update(status=setting.models.Status.ERROR, - meta={'message': "下载程序被中断,请重试"}) + meta={'message': _('The download process was interrupted, please try again')})
update_execute(update_document_status_sql, []) diff --git a/apps/common/event/listener_manage.py b/apps/common/event/listener_manage.py index e0d56e41c..8f3faf29e 100644 --- a/apps/common/event/listener_manage.py +++ b/apps/common/event/listener_manage.py @@ -25,6 +25,7 @@ from common.util.page_utils import page_desc from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping, TaskType, State from embedding.models import SourceType, SearchMode from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ max_kb_error = logging.getLogger(__file__) max_kb = logging.getLogger(__file__) @@ -86,11 +87,12 @@ class ListenerManagement: ListenerManagement.embedding_by_paragraph_data_list(data_list, paragraph_id_list=paragraph_id_list, embedding_model=embedding_model) except Exception as e: - max_kb_error.error(f'查询向量数据:{paragraph_id_list}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Query vector data: {paragraph_id_list} error {error} {traceback}').format( + paragraph_id_list=paragraph_id_list, error=str(e), traceback=traceback.format_exc())) @staticmethod def embedding_by_paragraph_data_list(data_list, paragraph_id_list, embedding_model: Embeddings): - max_kb.info(f'开始--->向量化段落:{paragraph_id_list}') + max_kb.info(_('Start--->Embedding paragraph: {paragraph_id_list}').format(paragraph_id_list=paragraph_id_list)) status = Status.success try: # 删除段落 @@ -102,11 +104,13 @@ class ListenerManagement: # 批量向量化 VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_save_function) except Exception as e: - max_kb_error.error(f'向量化段落:{paragraph_id_list}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Vectorized paragraph: {paragraph_id_list} error {error} {traceback}').format( + paragraph_id_list=paragraph_id_list, error=str(e), traceback=traceback.format_exc())) status = Status.error finally: QuerySet(Paragraph).filter(id__in=paragraph_id_list).update(**{'status': status}) - max_kb.info(f'结束--->向量化段落:{paragraph_id_list}') + max_kb.info( + _('End--->Embedding paragraph: {paragraph_id_list}').format(paragraph_id_list=paragraph_id_list)) @staticmethod def embedding_by_paragraph(paragraph_id, embedding_model: Embeddings): @@ -115,7 +119,7 @@ class ListenerManagement: @param paragraph_id: 段落id @param embedding_model: 向量模型 """ - max_kb.info(f"开始--->向量化段落:{paragraph_id}") + max_kb.info(_('Start--->Embedding paragraph: {paragraph_id}').format(paragraph_id=paragraph_id)) # 更新到开始状态 ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, State.STARTED) try: @@ -140,11 +144,12 @@ class ListenerManagement: ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, State.SUCCESS) except Exception as e: - max_kb_error.error(f'向量化段落:{paragraph_id}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Vectorized paragraph: {paragraph_id} error {error} {traceback}').format( + paragraph_id=paragraph_id, error=str(e), traceback=traceback.format_exc())) ListenerManagement.update_status(QuerySet(Paragraph).filter(id=paragraph_id), TaskType.EMBEDDING, State.FAILURE) finally: - max_kb.info(f'结束--->向量化段落:{paragraph_id}') + max_kb.info(_('End--->Embedding paragraph: {paragraph_id}').format(paragraph_id=paragraph_id)) @staticmethod def embedding_by_data_list(data_list: List, embedding_model: Embeddings): @@ -258,7 +263,8 @@ class ListenerManagement: if is_the_task_interrupted(): return - max_kb.info(f"开始--->向量化文档:{document_id}") + 
max_kb.info(_('Start--->Embedding document: {document_id}').format(document_id=document_id) + ) # 批量修改状态为PADDING ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING, State.STARTED) @@ -279,11 +285,12 @@ class ListenerManagement: document_id)), is_the_task_interrupted) except Exception as e: - max_kb_error.error(f'向量化文档:{document_id}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Vectorized document: {document_id} error {error} {traceback}').format( + document_id=document_id, error=str(e), traceback=traceback.format_exc())) finally: ListenerManagement.post_update_document_status(document_id, TaskType.EMBEDDING) ListenerManagement.get_aggregation_document_status(document_id)() - max_kb.info(f"结束--->向量化文档:{document_id}") + max_kb.info(_('End--->Embedding document: {document_id}').format(document_id=document_id)) un_lock('embedding' + str(document_id)) @staticmethod @@ -294,17 +301,18 @@ class ListenerManagement: @param embedding_model 向量模型 :return: None """ - max_kb.info(f"开始--->向量化数据集:{dataset_id}") + max_kb.info(_('Start--->Embedding dataset: {dataset_id}').format(dataset_id=dataset_id)) try: ListenerManagement.delete_embedding_by_dataset(dataset_id) document_list = QuerySet(Document).filter(dataset_id=dataset_id) - max_kb.info(f"数据集文档:{[d.name for d in document_list]}") + max_kb.info(_('Start--->Embedding document: {document_list}').format(document_list=document_list)) for document in document_list: ListenerManagement.embedding_by_document(document.id, embedding_model=embedding_model) except Exception as e: - max_kb_error.error(f'向量化数据集:{dataset_id}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error(_('Vectorized dataset: {dataset_id} error {error} {traceback}').format( + dataset_id=dataset_id, error=str(e), traceback=traceback.format_exc())) finally: - max_kb.info(f"结束--->向量化数据集:{dataset_id}") + max_kb.info(_('End--->Embedding dataset: {dataset_id}').format(dataset_id=dataset_id)) @staticmethod def delete_embedding_by_document(document_id): diff --git a/apps/common/field/common.py b/apps/common/field/common.py index 3025ec5d8..61c852d35 100644 --- a/apps/common/field/common.py +++ b/apps/common/field/common.py @@ -7,7 +7,7 @@ @desc: """ from rest_framework import serializers - +from django.utils.translation import gettext_lazy as _ class ObjectField(serializers.Field): def __init__(self, model_type_list, **kwargs): @@ -18,7 +18,7 @@ class ObjectField(serializers.Field): for model_type in self.model_type_list: if isinstance(data, model_type): return data - self.fail('message类型错误', value=data) + self.fail(_('Message type error'), value=data) def to_representation(self, value): return value @@ -31,7 +31,7 @@ class InstanceField(serializers.Field): def to_internal_value(self, data): if not isinstance(data, self.model_type): - self.fail('message类型错误', value=data) + self.fail(_('Message type error'), value=data) return data def to_representation(self, value): @@ -42,7 +42,7 @@ class FunctionField(serializers.Field): def to_internal_value(self, data): if not callable(data): - self.fail('不是一个函數', value=data) + self.fail(_('not a function'), value=data) return data def to_representation(self, value): diff --git a/apps/common/forms/base_field.py b/apps/common/forms/base_field.py index dedd78d83..b0cf0f202 100644 --- a/apps/common/forms/base_field.py +++ b/apps/common/forms/base_field.py @@ -11,6 +11,7 @@ from typing import List, Dict from common.exception.app_exception import AppApiException from common.forms.label.base_label import 
BaseLabel +from django.utils.translation import gettext_lazy as _ class TriggerType(Enum): @@ -60,7 +61,7 @@ class BaseField: field_label = self.label.label if hasattr(self.label, 'to_dict') else self.label if self.required and value is None: raise AppApiException(500, - f"{field_label} 为必填参数") + _('The field {field_label} is required').format(field_label=field_label)) def to_dict(self, **kwargs): return { diff --git a/apps/common/forms/slider_field.py b/apps/common/forms/slider_field.py index 6bf3625d6..3919891fd 100644 --- a/apps/common/forms/slider_field.py +++ b/apps/common/forms/slider_field.py @@ -10,6 +10,7 @@ from typing import Dict from common.exception.app_exception import AppApiException from common.forms import BaseField, TriggerType, BaseLabel +from django.utils.translation import gettext_lazy as _ class SliderField(BaseField): @@ -52,7 +53,13 @@ class SliderField(BaseField): if value is not None: if value < self.attrs.get('min'): raise AppApiException(500, - f"{field_label} 不能小于{self.attrs.get('min')}") + _("The {field_label} cannot be less than {min}").format(field_label=field_label, + min=self.attrs.get( + 'min'))) + if value > self.attrs.get('max'): raise AppApiException(500, - f"{field_label} 不能大于{self.attrs.get('max')}") + _("The {field_label} cannot be greater than {max}").format( + field_label=field_label, + max=self.attrs.get( + 'max'))) diff --git a/apps/common/handle/handle_exception.py b/apps/common/handle/handle_exception.py index bff0c4c5c..21e8c8ef1 100644 --- a/apps/common/handle/handle_exception.py +++ b/apps/common/handle/handle_exception.py @@ -14,7 +14,7 @@ from rest_framework.views import exception_handler from common.exception.app_exception import AppApiException from common.response import result - +from django.utils.translation import gettext_lazy as _ def to_result(key, args, parent_key=None): """ @@ -27,7 +27,7 @@ def to_result(key, args, parent_key=None): error_detail = list(filter( lambda d: True if isinstance(d, ErrorDetail) else True if isinstance(d, dict) and len( d.keys()) > 0 else False, - (args[0] if len(args) > 0 else {key: [ErrorDetail('未知异常', code='unknown')]}).get(key)))[0] + (args[0] if len(args) > 0 else {key: [ErrorDetail(_('Unknown exception'), code='unknown')]}).get(key)))[0] if isinstance(error_detail, dict): return list(map(lambda k: to_result(k, args=[error_detail], diff --git a/apps/common/handle/impl/doc_split_handle.py b/apps/common/handle/impl/doc_split_handle.py index 4170eb70e..d97a8e45b 100644 --- a/apps/common/handle/impl/doc_split_handle.py +++ b/apps/common/handle/impl/doc_split_handle.py @@ -21,6 +21,7 @@ from docx.text.paragraph import Paragraph from common.handle.base_split_handle import BaseSplitHandle from common.util.split_model import SplitModel from dataset.models import Image +from django.utils.translation import gettext_lazy as _ default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'), re.compile('(?<=\\n)(?向量化数据集:{dataset_id}") + max_kb.info(_('Start--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id)) try: ListenerManagement.delete_embedding_by_dataset(dataset_id) document_list = QuerySet(Document).filter(dataset_id=dataset_id) - max_kb.info(f"数据集文档:{[d.name for d in document_list]}") + max_kb.info(_('Dataset documentation: {document_names}').format( + document_names=", ".join([d.name for d in document_list]))) for document in document_list: try: embedding_by_document.delay(document.id, model_id) except Exception as e: pass except Exception as e: - 
max_kb_error.error(f'向量化数据集:{dataset_id}出现错误{str(e)}{traceback.format_exc()}') + max_kb_error.error( + _('Vectorized dataset: {dataset_id} error {error} {traceback}').format(dataset_id=dataset_id, + error=str(e), + traceback=traceback.format_exc())) finally: - max_kb.info(f"结束--->向量化数据集:{dataset_id}") + max_kb.info(_('End--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id)) def embedding_by_problem(args, model_id): diff --git a/apps/function_lib/serializers/function_lib_serializer.py b/apps/function_lib/serializers/function_lib_serializer.py index 3f5655fba..df98c981a 100644 --- a/apps/function_lib/serializers/function_lib_serializer.py +++ b/apps/function_lib/serializers/function_lib_serializer.py @@ -20,6 +20,7 @@ from common.util.field_message import ErrMessage from common.util.function_code import FunctionExecutor from function_lib.models.function import FunctionLib from smartdoc.const import CONFIG +from django.utils.translation import gettext_lazy as _ function_executor = FunctionExecutor(CONFIG.get('SANDBOX')) @@ -32,72 +33,72 @@ class FunctionLibModelSerializer(serializers.ModelSerializer): class FunctionLibInputField(serializers.Serializer): - name = serializers.CharField(required=True, error_messages=ErrMessage.char('变量名')) - is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("是否必填")) - type = serializers.CharField(required=True, error_messages=ErrMessage.char("类型"), validators=[ + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('variable name'))) + is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('required'))) + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')), validators=[ validators.RegexValidator(regex=re.compile("^string|int|dict|array|float$"), - message="字段只支持string|int|dict|array|float", code=500) + message=_('fields only support string|int|dict|array|float'), code=500) ]) - source = serializers.CharField(required=True, error_messages=ErrMessage.char("来源"), validators=[ + source = serializers.CharField(required=True, error_messages=ErrMessage.char(_('source')), validators=[ validators.RegexValidator(regex=re.compile("^custom|reference$"), - message="字段只支持custom|reference", code=500) + message=_('The field only supports custom|reference'), code=500) ]) class DebugField(serializers.Serializer): - name = serializers.CharField(required=True, error_messages=ErrMessage.char('变量名')) + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('variable name'))) value = serializers.CharField(required=False, allow_blank=True, allow_null=True, - error_messages=ErrMessage.char("变量值")) + error_messages=ErrMessage.char(_('variable value'))) class DebugInstance(serializers.Serializer): debug_field_list = DebugField(required=True, many=True) input_field_list = FunctionLibInputField(required=True, many=True) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("函数内容")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function content'))) class EditFunctionLib(serializers.Serializer): name = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数名称")) + error_messages=ErrMessage.char(_('function name'))) desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数描述")) code =
serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数内容")) + error_messages=ErrMessage.char(_('function content'))) input_field_list = FunctionLibInputField(required=False, many=True) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char('是否可用')) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) class CreateFunctionLib(serializers.Serializer): - name = serializers.CharField(required=True, error_messages=ErrMessage.char("函数名称")) + name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function name'))) desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数描述")) + error_messages=ErrMessage.char(_('function description'))) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("函数内容")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_('function content'))) input_field_list = FunctionLibInputField(required=True, many=True) - permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char("权限"), validators=[ + permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('permission')), validators=[ validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), message="权限只支持PUBLIC|PRIVATE", code=500) ]) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char('是否可用')) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) class FunctionLibSerializer(serializers.Serializer): class Query(serializers.Serializer): name = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数名称")) + error_messages=ErrMessage.char(_('function name'))) desc = serializers.CharField(required=False, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数描述")) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("是否可用")) + error_messages=ErrMessage.char(_('function description'))) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_('Is active'))) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) select_user_id = serializers.CharField(required=False, allow_null=True, allow_blank=True) def get_query_set(self): @@ -126,7 +127,7 @@ class FunctionLibSerializer(serializers.Serializer): post_records_handler=lambda row: FunctionLibModelSerializer(row).data) class Create(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) def insert(self, instance, with_valid=True): if with_valid: @@ -142,7 +143,7 @@ class FunctionLibSerializer(serializers.Serializer): return FunctionLibModelSerializer(function_lib).data class Debug(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) def debug(self, debug_instance, with_valid=True): if with_valid: @@ -165,7 +166,7 @@ class FunctionLibSerializer(serializers.Serializer): if len(result) > 0: return result[-1].get('value') if 
is_required: - raise AppApiException(500, f"{name}字段未设置值") + raise AppApiException(500, f"{name} " + _('field has no value set')) return None @staticmethod @@ -181,24 +182,26 @@ v = json.loads(value) if isinstance(v, dict): return v - raise Exception("类型错误") + raise Exception(_('type error')) if _type == 'array': v = json.loads(value) if isinstance(v, list): return v - raise Exception("类型错误") + raise Exception(_('type error')) return value except Exception as e: - raise AppApiException(500, f'字段:{name}类型:{_type}值:{value}类型转换错误') + raise AppApiException(500, _('Field: {name} Type: {_type} Value: {value} Type conversion error').format( + name=name, _type=_type, value=value + )) class Operate(serializers.Serializer): - id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("函数id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('function id'))) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(FunctionLib).filter(id=self.data.get('id'), user_id=self.data.get('user_id')).exists(): - raise AppApiException(500, '函数不存在') + raise AppApiException(500, _('Function does not exist')) def delete(self, with_valid=True): if with_valid: @@ -221,6 +224,6 @@ super().is_valid(raise_exception=True) if not QuerySet(FunctionLib).filter(id=self.data.get('id')).filter( Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC')).exists(): - raise AppApiException(500, '函数不存在') + raise AppApiException(500, _('Function does not exist')) function_lib = QuerySet(FunctionLib).filter(id=self.data.get('id')).first() return FunctionLibModelSerializer(function_lib).data diff --git a/apps/function_lib/serializers/py_lint_serializer.py b/apps/function_lib/serializers/py_lint_serializer.py index f58ce603f..6fa6d4c44 100644 --- a/apps/function_lib/serializers/py_lint_serializer.py +++ b/apps/function_lib/serializers/py_lint_serializer.py @@ -15,11 +15,12 @@ from rest_framework import serializers from common.util.field_message import ErrMessage from smartdoc.const import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ class PyLintInstance(serializers.Serializer): code = serializers.CharField(required=True, allow_null=True, allow_blank=True, - error_messages=ErrMessage.char("函数内容")) + error_messages=ErrMessage.char(_('function content'))) def to_dict(message, file_name): diff --git a/apps/function_lib/swagger_api/function_lib_api.py b/apps/function_lib/swagger_api/function_lib_api.py index 9ab7f7cd3..90e745a28 100644 --- a/apps/function_lib/swagger_api/function_lib_api.py +++ b/apps/function_lib/swagger_api/function_lib_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class FunctionLibApi(ApiMixin): @@ -19,13 +20,19 @@ required=['id', 'name', 'desc', 'code', 'input_field_list', 'create_time', 'update_time'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"), - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="函数名称", description="函数名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="函数描述", description="函数描述"), - 'code':
openapi.Schema(type=openapi.TYPE_STRING, title="函数内容", description="函数内容"), - 'input_field_list': openapi.Schema(type=openapi.TYPE_STRING, title="输入字段", description="输入字段"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description="创建时间"), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description="修改时间"), + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'input_field_list': openapi.Schema(type=openapi.TYPE_STRING, title=_('input field'), + description=_('input field')), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'), + description=_('create time')), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'), + description=_('update time')), } ) @@ -36,12 +43,12 @@ class FunctionLibApi(ApiMixin): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='函数名称'), + description=_('function name')), openapi.Parameter(name='desc', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='函数描述') + description=_('function description')), ] class Debug(ApiMixin): @@ -52,42 +59,45 @@ class FunctionLibApi(ApiMixin): required=[], properties={ 'debug_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, - description="输入变量列表", + description=_('Input variable list'), items=openapi.Schema(type=openapi.TYPE_OBJECT, required=[], properties={ 'name': openapi.Schema( type=openapi.TYPE_STRING, - title="变量名", - description="变量名"), + title=_('variable name'), + description=_('variable name')), 'value': openapi.Schema( type=openapi.TYPE_STRING, - title="变量值", - description="变量值"), + title=_('variable value'), + description=_('variable value')), })), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="函数内容", description="函数内容"), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, - description="输入变量列表", + description=_('Input variable list'), items=openapi.Schema(type=openapi.TYPE_OBJECT, required=['name', 'is_required', 'source'], properties={ 'name': openapi.Schema( type=openapi.TYPE_STRING, - title="变量名", - description="变量名"), + title=_('variable name'), + description=_('variable name')), 'is_required': openapi.Schema( type=openapi.TYPE_BOOLEAN, - title="是否必填", - description="是否必填"), + title=_('required'), + description=_('required')), 'type': openapi.Schema( type=openapi.TYPE_STRING, - title="字段类型", - description="字段类型 string|int|dict|array|float" + title=_('type'), + description=_( + 'Field type string|int|dict|array|float') ), 'source': openapi.Schema( type=openapi.TYPE_STRING, - title="来源", - description="来源只支持custom|reference"), + title=_('source'), + description=_( + 'The source only supports custom|reference')), })) } @@ -100,33 +110,40 @@ class FunctionLibApi(ApiMixin): type=openapi.TYPE_OBJECT, required=[], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="函数名称", description="函数名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="函数描述", description="函数描述"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, 
title="函数内容", description="函数内容"), - 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title="权限", description="权限"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), + description=_('permission')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active')), 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, - description="输入变量列表", + description=_('Input variable list'), items=openapi.Schema(type=openapi.TYPE_OBJECT, required=[], properties={ 'name': openapi.Schema( type=openapi.TYPE_STRING, - title="变量名", - description="变量名"), + title=_('variable name'), + description=_('variable name')), 'is_required': openapi.Schema( type=openapi.TYPE_BOOLEAN, - title="是否必填", - description="是否必填"), + title=_('required'), + description=_('required')), 'type': openapi.Schema( type=openapi.TYPE_STRING, - title="字段类型", - description="字段类型 string|int|dict|array|float" + title=_('type'), + description=_( + 'Field type string|int|dict|array|float') ), 'source': openapi.Schema( type=openapi.TYPE_STRING, - title="来源", - description="来源只支持custom|reference"), + title=_('source'), + description=_( + 'The source only supports custom|reference')), })) } @@ -139,33 +156,40 @@ class FunctionLibApi(ApiMixin): type=openapi.TYPE_OBJECT, required=['name', 'code', 'input_field_list', 'permission_type'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="函数名称", description="函数名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="函数描述", description="函数描述"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="函数内容", description="函数内容"), - 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title="权限", description="权限"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'), + description=_('function name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'), + description=_('function description')), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')), + 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), + description=_('permission')), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'), + description=_('Is active')), 'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY, - description="输入变量列表", + description=_('Input variable list'), items=openapi.Schema(type=openapi.TYPE_OBJECT, required=['name', 'is_required', 'source'], properties={ 'name': openapi.Schema( type=openapi.TYPE_STRING, - title="变量名", - description="变量名"), + title=_('variable name'), + description=_('variable name')), 'is_required': openapi.Schema( type=openapi.TYPE_BOOLEAN, - title="是否必填", - description="是否必填"), + title=_('required'), + description=_('required')), 'type': openapi.Schema( type=openapi.TYPE_STRING, - title="字段类型", - description="字段类型 string|int|dict|array|float" + title=_('type'), + 
description=_( + 'Field type string|int|dict|array|float') ), 'source': openapi.Schema( type=openapi.TYPE_STRING, - title="来源", - description="来源只支持custom|reference"), + title=_('source'), + description=_( + 'The source only supports custom|reference')), })) } diff --git a/apps/function_lib/swagger_api/py_lint_api.py b/apps/function_lib/swagger_api/py_lint_api.py index 40c44a498..1577dfe60 100644 --- a/apps/function_lib/swagger_api/py_lint_api.py +++ b/apps/function_lib/swagger_api/py_lint_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class PyLintApi(ApiMixin): @@ -18,6 +19,7 @@ class PyLintApi(ApiMixin): type=openapi.TYPE_OBJECT, required=['code'], properties={ - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="函数内容", description="函数内容") + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'), + description=_('function content')) } ) diff --git a/apps/function_lib/views/function_lib_views.py b/apps/function_lib/views/function_lib_views.py index eadc411a4..e24085083 100644 --- a/apps/function_lib/views/function_lib_views.py +++ b/apps/function_lib/views/function_lib_views.py @@ -16,15 +16,16 @@ from common.constants.permission_constants import RoleConstants from common.response import result from function_lib.serializers.function_lib_serializer import FunctionLibSerializer from function_lib.swagger_api.function_lib_api import FunctionLibApi +from django.utils.translation import gettext_lazy as _ class FunctionLibView(APIView): authentication_classes = [TokenAuth] @action(methods=["GET"], detail=False) - @swagger_auto_schema(operation_summary="获取函数列表", - operation_id="获取函数列表", - tags=["函数库"], + @swagger_auto_schema(operation_summary=_('Get function list'), + operation_id=_('Get function list'), + tags=[_('Function')], manual_parameters=FunctionLibApi.Query.get_request_params_api()) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def get(self, request: Request): @@ -35,10 +36,10 @@ class FunctionLibView(APIView): 'user_id': request.user.id}).list()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建函数", - operation_id="创建函数", + @swagger_auto_schema(operation_summary=_('Create function'), + operation_id=_('Create function'), request_body=FunctionLibApi.Create.get_request_body_api(), - tags=['函数库']) + tags=[_('Function')]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def post(self, request: Request): return result.success(FunctionLibSerializer.Create(data={'user_id': request.user.id}).insert(request.data)) @@ -47,10 +48,10 @@ class FunctionLibView(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="调试函数", - operation_id="调试函数", + @swagger_auto_schema(operation_summary=_('Debug function'), + operation_id=_('Debug function'), request_body=FunctionLibApi.Debug.get_request_body_api(), - tags=['函数库']) + tags=[_('Function')]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def post(self, request: Request): return result.success( @@ -61,10 +62,10 @@ class FunctionLibView(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改函数", - operation_id="修改函数", + @swagger_auto_schema(operation_summary=_('Update function'), + operation_id=_('Update function'), request_body=FunctionLibApi.Edit.get_request_body_api(), - tags=['函数库']) + tags=[_('Function')]) 
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def put(self, request: Request, function_lib_id: str): return result.success( @@ -72,18 +73,18 @@ class FunctionLibView(APIView): request.data)) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除函数", - operation_id="删除函数", - tags=['函数库']) + @swagger_auto_schema(operation_summary=_('Delete function'), + operation_id=_('Delete function'), + tags=[_('Function')]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def delete(self, request: Request, function_lib_id: str): return result.success( FunctionLibSerializer.Operate(data={'user_id': request.user.id, 'id': function_lib_id}).delete()) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取函数详情", - operation_id="获取函数详情", - tags=['函数库']) + @swagger_auto_schema(operation_summary=_('Get function details'), + operation_id=_('Get function details'), + tags=[_('Function')]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def get(self, request: Request, function_lib_id: str): return result.success( @@ -93,12 +94,12 @@ class FunctionLibView(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="分页获取函数列表", - operation_id="分页获取函数列表", + @swagger_auto_schema(operation_summary=_('Get function list by pagination'), + operation_id=_('Get function list by pagination'), manual_parameters=result.get_page_request_params( FunctionLibApi.Query.get_request_params_api()), responses=result.get_page_api_response(FunctionLibApi.get_response_body_api()), - tags=['函数库']) + tags=[_('Function')]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def get(self, request: Request, current_page: int, page_size: int): return result.success( diff --git a/apps/function_lib/views/py_lint.py b/apps/function_lib/views/py_lint.py index 15fc45a9a..acfe8fcc6 100644 --- a/apps/function_lib/views/py_lint.py +++ b/apps/function_lib/views/py_lint.py @@ -16,16 +16,17 @@ from common.constants.permission_constants import RoleConstants from common.response import result from function_lib.serializers.py_lint_serializer import PyLintSerializer from function_lib.swagger_api.py_lint_api import PyLintApi +from django.utils.translation import gettext_lazy as _ class PyLintView(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="校验代码", - operation_id="校验代码", + @swagger_auto_schema(operation_summary=_('Check code'), + operation_id=_('Check code'), request_body=PyLintApi.get_request_body_api(), - tags=['函数库']) + tags=[_('Function')]) @has_permissions(RoleConstants.ADMIN, RoleConstants.USER) def post(self, request: Request): return result.success(PyLintSerializer(data={'user_id': request.user.id}).pylint(request.data)) diff --git a/apps/locales/en_US/LC_MESSAGES/django.po b/apps/locales/en_US/LC_MESSAGES/django.po new file mode 100644 index 000000000..1a3968944 --- /dev/null +++ b/apps/locales/en_US/LC_MESSAGES/django.po @@ -0,0 +1,26 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR , YEAR. 
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-06 09:58+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: LANGUAGE <LL@li.org>\n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: .\apps\application\serializers\application_serializers.py:196 +msgid "application name" +msgstr "" + +#: .\apps\application\serializers\application_serializers.py:199 +msgid "application describe" +msgstr "" diff --git a/apps/locales/zh_CN/LC_MESSAGES/django.po b/apps/locales/zh_CN/LC_MESSAGES/django.po new file mode 100644 index 000000000..1a3968944 --- /dev/null +++ b/apps/locales/zh_CN/LC_MESSAGES/django.po @@ -0,0 +1,26 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-06 09:58+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n" +"Language-Team: LANGUAGE <LL@li.org>\n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" + +#: .\apps\application\serializers\application_serializers.py:196 +msgid "application name" +msgstr "" + +#: .\apps\application\serializers\application_serializers.py:199 +msgid "application describe" +msgstr "" diff --git a/apps/locales/zh_Hant/LC_MESSAGES/django.po b/apps/locales/zh_Hant/LC_MESSAGES/django.po new file mode 100644 index 000000000..545dbd738 --- /dev/null +++ b/apps/locales/zh_Hant/LC_MESSAGES/django.po @@ -0,0 +1,26 @@ +# SOME DESCRIPTIVE TITLE. +# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER +# This file is distributed under the same license as the PACKAGE package. +# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: PACKAGE VERSION\n" +"Report-Msgid-Bugs-To: \n" +"POT-Creation-Date: 2025-01-06 09:58+0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"Language: \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=UTF-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Plural-Forms: nplurals=1; plural=0;\n" +#: .\apps\application\serializers\application_serializers.py:196 +msgid "application name" +msgstr "" + +#: .\apps\application\serializers\application_serializers.py:199 +msgid "application describe" +msgstr "" diff --git a/apps/setting/models_provider/base_model_provider.py b/apps/setting/models_provider/base_model_provider.py index 35c3ef029..88b782a36 100644 --- a/apps/setting/models_provider/base_model_provider.py +++ b/apps/setting/models_provider/base_model_provider.py @@ -14,7 +14,7 @@ from typing import Dict, Iterator, Type, List from pydantic.v1 import BaseModel from common.exception.app_exception import AppApiException - +from django.utils.translation import gettext_lazy as _ class DownModelChunkStatus(Enum): success = "success" @@ -60,7 +60,7 @@ class IModelProvider(ABC): def get_model_list(self, model_type): if model_type is None: - raise AppApiException(500, '模型类型不能为空') + raise AppApiException(500, _('Model type cannot be empty')) return self.get_model_info_manage().get_model_list_by_model_type(model_type) def get_model_credential(self, model_type, model_name): @@ -84,7 +84,7 @@ class IModelProvider(ABC): return 3 def down_model(self, model_type: str, model_name, model_credential: Dict[str, object]) -> Iterator[DownModelChunk]: - raise AppApiException(500, "当前平台不支持下载模型") + raise AppApiException(500, _('The current platform does not support downloading models')) class MaxKBBaseModel(ABC): @@ -149,13 +149,13 @@ class BaseModelCredential(ABC): class ModelTypeConst(Enum): - LLM = {'code': 'LLM', 'message': '大语言模型'} - EMBEDDING = {'code': 'EMBEDDING', 'message': '向量模型'} - STT = {'code': 'STT', 'message': '语音识别'} - TTS = {'code': 'TTS', 'message': '语音合成'} - IMAGE = {'code': 'IMAGE', 'message': '图片理解'} - TTI = {'code': 'TTI', 'message': '图片生成'} - RERANKER = {'code': 'RERANKER', 'message': '重排模型'} + LLM = {'code': 'LLM', 'message': _('large language model')} + EMBEDDING = {'code': 'EMBEDDING', 'message': _('vector model')} + STT = {'code': 'STT', 'message': _('speech to text')} + TTS = {'code': 'TTS', 'message': _('text to speech')} + IMAGE = {'code': 'IMAGE', 'message': _('picture understanding')} + TTI = {'code': 'TTI', 'message': _('text to image')} + RERANKER = {'code': 'RERANKER', 'message': _('re-ranking')} class ModelInfo: @@ -229,7 +229,7 @@ class ModelInfoManage: def get_model_info(self, model_type, model_name) -> ModelInfo: model_info = self.model_dict.get(model_type, {}).get(model_name, self.default_model_dict.get(model_type)) if model_info is None: - raise AppApiException(500, '模型不支持') + raise AppApiException(500, _('The model does not support')) return model_info class builder: diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py index c508a5002..5c90dde7c 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/aliyun_bai_lian_model_provider.py @@ -28,6 +28,7 @@ from 
setting.models_provider.impl.aliyun_bai_lian_model_provider.model.stt impor from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tti import QwenTextToImageModel from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.tts import AliyunBaiLianTextToSpeech from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ aliyun_bai_lian_model_credential = AliyunBaiLianRerankerCredential() aliyun_bai_lian_tts_model_credential = AliyunBaiLianTTSModelCredential() @@ -38,16 +39,18 @@ qwenvl_model_credential = QwenVLModelCredential() qwentti_model_credential = QwenTextToImageModelCredential() model_info_list = [ModelInfo('gte-rerank', - '阿里巴巴通义实验室开发的GTE-Rerank文本排序系列模型,开发者可以通过LlamaIndex框架进行集成高质量文本检索、排序。', + _('With the GTE-Rerank text sorting series model developed by Alibaba Tongyi Lab, developers can integrate high-quality text retrieval and sorting through the LlamaIndex framework.'), ModelTypeConst.RERANKER, aliyun_bai_lian_model_credential, AliyunBaiLianReranker), ModelInfo('paraformer-realtime-v2', - '中文(含粤语等各种方言)、英文、日语、韩语支持多个语种自由切换', + _('Chinese (including various dialects such as Cantonese), English, Japanese, and Korean support free switching between multiple languages.'), ModelTypeConst.STT, aliyun_bai_lian_stt_model_credential, AliyunBaiLianSpeechToText), ModelInfo('cosyvoice-v1', - 'CosyVoice基于新一代生成式语音大模型,能根据上下文预测情绪、语调、韵律等,具有更好的拟人效果', + _('CosyVoice is based on a new generation of large generative speech models, which can predict emotions, intonation, rhythm, etc. based on context, and has better anthropomorphic effects.'), ModelTypeConst.TTS, aliyun_bai_lian_tts_model_credential, AliyunBaiLianTextToSpeech), ModelInfo('text-embedding-v1', - '通用文本向量,是通义实验室基于LLM底座的多语言文本统一向量模型,面向全球多个主流语种,提供高水准的向量服务,帮助开发者将文本数据快速转换为高质量的向量数据。', + _(''' + Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data. + '''), ModelTypeConst.EMBEDDING, aliyun_bai_lian_embedding_model_credential, AliyunBaiLianEmbedding), ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential, @@ -65,7 +68,7 @@ module_info_vl_list = [ ] module_info_tti_list = [ ModelInfo('wanx-v1', - '通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二次元、3D卡通。', + _('Tongyi Wanxiang - a large image model for text generation, supports bilingual input in Chinese and English, and supports the input of reference pictures for reference content or reference style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D. 
Cartoon.'), ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel), ] @@ -90,7 +93,9 @@ class AliyunBaiLianModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='aliyun_bai_lian_model_provider', name='阿里云百炼', icon=get_file_content( - os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'aliyun_bai_lian_model_provider', - 'icon', - 'aliyun_bai_lian_icon_svg'))) + return ModelProvideInfo(provider='aliyun_bai_lian_model_provider', name=_('Alibaba Cloud Bailian'), + icon=get_file_content( + os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', + 'aliyun_bai_lian_model_provider', + 'icon', + 'aliyun_bai_lian_icon_svg'))) diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py index 09ba7a752..7ae87a754 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/embedding.py @@ -13,6 +13,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import ValidCode, BaseModelCredential from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.embedding import AliyunBaiLianEmbedding +from django.utils.translation import gettext_lazy as _ class AliyunBaiLianEmbeddingCredential(BaseForm, BaseModelCredential): @@ -21,21 +22,22 @@ class AliyunBaiLianEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['dashscope_api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model: AliyunBaiLianEmbedding = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py index d3205d8ab..9149d4f61 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py @@ -16,10 +16,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class 
QwenModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=1.0, _min=0.1, _max=1.9, @@ -27,7 +29,8 @@ class QwenModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -41,23 +44,26 @@ class QwenVLModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py index db5f156cc..43c778a78 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/llm.py @@ -7,10 +7,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class BaiLianLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -18,7 +20,8 @@ class BaiLianLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -32,22 +35,25 @@ class BaiLianLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if 
not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True @@ -55,7 +61,7 @@ class BaiLianLLMModelCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def get_model_params_setting_form(self, model_name): diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py index cd72274d3..e38a85c1f 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/reranker.py @@ -15,6 +15,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.aliyun_bai_lian_model_provider.model.reranker import AliyunBaiLianReranker +from django.utils.translation import gettext_lazy as _ class AliyunBaiLianRerankerCredential(BaseForm, BaseModelCredential): @@ -22,21 +23,24 @@ class AliyunBaiLianRerankerCredential(BaseForm, BaseModelCredential): def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): if not model_type == 'RERANKER': - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['dashscope_api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model: AliyunBaiLianReranker = provider.get_model(model_type, model_name, model_credential) - model.compress_documents([Document(page_content='你好')], '你好') + model.compress_documents([Document(page_content=_('Hello'))], _('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise 
AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py index 286650f1c..f83ce9059 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/stt.py @@ -6,6 +6,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class AliyunBaiLianSTTModelCredential(BaseForm, BaseModelCredential): @@ -15,12 +16,13 @@ class AliyunBaiLianSTTModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -30,7 +32,9 @@ class AliyunBaiLianSTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py index 8fc39db7f..ed6832be7 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py @@ -6,21 +6,18 @@ @date:2024/7/11 18:41 @desc: """ -import base64 -import os from typing import Dict -from langchain_core.messages import HumanMessage - from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class QwenModelParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', '指定生成图片的尺寸, 如: 1024x1024'), + TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')), required=True, default_value='1024*1024', option_list=[ @@ -32,27 +29,27 @@ class QwenModelParams(BaseForm): text_field='label', value_field='value') n = forms.SliderField( - TooltipLabel('图片数量', '指定生成图片的数量'), + TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')), required=True, default_value=1, _min=1, _max=4, _step=1, precision=0) style = forms.SingleSelect( - TooltipLabel('风格', '指定生成图片的风格'), + 
TooltipLabel(_('Style'), _('Specify the style of generated images')), required=True, default_value='<auto>', option_list=[ - {'value': '<auto>', 'label': '默认值,由模型随机输出图像风格'}, - {'value': '<photography>', 'label': '摄影'}, - {'value': '<portrait>', 'label': '人像写真'}, - {'value': '<3d cartoon>', 'label': '3D卡通'}, - {'value': '<anime>', 'label': '动画'}, - {'value': '<oil painting>', 'label': '油画'}, - {'value': '<watercolor>', 'label': '水彩'}, - {'value': '<sketch>', 'label': '素描'}, - {'value': '<chinese painting>', 'label': '中国画'}, - {'value': '<flat illustration>', 'label': '扁平插画'}, + {'value': '<auto>', 'label': _('Default value, the image style is randomly output by the model')}, + {'value': '<photography>', 'label': _('photography')}, + {'value': '<portrait>', 'label': _('Portraits')}, + {'value': '<3d cartoon>', 'label': _('3D cartoon')}, + {'value': '<anime>', 'label': _('animation')}, + {'value': '<oil painting>', 'label': _('painting')}, + {'value': '<watercolor>', 'label': _('watercolor')}, + {'value': '<sketch>', 'label': _('sketch')}, + {'value': '<chinese painting>', 'label': _('Chinese painting')}, + {'value': '<flat illustration>', 'label': _('flat illustration')}, ], text_field='label', value_field='value' @@ -65,11 +62,12 @@ class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -80,7 +78,9 @@ class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py index 09a2bbe4b..c5d39892c 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/credential/tts.py @@ -6,35 +6,36 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class AliyunBaiLianTTSModelGeneralParams(BaseForm): voice = forms.SingleSelect( - TooltipLabel('音色', '中文音色可支持中英文混合场景'), + TooltipLabel(_('timbre'), _('Chinese sounds can support mixed scenes of Chinese and English')), required=True, default_value='longxiaochun', text_field='value', value_field='value', option_list=[ - {'text': '龙小淳', 'value': 'longxiaochun'}, - {'text': '龙小夏', 'value': 'longxiaoxia'}, - {'text': '龙小诚', 'value': 'longxiaocheng'}, - {'text': '龙小白', 'value': 'longxiaobai'}, - {'text': '龙老铁', 'value': 'longlaotie'}, - {'text': '龙书', 'value': 'longshu'}, - {'text': '龙硕', 'value': 'longshuo'}, - {'text': '龙婧', 'value': 'longjing'}, - {'text': '龙妙', 'value': 'longmiao'}, - {'text': '龙悦',
'value': 'longyue'}, - {'text': '龙媛', 'value': 'longyuan'}, - {'text': '龙飞', 'value': 'longfei'}, - {'text': '龙杰力豆', 'value': 'longjielidou'}, - {'text': '龙彤', 'value': 'longtong'}, - {'text': '龙祥', 'value': 'longxiang'}, + {'text': _('Long Xiaochun'), 'value': 'longxiaochun'}, + {'text': _('Long Xiaoxia'), 'value': 'longxiaoxia'}, + {'text': _('Long Xiaochen'), 'value': 'longxiaocheng'}, + {'text': _('Long Xiaobai'), 'value': 'longxiaobai'}, + {'text': _('Long laotie'), 'value': 'longlaotie'}, + {'text': _('Long Shu'), 'value': 'longshu'}, + {'text': _('Long Shuo'), 'value': 'longshuo'}, + {'text': _('Long Jing'), 'value': 'longjing'}, + {'text': _('Long Miao'), 'value': 'longmiao'}, + {'text': _('Long Yue'), 'value': 'longyue'}, + {'text': _('Long Yuan'), 'value': 'longyuan'}, + {'text': _('Long Fei'), 'value': 'longfei'}, + {'text': _('Long Jielidou'), 'value': 'longjielidou'}, + {'text': _('Long Tong'), 'value': 'longtong'}, + {'text': _('Long Xiang'), 'value': 'longxiang'}, {'text': 'Stella', 'value': 'loongstella'}, {'text': 'Bella', 'value': 'loongbella'}, ]) speech_rate = forms.SliderField( - TooltipLabel('语速', '[0.5,2],默认为1,通常保留一位小数即可'), + TooltipLabel(_('speaking speed'), _('[0.5,2], the default is 1, usually one decimal place is enough')), required=True, default_value=1, _min=0.5, _max=2, @@ -49,12 +50,13 @@ class AliyunBaiLianTTSModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -64,7 +66,9 @@ class AliyunBaiLianTTSModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py index c2fd32877..c070363b9 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py @@ -8,7 +8,7 @@ from langchain_core.messages import HumanMessage from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tti import BaseTextToImage - +from django.utils.translation import gettext_lazy as _ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): api_key: str @@ -39,7 +39,7 @@ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): def check_auth(self): chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max') - chat.invoke([HumanMessage([{"type": "text", "text": "你好"}])]) + chat.invoke([HumanMessage([{"type": "text", "text": _('Hello')}])]) def generate_image(self, prompt: str, negative_prompt: str = None): # 
api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py index 9bfe38f42..c9d29b6b5 100644 --- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py +++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py @@ -6,7 +6,7 @@ from dashscope.audio.tts_v2 import * from common.util.common import _remove_empty_lines from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tts import BaseTextToSpeech - +from django.utils.translation import gettext_lazy as _ class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): api_key: str @@ -33,7 +33,7 @@ class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): ) def check_auth(self): - self.text_to_speech('你好') + self.text_to_speech(_('Hello')) def text_to_speech(self, text): dashscope.api_key = self.api_key diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py index ddb5afd52..f8b11ee4e 100644 --- a/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/aws_bedrock_model_provider.py @@ -11,7 +11,7 @@ from setting.models_provider.impl.aws_bedrock_model_provider.credential.llm impo from setting.models_provider.impl.aws_bedrock_model_provider.model.embedding import BedrockEmbeddingModel from setting.models_provider.impl.aws_bedrock_model_provider.model.llm import BedrockModel from smartdoc.conf import PROJECT_DIR - +from django.utils.translation import gettext_lazy as _ def _create_model_info(model_name, description, model_type, credential_class, model_class): return ModelInfo( @@ -32,86 +32,92 @@ def _initialize_model_info(): model_info_list = [ _create_model_info( 'anthropic.claude-v2:1', - 'Claude 2 的更新,采用双倍的上下文窗口,并在长文档和 RAG 上下文中提高可靠性、幻觉率和循证准确性。', + _('An update to Claude 2 that doubles the context window and improves reliability, hallucination rates, and evidence-based accuracy in long documents and RAG contexts.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'anthropic.claude-v2', - 'Anthropic 功能强大的模型,可处理各种任务,从复杂的对话和创意内容生成到详细的指令服从。', + _('Anthropic is a powerful model that can handle a variety of tasks, from complex dialogue and creative content generation to detailed command obedience.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'anthropic.claude-3-haiku-20240307-v1:0', - 'Claude 3 Haiku 是 Anthropic 最快速、最紧凑的模型,具有近乎即时的响应能力。该模型可以快速回答简单的查询和请求。客户将能够构建模仿人类交互的无缝人工智能体验。 Claude 3 Haiku 可以处理图像和返回文本输出,并且提供 200K 上下文窗口。', + _(''' + The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-instant responsiveness. The model can answer simple queries and requests quickly. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text output, and provides 200K context windows. 
+ '''), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'anthropic.claude-3-sonnet-20240229-v1:0', - 'Anthropic 推出的 Claude 3 Sonnet 模型在智能和速度之间取得理想的平衡,尤其是在处理企业工作负载方面。该模型提供最大的效用,同时价格低于竞争产品,并且其经过精心设计,是大规模部署人工智能的可靠选择。', + _(''' +The Claude 3 Sonnet model from Anthropic strikes the ideal balance between intelligence and speed, especially when it comes to handling enterprise workloads. This model offers maximum utility while being priced lower than competing products, and it's been engineered to be a solid choice for deploying AI at scale. + '''), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'anthropic.claude-3-5-sonnet-20240620-v1:0', - 'Claude 3.5 Sonnet提高了智能的行业标准,在广泛的评估中超越了竞争对手的型号和Claude 3 Opus,具有我们中端型号的速度和成本效益。', + _('The Claude 3.5 Sonnet raises the industry standard for intelligence, outperforming competing models and the Claude 3 Opus in extensive evaluations, with the speed and cost-effectiveness of our mid-range models.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'anthropic.claude-instant-v1', - '一种更快速、更实惠但仍然非常强大的模型,它可以处理一系列任务,包括随意对话、文本分析、摘要和文档问题回答。', + _('A faster, more affordable but still very powerful model that can handle a range of tasks including casual conversation, text analysis, summarization and document question answering.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'amazon.titan-text-premier-v1:0', - 'Titan Text Premier 是 Titan Text 系列中功能强大且先进的型号,旨在为各种企业应用程序提供卓越的性能。凭借其尖端功能,它提供了更高的准确性和出色的结果,使其成为寻求一流文本处理解决方案的组织的绝佳选择。', + _(''' + Titan Text Premier is the most powerful and advanced model in the Titan Text series, designed to deliver exceptional performance for a variety of enterprise applications. With its cutting-edge features, it delivers greater accuracy and outstanding results, making it an excellent choice for organizations looking for a top-notch text processing solution. + '''), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel ), _create_model_info( 'amazon.titan-text-lite-v1', - 'Amazon Titan Text Lite 是一种轻量级的高效模型,非常适合英语任务的微调,包括摘要和文案写作等,在这种场景下,客户需要更小、更经济高效且高度可定制的模型', + _('Amazon Titan Text Lite is a lightweight, efficient model ideal for fine-tuning English-language tasks, including summarization and copywriting, where customers require smaller, more cost-effective, and highly customizable models.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel), _create_model_info( 'amazon.titan-text-express-v1', - 'Amazon Titan Text Express 的上下文长度长达 8000 个令牌,因而非常适合各种高级常规语言任务,例如开放式文本生成和对话式聊天,以及检索增强生成(RAG)中的支持。在发布时,该模型针对英语进行了优化,但也支持其他语言。', + _('Amazon Titan Text Express has context lengths of up to 8,000 tokens, making it ideal for a variety of high-level general language tasks, such as open-ended text generation and conversational chat, as well as support in retrieval-augmented generation (RAG). At launch, the model is optimized for English, but other languages are supported.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel), _create_model_info( 'mistral.mistral-7b-instruct-v0:2', - '7B 密集型转换器,可快速部署,易于定制。体积虽小,但功能强大,适用于各种用例。支持英语和代码,以及 32k 的上下文窗口。', + _('7B dense converter for rapid deployment and easy customization. Small in size yet powerful in a variety of use cases. 
Supports English and code, as well as 32k context windows.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel), _create_model_info( 'mistral.mistral-large-2402-v1:0', - '先进的 Mistral AI 大型语言模型,能够处理任何语言任务,包括复杂的多语言推理、文本理解、转换和代码生成。', + _('Advanced Mistral AI large-scale language model capable of handling any language task, including complex multilingual reasoning, text understanding, transformation, and code generation.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel), _create_model_info( 'meta.llama3-70b-instruct-v1:0', - '非常适合内容创作、会话式人工智能、语言理解、研发和企业应用', + _('Ideal for content creation, conversational AI, language understanding, R&D, and enterprise applications'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel), _create_model_info( 'meta.llama3-8b-instruct-v1:0', - '非常适合有限的计算能力和资源、边缘设备和更快的训练时间。', + _('Ideal for limited computing power and resources, edge devices, and faster training times.'), ModelTypeConst.LLM, BedrockLLMModelCredential, BedrockModel), @@ -119,7 +125,7 @@ def _initialize_model_info(): embedded_model_info_list = [ _create_model_info( 'amazon.titan-embed-text-v1', - 'Titan Embed Text 是 Amazon Titan Embed 系列中最大的嵌入模型,可以处理各种文本嵌入任务,如文本分类、文本相似度计算等。', + _('Titan Embed Text is the largest embedding model in the Amazon Titan Embed series and can handle various text embedding tasks, such as text classification, text similarity calculation, etc.'), ModelTypeConst.EMBEDDING, BedrockEmbeddingCredential, BedrockEmbeddingModel diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py index 7e2bb6cac..9acd2a0b1 100644 --- a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py @@ -7,6 +7,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.aws_bedrock_model_provider.model.embedding import BedrockEmbeddingModel +from django.utils.translation import gettext_lazy as _ class BedrockEmbeddingCredential(BaseForm, BaseModelCredential): @@ -16,24 +17,24 @@ class BedrockEmbeddingCredential(BaseForm, BaseModelCredential): model_type_list = provider.get_model_type_list() if not any(mt.get('value') == model_type for mt in model_type_list): if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) return False required_keys = ['region_name', 'access_key_id', 'secret_access_key'] if not all(key in model_credential for key in required_keys): if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'以下字段为必填字段: {", ".join(required_keys)}') + raise AppApiException(ValidCode.valid_error.value, _('The following fields are required: {keys}').format(keys=", ".join(required_keys))) return False try: model: BedrockEmbeddingModel = provider.get_model(model_type, model_name, model_credential) - aa = model.embed_query('你好') + aa = model.embed_query(_('Hello')) print(aa) except AppApiException: raise except Exception as e: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, 
_('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) return False return True diff --git a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py index 3474fab7f..de324966e 100644 --- a/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/aws_bedrock_model_provider/credential/llm.py @@ -1,4 +1,3 @@ - from typing import Dict from langchain_core.messages import HumanMessage @@ -7,10 +6,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import ValidCode, BaseModelCredential +from django.utils.translation import gettext_lazy as _ class BedrockLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -18,7 +19,8 @@ class BedrockLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -28,30 +30,33 @@ class BedrockLLMModelParams(BaseForm): class BedrockLLMModelCredential(BaseForm, BaseModelCredential): - - def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(mt.get('value') == model_type for mt in model_type_list): if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) return False required_keys = ['region_name', 'access_key_id', 'secret_access_key'] if not all(key in model_credential for key in required_keys): if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'以下字段为必填字段: {", ".join(required_keys)}') + raise AppApiException(ValidCode.valid_error.value, + _('The following fields are required: {keys}').format( + keys=", ".join(required_keys))) return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except AppApiException: raise except Exception as e: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) return False return True diff --git a/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py b/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py index 111c4390b..e249f0b7c 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py +++ b/apps/setting/models_provider/impl/azure_model_provider/azure_model_provider.py @@ -24,6 +24,7 @@ from 
setting.models_provider.impl.azure_model_provider.model.stt import AzureOpe from setting.models_provider.impl.azure_model_provider.model.tti import AzureOpenAITextToImage from setting.models_provider.impl.azure_model_provider.model.tts import AzureOpenAITextToSpeech from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ base_azure_llm_model_credential = AzureLLMModelCredential() base_azure_embedding_model_credential = AzureOpenAIEmbeddingCredential() @@ -33,7 +34,7 @@ base_azure_tts_model_credential = AzureOpenAITTSModelCredential() base_azure_stt_model_credential = AzureOpenAISTTModelCredential() default_model_info = [ - ModelInfo('Azure OpenAI', '具体的基础模型由部署名决定', ModelTypeConst.LLM, + ModelInfo('Azure OpenAI', '', ModelTypeConst.LLM, base_azure_llm_model_credential, AzureChatModel, api_version='2024-02-15-preview' ), ModelInfo('gpt-4', '', ModelTypeConst.LLM, @@ -48,7 +49,7 @@ default_model_info = [ ] embedding_model_info = [ - ModelInfo('text-embedding-3-large', '具体的基础模型由部署名决定', ModelTypeConst.EMBEDDING, + ModelInfo('text-embedding-3-large', '', ModelTypeConst.EMBEDDING, base_azure_embedding_model_credential, AzureOpenAIEmbeddingModel, api_version='2023-05-15' ), ModelInfo('text-embedding-3-small', '', ModelTypeConst.EMBEDDING, diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py index 12f26b53f..4deda9f67 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/embedding.py @@ -8,13 +8,11 @@ """ from typing import Dict -from langchain_core.messages import HumanMessage - from common import forms from common.exception.app_exception import AppApiException -from common.forms import BaseForm, TooltipLabel +from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class AzureOpenAIEmbeddingCredential(BaseForm, BaseModelCredential): @@ -23,22 +21,24 @@ class AzureOpenAIEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key', 'api_version']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, '校验失败,请检查参数是否正确') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct')) else: return False @@ -47,9 +47,8 @@ class AzureOpenAIEmbeddingCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_version 
= forms.TextInputField("API 版本 (api_version)", required=True) + api_version = forms.TextInputField("Api Version", required=True) - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) - - api_key = forms.PasswordInputField("API Key (api_key)", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/image.py b/apps/setting/models_provider/impl/azure_model_provider/credential/image.py index 3c93d557d..98b64e03f 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/image.py @@ -9,9 +9,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ + class AzureOpenAIImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +22,8 @@ class AzureOpenAIImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -27,34 +31,36 @@ class AzureOpenAIImageModelParams(BaseForm): precision=0) - class AzureOpenAIImageModelCredential(BaseForm, BaseModelCredential): - api_version = forms.TextInputField("API 版本 (api_version)", required=True) - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) - api_key = forms.PasswordInputField("API Key (api_key)", required=True) + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key', 'api_version']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + 
raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py index a902551c8..04d8ea498 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/llm.py @@ -14,10 +14,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class AzureLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -25,7 +27,8 @@ class AzureLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -39,22 +42,23 @@ class AzureLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key', 'deployment_name', 'api_version']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, '校验失败,请检查参数是否正确') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct')) else: return False @@ -63,13 +67,13 @@ class AzureLLMModelCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_version = forms.TextInputField("API 版本 (api_version)", required=True) + api_version = forms.TextInputField("API Version", required=True) - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) - api_key = forms.PasswordInputField("API Key (api_key)", required=True) + api_key = forms.PasswordInputField("API Key", required=True) - deployment_name = forms.TextInputField("部署名 (deployment_name)", required=True) + deployment_name = forms.TextInputField("Deployment name", required=True) 
def get_model_params_setting_form(self, model_name): return AzureLLMModelParams() diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py b/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py index e337f848b..f7127a0be 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/stt.py @@ -5,23 +5,24 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class AzureOpenAISTTModelCredential(BaseForm, BaseModelCredential): - api_version = forms.TextInputField("API 版本 (api_version)", required=True) - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) - api_key = forms.PasswordInputField("API Key (api_key)", required=True) + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key', 'api_version']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -31,7 +32,7 @@ class AzureOpenAISTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py b/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py index 227700351..76796057b 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/tti.py @@ -1,19 +1,16 @@ # coding=utf-8 -import base64 -import os from typing import Dict -from langchain_core.messages import HumanMessage - from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class AzureOpenAITTIModelParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', '指定生成图片的尺寸, 如: 1024x1024'), + TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')), required=True, default_value='1024x1024', option_list=[ @@ -26,7 +23,7 @@ class AzureOpenAITTIModelParams(BaseForm): ) quality = forms.SingleSelect( - TooltipLabel('图片质量', ''), + 
TooltipLabel(_('Picture quality'), ''), required=True, default_value='standard', option_list=[ @@ -38,7 +35,7 @@ class AzureOpenAITTIModelParams(BaseForm): ) n = forms.SliderField( - TooltipLabel('图片数量', '指定生成图片的数量'), + TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')), required=True, default_value=1, _min=1, _max=10, @@ -47,20 +44,20 @@ class AzureOpenAITTIModelParams(BaseForm): class AzureOpenAITextToImageModelCredential(BaseForm, BaseModelCredential): - api_version = forms.TextInputField("API 版本 (api_version)", required=True) - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) - api_key = forms.PasswordInputField("API Key (api_key)", required=True) + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key', 'api_version']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -71,7 +68,7 @@ class AzureOpenAITextToImageModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py b/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py index 9aed903d9..d662f37ef 100644 --- a/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py +++ b/apps/setting/models_provider/impl/azure_model_provider/credential/tts.py @@ -5,11 +5,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class AzureOpenAITTSModelGeneralParams(BaseForm): # alloy, echo, fable, onyx, nova, shimmer voice = forms.SingleSelect( - TooltipLabel('Voice', '尝试不同的声音(合金、回声、寓言、缟玛瑙、新星和闪光),找到一种适合您所需的音调和听众的声音。当前的语音针对英语进行了优化。'), + TooltipLabel('Voice', _('Try out the different sounds (Alloy, Echo, Fable, Onyx, Nova, and Sparkle) to find one that suits your desired tone and audience. 
The current voiceover is optimized for English.')), required=True, default_value='alloy', text_field='value', value_field='value', @@ -24,20 +25,20 @@ class AzureOpenAITTSModelGeneralParams(BaseForm): class AzureOpenAITTSModelCredential(BaseForm, BaseModelCredential): - api_version = forms.TextInputField("API 版本 (api_version)", required=True) - api_base = forms.TextInputField('API 域名 (azure_endpoint)', required=True) - api_key = forms.PasswordInputField("API Key (api_key)", required=True) + api_version = forms.TextInputField("API Version", required=True) + api_base = forms.TextInputField('Azure Endpoint', required=True) + api_key = forms.PasswordInputField("API Key", required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key', 'api_version']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -47,7 +48,7 @@ class AzureOpenAITTSModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py index 3e861ec36..7e7d0fa41 100644 --- a/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/deepseek_model_provider/credential/llm.py @@ -14,10 +14,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class DeepSeekLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -25,7 +27,8 @@ class DeepSeekLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -39,22 +42,25 @@ class DeepSeekLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise 
AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py b/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py index f60f26fa3..a0afa6e6a 100644 --- a/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py +++ b/apps/setting/models_provider/impl/deepseek_model_provider/deepseek_model_provider.py @@ -14,14 +14,14 @@ from setting.models_provider.base_model_provider import IModelProvider, ModelPro from setting.models_provider.impl.deepseek_model_provider.credential.llm import DeepSeekLLMModelCredential from setting.models_provider.impl.deepseek_model_provider.model.llm import DeepSeekChatModel from smartdoc.conf import PROJECT_DIR - +from django.utils.translation import gettext_lazy as _ deepseek_llm_model_credential = DeepSeekLLMModelCredential() -deepseek_chat = ModelInfo('deepseek-chat', '擅长通用对话任务,支持 32K 上下文', ModelTypeConst.LLM, +deepseek_chat = ModelInfo('deepseek-chat', _('Good at common conversational tasks, supports 32K contexts'), ModelTypeConst.LLM, deepseek_llm_model_credential, DeepSeekChatModel ) -deepseek_coder = ModelInfo('deepseek-coder', '擅长处理编程任务,支持 16K 上下文', ModelTypeConst.LLM, +deepseek_coder = ModelInfo('deepseek-coder', _('Good at handling programming tasks, supports 16K contexts'), ModelTypeConst.LLM, deepseek_llm_model_credential, DeepSeekChatModel) diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py index 6649f5923..41f7bb0fe 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/embedding.py @@ -12,29 +12,29 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class GeminiEmbeddingCredential(BaseForm, BaseModelCredential): def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=True): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if 
key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py index 2c3fe7366..b45e7e4ab 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/image.py @@ -1,6 +1,5 @@ # coding=utf-8 -import base64 -import os + from typing import Dict from langchain_core.messages import HumanMessage @@ -9,9 +8,9 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class GeminiImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +18,7 @@ class GeminiImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -35,24 +34,24 @@ class GeminiImageModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff 
--git a/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py index c2f2cb780..be86da0d1 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/llm.py @@ -14,10 +14,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class GeminiLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -25,7 +25,7 @@ class GeminiLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -39,23 +39,23 @@ class GeminiLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.invoke([HumanMessage(content='你好')]) + res = model.invoke([HumanMessage(content=_('Hello'))]) print(res) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py b/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py index cfa3aa79c..e4a1c5208 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/credential/stt.py @@ -5,7 +5,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class GeminiSTTModelCredential(BaseForm, BaseModelCredential): api_key = forms.PasswordInputField('API Key', required=True) @@ -14,12 +14,12 @@ class GeminiSTTModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: 
mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -29,7 +29,7 @@ class GeminiSTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py b/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py index 556c87ccb..23d1d9471 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/gemini_model_provider.py @@ -20,6 +20,8 @@ from setting.models_provider.impl.gemini_model_provider.model.image import Gemin from setting.models_provider.impl.gemini_model_provider.model.llm import GeminiChatModel from setting.models_provider.impl.gemini_model_provider.model.stt import GeminiSpeechToText from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ + gemini_llm_model_credential = GeminiLLMModelCredential() gemini_image_model_credential = GeminiImageModelCredential() @@ -27,33 +29,33 @@ gemini_stt_model_credential = GeminiSTTModelCredential() gemini_embedding_model_credential = GeminiEmbeddingCredential() model_info_list = [ - ModelInfo('gemini-1.0-pro', '最新的Gemini 1.0 Pro模型,随Google更新而更新', + ModelInfo('gemini-1.0-pro', _('Latest Gemini 1.0 Pro model, updated with Google update'), ModelTypeConst.LLM, gemini_llm_model_credential, GeminiChatModel), - ModelInfo('gemini-1.0-pro-vision', '最新的Gemini 1.0 Pro Vision模型,随Google更新而更新', + ModelInfo('gemini-1.0-pro-vision', _('Latest Gemini 1.0 Pro Vision model, updated with Google update'), ModelTypeConst.LLM, gemini_llm_model_credential, GeminiChatModel), ] model_image_info_list = [ - ModelInfo('gemini-1.5-flash', '最新的Gemini 1.5 Flash模型,随Google更新而更新', + ModelInfo('gemini-1.5-flash', _('Latest Gemini 1.5 Flash model, updated with Google updates'), ModelTypeConst.IMAGE, gemini_image_model_credential, GeminiImage), - ModelInfo('gemini-1.5-pro', '最新的Gemini 1.5 Flash模型,随Google更新而更新', + ModelInfo('gemini-1.5-pro', _('Latest Gemini 1.5 Flash model, updated with Google updates'), ModelTypeConst.IMAGE, gemini_image_model_credential, GeminiImage), ] model_stt_info_list = [ - ModelInfo('gemini-1.5-flash', '最新的Gemini 1.5 Flash模型,随Google更新而更新', + ModelInfo('gemini-1.5-flash', _('Latest Gemini 1.5 Flash model, updated with Google updates'), ModelTypeConst.STT, gemini_stt_model_credential, GeminiSpeechToText), - ModelInfo('gemini-1.5-pro', '最新的Gemini 1.5 Flash模型,随Google更新而更新', + ModelInfo('gemini-1.5-pro', _('Latest Gemini 1.5 Flash model, updated with Google updates'), ModelTypeConst.STT, gemini_stt_model_credential, GeminiSpeechToText), diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py 
b/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py index 96fa6f0d4..3724b5086 100644 --- a/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py +++ b/apps/setting/models_provider/impl/gemini_model_provider/model/stt.py @@ -1,15 +1,13 @@ -import asyncio -import io + from typing import Dict from langchain_core.messages import HumanMessage from langchain_google_genai import ChatGoogleGenerativeAI -from openai import OpenAI from common.config.tokenizer_manage_config import TokenizerManage from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_stt import BaseSpeechToText -import google.generativeai as genai +from django.utils.translation import gettext_lazy as _ def custom_get_token_ids(text: str): @@ -43,7 +41,7 @@ class GeminiSpeechToText(MaxKBBaseModel, BaseSpeechToText): model=self.model, google_api_key=self.api_key ) - response_list = client.invoke('你好') + response_list = client.invoke(_('Hello')) # print(response_list) def speech_to_text(self, audio_file): @@ -53,7 +51,7 @@ class GeminiSpeechToText(MaxKBBaseModel, BaseSpeechToText): ) audio_data = audio_file.read() msg = HumanMessage(content=[ - {'type': 'text', 'text': '把音频转成文字'}, + {'type': 'text', 'text': _('convert audio to text')}, {"type": "media", 'mime_type': 'audio/mp3', "data": audio_data} ]) res = client.invoke([msg]) diff --git a/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py index 2feed9a03..4de385bb7 100644 --- a/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/kimi_model_provider/credential/llm.py @@ -14,10 +14,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class KimiLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.3, _min=0.1, _max=1.0, @@ -25,7 +26,7 @@ class KimiLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -39,22 +40,22 @@ class KimiLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - 
model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -62,7 +63,7 @@ class KimiLLMModelCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def get_model_params_setting_form(self, model_name): diff --git a/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py index ec899c25d..edb08610f 100644 --- a/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/local_model_provider/credential/embedding.py @@ -13,6 +13,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding +from django.utils.translation import gettext_lazy as _ class LocalEmbeddingCredential(BaseForm, BaseModelCredential): @@ -20,21 +21,21 @@ class LocalEmbeddingCredential(BaseForm, BaseModelCredential): def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): if not model_type == 'EMBEDDING': - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['cache_folder']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -42,4 +43,4 @@ class LocalEmbeddingCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return model - cache_folder = forms.TextInputField('模型目录', required=True) + cache_folder = forms.TextInputField(_('Model catalog'), required=True) diff --git a/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py index ee89a5c96..1df993fdd 100644 --- a/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py +++ 
b/apps/setting/models_provider/impl/local_model_provider/credential/reranker.py @@ -15,6 +15,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.local_model_provider.model.reranker import LocalBaseReranker +from django.utils.translation import gettext_lazy as _ class LocalRerankerCredential(BaseForm, BaseModelCredential): @@ -22,21 +23,21 @@ class LocalRerankerCredential(BaseForm, BaseModelCredential): def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): if not model_type == 'RERANKER': - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['cache_dir']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model: LocalBaseReranker = provider.get_model(model_type, model_name, model_credential) - model.compress_documents([Document(page_content='你好')], '你好') + model.compress_documents([Document(page_content=_('Hello'))], _('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -44,4 +45,4 @@ class LocalRerankerCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return model - cache_dir = forms.TextInputField('模型目录', required=True) + cache_dir = forms.TextInputField(_('Model catalog'), required=True) diff --git a/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py b/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py index 2c92bbbfb..0d637d5eb 100644 --- a/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py +++ b/apps/setting/models_provider/impl/local_model_provider/local_model_provider.py @@ -7,11 +7,7 @@ @desc: """ import os -from typing import Dict -from pydantic import BaseModel - -from common.exception.app_exception import AppApiException from common.util.file_util import get_file_content from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \ ModelInfoManage @@ -20,6 +16,7 @@ from setting.models_provider.impl.local_model_provider.credential.reranker impor from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding from setting.models_provider.impl.local_model_provider.model.reranker import LocalReranker from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ embedding_text2vec_base_chinese = ModelInfo('shibing624/text2vec-base-chinese', '', ModelTypeConst.EMBEDDING, LocalEmbeddingCredential(), LocalEmbedding) @@ -39,6 +36,6 @@ class LocalModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_local_provider', name='本地模型', icon=get_file_content( + return 
ModelProvideInfo(provider='model_local_provider', name=_('local model'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'local_model_provider', 'icon', 'local_icon_svg'))) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py index 235aa9633..328408169 100644 --- a/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/embedding.py @@ -13,6 +13,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding +from django.utils.translation import gettext_lazy as _ class OllamaEmbeddingModelCredential(BaseForm, BaseModelCredential): @@ -20,17 +21,17 @@ class OllamaEmbeddingModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) try: model_list = provider.get_base_model_list(model_credential.get('api_base')) except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] if len(exist) == 0: - raise AppApiException(ValidCode.model_not_fount, "模型不存在,请先下载模型") + raise AppApiException(ValidCode.model_not_fount, _('The model does not exist, please download the model first')) model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) return True def encryption_dict(self, model_info: Dict[str, object]): @@ -39,7 +40,7 @@ class OllamaEmbeddingModelCredential(BaseForm, BaseModelCredential): def build_model(self, model_info: Dict[str, object]): for key in ['model']: if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') + raise AppApiException(500, _('{key} is required').format(key=key)) return self - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py index c285feefa..63be15d22 100644 --- a/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/image.py @@ -1,17 +1,14 @@ # coding=utf-8 -import base64 -import os from typing import Dict -from langchain_core.messages import HumanMessage - from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ 
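The provider modules above build their ModelInfo lists and ModelProvideInfo names at import time, which is why gettext_lazy is used rather than gettext: the description is stored as a lazy proxy and resolved only when it is rendered for a request, after the active language is known. A rough sketch of that behaviour, using an illustrative stand-in class rather than the MaxKB ModelInfo:

    from dataclasses import dataclass
    from django.utils.translation import gettext_lazy as _

    @dataclass
    class ProviderEntry:
        # Illustrative stand-in only; not the MaxKB ModelInfo/ModelProvideInfo classes.
        name: str
        description: object  # holds a lazy translation proxy until rendered

    # Built at import time, before any request has activated a language.
    ENTRIES = [
        ProviderEntry('nomic-embed-text',
                      _('A high-performance open embedding model with a large token context window.')),
    ]

    # str(ENTRIES[0].description) resolves the translation for whatever language
    # is active at render time (assuming a configured Django environment).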
class OllamaImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +16,7 @@ class OllamaImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -29,22 +26,22 @@ class OllamaImageModelParams(BaseForm): class OllamaImageModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) try: model_list = provider.get_base_model_list(model_credential.get('api_base')) except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] if len(exist) == 0: - raise AppApiException(ValidCode.model_not_fount, "模型不存在,请先下载模型") + raise AppApiException(ValidCode.model_not_fount, _('The model does not exist, please download the model first')) return True diff --git a/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py index ab0749fe3..d951aefa3 100644 --- a/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/ollama_model_provider/credential/llm.py @@ -12,10 +12,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class OllamaLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.3, _min=0.1, _max=1.0, @@ -23,7 +24,7 @@ class OllamaLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -36,15 +37,15 @@ class OllamaLLMModelCredential(BaseForm, BaseModelCredential): 
raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) try: model_list = provider.get_base_model_list(model_credential.get('api_base')) except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) exist = [model for model in (model_list.get('models') if model_list.get('models') is not None else []) if model.get('model') == model_name or model.get('model').replace(":latest", "") == model_name] if len(exist) == 0: - raise AppApiException(ValidCode.model_not_fount, "模型不存在,请先下载模型") + raise AppApiException(ValidCode.model_not_fount, _('The model does not exist, please download the model first')) return True def encryption_dict(self, model_info: Dict[str, object]): @@ -53,11 +54,11 @@ class OllamaLLMModelCredential(BaseForm, BaseModelCredential): def build_model(self, model_info: Dict[str, object]): for key in ['api_key', 'model']: if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') + raise AppApiException(500, _('{key} is required').format(key=key)) self.api_key = model_info.get('api_key') return self - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def get_model_params_setting_form(self, model_name): diff --git a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py index 3fbbde893..928ed3d2b 100644 --- a/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py +++ b/apps/setting/models_provider/impl/ollama_model_provider/ollama_model_provider.py @@ -12,11 +12,6 @@ from typing import Dict, Iterator from urllib.parse import urlparse, ParseResult import requests -from langchain.chat_models.base import BaseChatModel - -from common import forms -from common.exception.app_exception import AppApiException -from common.forms import BaseForm from common.util.file_util import get_file_content from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, ModelTypeConst, \ BaseModelCredential, DownModelChunk, DownModelChunkStatus, ValidCode, ModelInfoManage @@ -27,6 +22,7 @@ from setting.models_provider.impl.ollama_model_provider.model.embedding import O from setting.models_provider.impl.ollama_model_provider.model.image import OllamaImage from setting.models_provider.impl.ollama_model_provider.model.llm import OllamaChatModel from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ "" @@ -34,60 +30,73 @@ ollama_llm_model_credential = OllamaLLMModelCredential() model_info_list = [ ModelInfo( 'llama2', - 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 7B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。', + _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 7B pretrained models. 
Links to other models can be found in the index at the bottom.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'llama2:13b', - 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 13B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。', + _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 13B pretrained models. Links to other models can be found in the index at the bottom.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'llama2:70b', - 'Llama 2 是一组经过预训练和微调的生成文本模型,其规模从 70 亿到 700 亿个不等。这是 70B 预训练模型的存储库。其他模型的链接可以在底部的索引中找到。', + _('Llama 2 is a set of pretrained and fine-tuned generative text models ranging in size from 7 billion to 70 billion. This is a repository of 70B pretrained models. Links to other models can be found in the index at the bottom.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'llama2-chinese:13b', - '由于Llama2本身的中文对齐较弱,我们采用中文指令集,对meta-llama/Llama-2-13b-chat-hf进行LoRA微调,使其具备较强的中文对话能力。', + _('Since the Chinese alignment of Llama2 itself is weak, we use the Chinese instruction set to fine-tune meta-llama/Llama-2-13b-chat-hf with LoRA so that it has strong Chinese conversation capabilities.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'llama3:8b', - 'Meta Llama 3:迄今为止最有能力的公开产品LLM。80亿参数。', + _('Meta Llama 3: The most capable public product LLM to date. 8 billion parameters.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'llama3:70b', - 'Meta Llama 3:迄今为止最有能力的公开产品LLM。700亿参数。', + _('Meta Llama 3: The most capable public product LLM to date. 70 billion parameters.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:0.5b', - 'qwen 1.5 0.5b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。5亿参数。', + _(''' + Compared with previous versions, qwen 1.5 0.5b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 500 million parameters. + '''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:1.8b', - 'qwen 1.5 1.8b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。18亿参数。', + _(''' + +Compared with previous versions, qwen 1.5 1.8b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 1.8 billion parameters. + '''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:4b', - 'qwen 1.5 4b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。40亿参数。', + _(''' + +Compared with previous versions, qwen 1.5 4b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 4 billion parameters. + '''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:7b', - 'qwen 1.5 7b 相较于以往版本,模型与人类偏好的对齐程度以及多语1言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。70亿参数。', + _(''' + Compared with previous versions, qwen 1.5 7b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 7 billion parameters. 
+ '''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:14b', - 'qwen 1.5 14b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。140亿参数。', + _('''Compared with previous versions, qwen 1.5 14b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 14 billion parameters.'''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:32b', - 'qwen 1.5 32b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。320亿参数。', + _('''Compared with previous versions, qwen 1.5 32b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 32 billion parameters.'''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:72b', - 'qwen 1.5 72b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。720亿参数。', + _(''' +Compared with previous versions, qwen 1.5 72b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 72 billion parameters.'''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen:110b', - 'qwen 1.5 110b 相较于以往版本,模型与人类偏好的对齐程度以及多语言处理能力上有显著增强。所有规模的模型都支持32768个tokens的上下文长度。1100亿参数。', + _(''' + Compared with previous versions, qwen 1.5 110b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 110 billion parameters. + '''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'qwen2:72b-instruct', @@ -131,7 +140,9 @@ model_info_list = [ ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ModelInfo( 'phi3', - 'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。', + _(''' + Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open model. 
+ '''), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel), ] ollama_embedding_model_credential = OllamaEmbeddingModelCredential() @@ -139,7 +150,7 @@ ollama_image_model_credential = OllamaImageModelCredential() embedding_model_info = [ ModelInfo( 'nomic-embed-text', - '一个具有大令牌上下文窗口的高性能开放嵌入模型。', + _('A high-performance open embedding model with a large token context window.'), ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding), ] @@ -164,11 +175,11 @@ model_info_manage = ( .append_model_info_list(embedding_model_info) .append_default_model_info(ModelInfo( 'phi3', - 'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。', + _('Phi-3 Mini is Microsoft\'s 3.8B parameter, lightweight, state-of-the-art open model.'), ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel)) .append_default_model_info(ModelInfo( 'nomic-embed-text', - '一个具有大令牌上下文窗口的高性能开放嵌入模型。', + _('A high-performance open embedding model with a large token context window.'), ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding), ) .append_model_info_list(image_model_info) .append_default_model_info(image_model_info[0]) diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py index 15c1b2add..c3e1f22d4 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/embedding.py @@ -12,6 +12,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class OpenAIEmbeddingCredential(BaseForm, BaseModelCredential): @@ -19,22 +20,22 @@ class OpenAIEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=True): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -42,5 +43,5 @@ class OpenAIEmbeddingCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) diff --git 
a/apps/setting/models_provider/impl/openai_model_provider/credential/image.py b/apps/setting/models_provider/impl/openai_model_provider/credential/image.py index 9196bbede..0149848e4 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/image.py @@ -9,9 +9,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class OpenAIImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +20,7 @@ class OpenAIImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -29,31 +30,31 @@ class OpenAIImageModelParams(BaseForm): class OpenAIImageModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py index 8e606b754..03eecca44 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/llm.py @@ -14,10 +14,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from 
setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class OpenAILLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -25,7 +26,7 @@ class OpenAILLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -39,23 +40,23 @@ class OpenAILLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -63,7 +64,7 @@ class OpenAILLMModelCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def get_model_params_setting_form(self, model_name): diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py b/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py index e237a77bd..cfb8a71ad 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/stt.py @@ -5,22 +5,22 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class OpenAISTTModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = 
provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -30,7 +30,7 @@ class OpenAISTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py b/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py index 280d6cced..78fc438d7 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/tti.py @@ -9,11 +9,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class OpenAITTIModelParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', '图像生成端点允许您根据文本提示创建原始图像。使用 DALL·E 3 时,图像的尺寸可以为 1024x1024、1024x1792 或 1792x1024 像素。'), + TooltipLabel(_('Image size'), _('The image generation endpoint allows you to create raw images based on text prompts. When using the DALL·E 3, the image size can be 1024x1024, 1024x1792 or 1792x1024 pixels.')), required=True, default_value='1024x1024', option_list=[ @@ -26,7 +26,9 @@ class OpenAITTIModelParams(BaseForm): ) quality = forms.SingleSelect( - TooltipLabel('图片质量', '默认情况下,图像以标准质量生成,但使用 DALL·E 3 时,您可以设置质量:“hd”以增强细节。方形、标准质量的图像生成速度最快。'), + TooltipLabel(_('Picture quality'), _(''' +By default, images are produced in standard quality, but with DALL·E 3 you can set quality: "hd" to enhance detail. Square, standard quality images are generated fastest. 
+ ''')), required=True, default_value='standard', option_list=[ @@ -38,7 +40,7 @@ class OpenAITTIModelParams(BaseForm): ) n = forms.SliderField( - TooltipLabel('图片数量', '您可以使用 DALL·E 3 一次请求 1 个图像(通过发出并行请求来请求更多图像),或者使用带有 n 参数的 DALL·E 2 一次最多请求 10 个图像。'), + TooltipLabel(_('Number of pictures'), _('You can use DALL·E 3 to request 1 image at a time (requesting more images by issuing parallel requests), or use DALL·E 2 with the n parameter to request up to 10 images at a time.')), required=True, default_value=1, _min=1, _max=10, @@ -47,19 +49,19 @@ class OpenAITTIModelParams(BaseForm): class OpenAITextToImageModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -70,7 +72,7 @@ class OpenAITextToImageModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py b/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py index 4f607a9e7..e8409df5e 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py +++ b/apps/setting/models_provider/impl/openai_model_provider/credential/tts.py @@ -5,11 +5,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class OpenAITTSModelGeneralParams(BaseForm): # alloy, echo, fable, onyx, nova, shimmer voice = forms.SingleSelect( - TooltipLabel('Voice', '尝试不同的声音(合金、回声、寓言、缟玛瑙、新星和闪光),找到一种适合您所需的音调和听众的声音。当前的语音针对英语进行了优化。'), + TooltipLabel('Voice', _('Try out the different voices (Alloy, Echo, Fable, Onyx, Nova, and Shimmer) to find one that suits your desired tone and audience.
The current voiceover is optimized for English.')), required=True, default_value='alloy', text_field='value', value_field='value', @@ -24,19 +25,19 @@ class OpenAITTSModelGeneralParams(BaseForm): class OpenAITTSModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -46,7 +47,7 @@ class OpenAITTSModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py index 60c5318ce..a06d3b75f 100644 --- a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py +++ b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py @@ -24,6 +24,7 @@ from setting.models_provider.impl.openai_model_provider.model.stt import OpenAIS from setting.models_provider.impl.openai_model_provider.model.tti import OpenAITextToImage from setting.models_provider.impl.openai_model_provider.model.tts import OpenAITextToSpeech from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ openai_llm_model_credential = OpenAILLMModelCredential() openai_stt_model_credential = OpenAISTTModelCredential() @@ -31,47 +32,47 @@ openai_tts_model_credential = OpenAITTSModelCredential() openai_image_model_credential = OpenAIImageModelCredential() openai_tti_model_credential = OpenAITextToImageModelCredential() model_info_list = [ - ModelInfo('gpt-3.5-turbo', '最新的gpt-3.5-turbo,随OpenAI调整而更新', ModelTypeConst.LLM, + ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel ), - ModelInfo('gpt-4', '最新的gpt-4,随OpenAI调整而更新', ModelTypeConst.LLM, openai_llm_model_credential, + ModelInfo('gpt-4', _('Latest gpt-4, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), - ModelInfo('gpt-4o', '最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新', + ModelInfo('gpt-4o', _('The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), - ModelInfo('gpt-4o-mini', '最新的gpt-4o-mini,比gpt-4o更便宜、更快,随OpenAI调整而更新', + ModelInfo('gpt-4o-mini', _('The latest 
gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), - ModelInfo('gpt-4-turbo', '最新的gpt-4-turbo,随OpenAI调整而更新', ModelTypeConst.LLM, + ModelInfo('gpt-4-turbo', _('The latest gpt-4-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), - ModelInfo('gpt-4-turbo-preview', '最新的gpt-4-turbo-preview,随OpenAI调整而更新', + ModelInfo('gpt-4-turbo-preview', _('The latest gpt-4-turbo-preview, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), ModelInfo('gpt-3.5-turbo-0125', - '2024年1月25日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM, + _('gpt-3.5-turbo snapshot on January 25, 2024, supporting context length 16,385 tokens'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), ModelInfo('gpt-3.5-turbo-1106', - '2023年11月6日的gpt-3.5-turbo快照,支持上下文长度16,385 tokens', ModelTypeConst.LLM, + _('gpt-3.5-turbo snapshot on November 6, 2023, supporting context length 16,385 tokens'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), ModelInfo('gpt-3.5-turbo-0613', - '[Legacy] 2023年6月13日的gpt-3.5-turbo快照,将于2024年6月13日弃用', + _('[Legacy] gpt-3.5-turbo snapshot on June 13, 2023, will be deprecated on June 13, 2024'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), ModelInfo('gpt-4o-2024-05-13', - '2024年5月13日的gpt-4o快照,支持上下文长度128,000 tokens', + _('gpt-4o snapshot on May 13, 2024, supporting context length 128,000 tokens'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), ModelInfo('gpt-4-turbo-2024-04-09', - '2024年4月9日的gpt-4-turbo快照,支持上下文长度128,000 tokens', + _('gpt-4-turbo snapshot on April 9, 2024, supporting context length 128,000 tokens'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), - ModelInfo('gpt-4-0125-preview', '2024年1月25日的gpt-4-turbo快照,支持上下文长度128,000 tokens', + ModelInfo('gpt-4-0125-preview', _('gpt-4-turbo snapshot on January 25, 2024, supporting context length 128,000 tokens'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), - ModelInfo('gpt-4-1106-preview', '2023年11月6日的gpt-4-turbo快照,支持上下文长度128,000 tokens', + ModelInfo('gpt-4-1106-preview', _('gpt-4-turbo snapshot on November 6, 2023, supporting context length 128,000 tokens'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel), ModelInfo('whisper-1', '', @@ -95,10 +96,10 @@ model_info_embedding_list = [ ] model_info_image_list = [ - ModelInfo('gpt-4o', '最新的GPT-4o,比gpt-4-turbo更便宜、更快,随OpenAI调整而更新', + ModelInfo('gpt-4o', _('The latest GPT-4o, cheaper and faster than gpt-4-turbo, updated with OpenAI adjustments'), ModelTypeConst.IMAGE, openai_image_model_credential, OpenAIImage), - ModelInfo('gpt-4o-mini', '最新的gpt-4o-mini,比gpt-4o更便宜、更快,随OpenAI调整而更新', + ModelInfo('gpt-4o-mini', _('The latest gpt-4o-mini, cheaper and faster than gpt-4o, updated with OpenAI adjustments'), ModelTypeConst.IMAGE, openai_image_model_credential, OpenAIImage), ] @@ -115,7 +116,7 @@ model_info_tti_list = [ model_info_manage = ( ModelInfoManage.builder() .append_model_info_list(model_info_list) - .append_default_model_info(ModelInfo('gpt-3.5-turbo', '最新的gpt-3.5-turbo,随OpenAI调整而更新', ModelTypeConst.LLM, + .append_default_model_info(ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM, openai_llm_model_credential, OpenAIChatModel )) .append_model_info_list(model_info_embedding_list) diff --git 
a/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py index d3205d8ab..399f66845 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/image.py @@ -16,10 +16,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class QwenModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=1.0, _min=0.1, _max=1.9, @@ -27,7 +27,7 @@ class QwenModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -41,23 +41,23 @@ class QwenVLModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py index 68745a175..0cc026089 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/llm.py @@ -14,10 +14,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class QwenModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower 
values make it more focused and deterministic')), required=True, default_value=1.0, _min=0.1, _max=1.9, @@ -25,7 +25,7 @@ class QwenModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -39,21 +39,21 @@ class OpenAILLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py b/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py index 8fc39db7f..6a7f6fcf1 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/credential/tti.py @@ -16,11 +16,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - - +from django.utils.translation import gettext_lazy as _ class QwenModelParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', '指定生成图片的尺寸, 如: 1024x1024'), + TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')), required=True, default_value='1024*1024', option_list=[ @@ -32,27 +31,27 @@ class QwenModelParams(BaseForm): text_field='label', value_field='value') n = forms.SliderField( - TooltipLabel('图片数量', '指定生成图片的数量'), + TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')), required=True, default_value=1, _min=1, _max=4, _step=1, precision=0) style = forms.SingleSelect( - TooltipLabel('风格', '指定生成图片的风格'), + TooltipLabel(_('Style'), _('Specify the style of generated images')), required=True, default_value='', option_list=[ - {'value': '', 'label': '默认值,由模型随机输出图像风格'}, - {'value': '', 'label': '摄影'}, - {'value': '', 'label': '人像写真'}, - {'value': '<3d cartoon>', 'label': '3D卡通'}, - {'value': '', 'label': '动画'}, - {'value': '', 'label': '油画'}, - {'value': '', 'label': '水彩'}, - {'value': '', 'label': '素描'}, - {'value': '', 'label': '中国画'}, - {'value': '', 'label': '扁平插画'}, + {'value': '', 'label': _('Default value, the image style is randomly output by the model')}, + {'value': '', 'label': 
_('photography')}, + {'value': '', 'label': _('Portraits')}, + {'value': '<3d cartoon>', 'label': _('3D cartoon')}, + {'value': '', 'label': _('animation')}, + {'value': '', 'label': _('painting')}, + {'value': '', 'label': _('watercolor')}, + {'value': '', 'label': _('sketch')}, + {'value': '', 'label': _('Chinese painting')}, + {'value': '', 'label': _('flat illustration')}, ], text_field='label', value_field='value' @@ -65,11 +64,11 @@ class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -80,7 +79,7 @@ class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py b/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py index c2fd32877..c070363b9 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/model/tti.py @@ -8,7 +8,7 @@ from langchain_core.messages import HumanMessage from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tti import BaseTextToImage - +from django.utils.translation import gettext_lazy as _ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): api_key: str @@ -39,7 +39,7 @@ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): def check_auth(self): chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max') - chat.invoke([HumanMessage([{"type": "text", "text": "你好"}])]) + chat.invoke([HumanMessage([{"type": "text", "text": _('Hello')}])]) def generate_image(self, prompt: str, negative_prompt: str = None): # api_base='https://dashscope.aliyuncs.com/compatible-mode/v1', diff --git a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py index de91c9fab..98e0a52de 100644 --- a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py +++ b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py @@ -19,6 +19,7 @@ from setting.models_provider.impl.qwen_model_provider.model.image import QwenVLC from setting.models_provider.impl.qwen_model_provider.model.llm import QwenChatModel from setting.models_provider.impl.qwen_model_provider.model.tti import QwenTextToImageModel from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ qwen_model_credential = OpenAILLMModelCredential() qwenvl_model_credential = QwenVLModelCredential() @@ -36,7 +37,7 @@ module_info_vl_list = [ ] 
module_info_tti_list = [ ModelInfo('wanx-v1', - '通义万相-文本生成图像大模型,支持中英文双语输入,支持输入参考图片进行参考内容或者参考风格迁移,重点风格包括但不限于水彩、油画、中国画、素描、扁平插画、二次元、3D卡通。', + _('Tongyi Wanxiang - a large text-to-image model that supports bilingual input in Chinese and English and accepts reference images for content or style transfer. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, anime, and 3D cartoon.'), ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel), ] @@ -59,6 +60,6 @@ class QwenModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_qwen_provider', name='通义千问', icon=get_file_content( + return ModelProvideInfo(provider='model_qwen_provider', name=_('Tongyi Qianwen'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'qwen_model_provider', 'icon', 'qwen_icon_svg'))) diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py index 9f4d7e58b..2d3a8e1d0 100644 --- a/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/embedding.py @@ -4,7 +4,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class TencentEmbeddingCredential(BaseForm, BaseModelCredential): @@ -12,16 +12,16 @@ class TencentEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=True) -> bool: model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) self.valid_form(model_credential) try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py index 9f0634833..6d2b74f37 100644 --- a/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/image.py @@ -16,10 +16,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class QwenModelParams(BaseForm): - temperature =
forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=1.0, _min=0.1, _max=1.9, @@ -27,7 +27,7 @@ class QwenModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -41,23 +41,23 @@ class TencentVisionModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py index fadc7602e..531bfff46 100644 --- a/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/llm.py @@ -5,10 +5,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class TencentLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.5, _min=0.1, _max=2.0, @@ -23,7 +23,7 @@ class TencentLLMModelCredential(BaseForm, BaseModelCredential): def _validate_model_type(cls, model_type, provider, raise_exception=False): if not any(mt['value'] == model_type for mt in provider.get_model_type_list()): if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) return False return True @@ -32,7 +32,7 @@ class TencentLLMModelCredential(BaseForm, BaseModelCredential): missing_keys = [key for key in cls.REQUIRED_FIELDS if key not in model_credential] if 
missing_keys: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{", ".join(missing_keys)} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{keys} is required').format(keys=", ".join(missing_keys))) return False return True @@ -42,10 +42,10 @@ class TencentLLMModelCredential(BaseForm, BaseModelCredential): return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) return False return True diff --git a/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py b/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py index c0c8d583d..77410c182 100644 --- a/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/tencent_model_provider/credential/tti.py @@ -5,47 +5,47 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class TencentTTIModelParams(BaseForm): Style = forms.SingleSelect( - TooltipLabel('绘画风格', '不传默认使用201(日系动漫风格)'), + TooltipLabel(_('painting style'), _('If not passed, the default value is 201 (Japanese anime style)')), required=True, default_value='201', option_list=[ - {'value': '000', 'label': '不限定风格'}, - {'value': '101', 'label': '水墨画'}, - {'value': '102', 'label': '概念艺术'}, - {'value': '103', 'label': '油画1'}, - {'value': '118', 'label': '油画2(梵高)'}, - {'value': '104', 'label': '水彩画'}, - {'value': '105', 'label': '像素画'}, - {'value': '106', 'label': '厚涂风格'}, - {'value': '107', 'label': '插图'}, - {'value': '108', 'label': '剪纸风格'}, - {'value': '109', 'label': '印象派1(莫奈)'}, - {'value': '119', 'label': '印象派2'}, + {'value': '000', 'label': _('Not limited to style')}, + {'value': '101', 'label': _('ink painting')}, + {'value': '102', 'label': _('concept art')}, + {'value': '103', 'label': _('Oil painting 1')}, + {'value': '118', 'label': _('Oil Painting 2 (Van Gogh)')}, + {'value': '104', 'label': _('watercolor painting')}, + {'value': '105', 'label': _('pixel art')}, + {'value': '106', 'label': _('impasto style')}, + {'value': '107', 'label': _('illustration')}, + {'value': '108', 'label': _('paper cut style')}, + {'value': '109', 'label': _('Impressionism 1 (Monet)')}, + {'value': '119', 'label': _('Impressionism 2')}, {'value': '110', 'label': '2.5D'}, - {'value': '111', 'label': '古典肖像画'}, - {'value': '112', 'label': '黑白素描画'}, - {'value': '113', 'label': '赛博朋克'}, - {'value': '114', 'label': '科幻风格'}, - {'value': '115', 'label': '暗黑风格'}, + {'value': '111', 'label': _('classical portraiture')}, + {'value': '112', 'label': _('black and white sketch')}, + {'value': '113', 'label': _('cyberpunk')}, + {'value': '114', 'label': _('science fiction style')}, + {'value': '115', 'label': _('dark style')}, {'value': '116', 'label': '3D'}, - {'value': '117', 'label': '蒸汽波'}, - {'value': '201', 'label': '日系动漫'}, - {'value': '202', 'label': '怪兽风格'}, - {'value': '203', 'label': '唯美古风'}, - {'value': '204', 'label': '复古动漫'}, - 
{'value': '301', 'label': '游戏卡通手绘'}, - {'value': '401', 'label': '通用写实风格'}, + {'value': '117', 'label': _('vaporwave')}, + {'value': '201', 'label': _('Japanese animation')}, + {'value': '202', 'label': _('monster style')}, + {'value': '203', 'label': _('Beautiful ancient style')}, + {'value': '204', 'label': _('retro anime')}, + {'value': '301', 'label': _('Game cartoon hand drawing')}, + {'value': '401', 'label': _('Universal realistic style')}, ], value_field='value', text_field='label' ) Resolution = forms.SingleSelect( - TooltipLabel('生成图分辨率', '不传默认使用768:768。'), + TooltipLabel(_('Generate image resolution'), _('If not transmitted, the default value is 768:768.')), required=True, default_value='768:768', option_list=[ @@ -72,7 +72,7 @@ class TencentTTIModelCredential(BaseForm, BaseModelCredential): def _validate_model_type(cls, model_type, provider, raise_exception=False): if not any(mt['value'] == model_type for mt in provider.get_model_type_list()): if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) return False return True @@ -81,7 +81,7 @@ class TencentTTIModelCredential(BaseForm, BaseModelCredential): missing_keys = [key for key in cls.REQUIRED_FIELDS if key not in model_credential] if missing_keys: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{", ".join(missing_keys)} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{keys} is required').format(keys=", ".join(missing_keys))) return False return True @@ -94,7 +94,7 @@ class TencentTTIModelCredential(BaseForm, BaseModelCredential): model.check_auth() except Exception as e: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) return False return True diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py b/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py index e8d57dc13..d1e60c78c 100644 --- a/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py +++ b/apps/setting/models_provider/impl/tencent_model_provider/model/tti.py @@ -12,7 +12,7 @@ from tencentcloud.hunyuan.v20230901 import hunyuan_client, models from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tti import BaseTextToImage from setting.models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan - +from django.utils.translation import gettext_lazy as _ class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage): hunyuan_secret_id: str @@ -50,7 +50,7 @@ class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage): hunyuan_secret_id=self.hunyuan_secret_id, hunyuan_secret_key=self.hunyuan_secret_key, model="hunyuan-standard") - res = chat.invoke('你好') + res = chat.invoke(_('Hello')) # print(res) def generate_image(self, prompt: str, negative_prompt: str = None): diff --git a/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py b/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py index c7402c07a..9dc5ed3c9 100644 --- a/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py +++ 
b/apps/setting/models_provider/impl/tencent_model_provider/tencent_model_provider.py @@ -15,7 +15,7 @@ from setting.models_provider.impl.tencent_model_provider.model.image import Tenc from setting.models_provider.impl.tencent_model_provider.model.llm import TencentModel from setting.models_provider.impl.tencent_model_provider.model.tti import TencentTextToImageModel from smartdoc.conf import PROJECT_DIR - +from django.utils.translation import gettext_lazy as _ def _create_model_info(model_name, description, model_type, credential_class, model_class): return ModelInfo( @@ -35,38 +35,44 @@ def _get_tencent_icon_path(): def _initialize_model_info(): model_info_list = [_create_model_info( 'hunyuan-pro', - '当前混元模型中效果最优版本,万亿级参数规模 MOE-32K 长文模型。在各种 benchmark 上达到绝对领先的水平,复杂指令和推理,具备复杂数学能力,支持 functioncall,在多语言翻译、金融法律医疗等领域应用重点优化', + _('The best-performing version of the current Hunyuan models, a trillion-parameter MOE-32K long-text model. It reaches an absolutely leading level on various benchmarks, handles complex instructions and reasoning, has complex mathematical capabilities, supports function call, and is specially optimized for applications in fields such as multilingual translation, finance, law, and healthcare'), ModelTypeConst.LLM, TencentLLMModelCredential, TencentModel ), _create_model_info( 'hunyuan-standard', - '采用更优的路由策略,同时缓解了负载均衡和专家趋同的问题。长文方面,大海捞针指标达到99.9%', + _('Adopts a better routing strategy while alleviating the problems of load balancing and expert convergence. For long text, the needle-in-a-haystack metric reaches 99.9%'), ModelTypeConst.LLM, TencentLLMModelCredential, TencentModel), _create_model_info( 'hunyuan-lite', - '升级为 MOE 结构,上下文窗口为 256k ,在 NLP,代码,数学,行业等多项评测集上领先众多开源模型', + _('Upgraded to MOE structure, the context window is 256k, leading many open source models in multiple evaluation sets such as NLP, code, mathematics, industry, etc.'), ModelTypeConst.LLM, TencentLLMModelCredential, TencentModel), _create_model_info( 'hunyuan-role', - '混元最新版角色扮演模型,混元官方精调训练推出的角色扮演模型,基于混元模型结合角色扮演场景数据集进行增训,在角色扮演场景具有更好的基础效果', + _(''' + Hunyuan's latest role-playing model, officially fine-tuned and released by Hunyuan, is further trained on the Hunyuan base model with role-playing scenario datasets and delivers better out-of-the-box results in role-playing scenarios. + '''), ModelTypeConst.LLM, TencentLLMModelCredential, TencentModel), _create_model_info( 'hunyuan-functioncall', - '混元最新 MOE 架构 FunctionCall 模型,经过高质量的 FunctionCall 数据训练,上下文窗口达 32K,在多个维度的评测指标上处于领先。', + _(''' + Hunyuan's latest MOE architecture FunctionCall model has been trained with high-quality FunctionCall data and has a context window of 32K, leading in multiple dimensions of evaluation indicators. + '''), ModelTypeConst.LLM, TencentLLMModelCredential, TencentModel), _create_model_info( 'hunyuan-code', - '混元最新代码生成模型,经过 200B 高质量代码数据增训基座模型,迭代半年高质量 SFT 数据训练,上下文长窗口长度增大到 8K,五大语言代码生成自动评测指标上位居前列;五大语言10项考量各方面综合代码任务人工高质量评测上,性能处于第一梯队', + _(''' + Hunyuan's latest code generation model. After augmenting the base model with 200B of high-quality code data and half a year of iteration on high-quality SFT data, the context window length has been increased to 8K. It ranks among the top on automatic code-generation evaluation metrics for the five major languages, and its performance is in the first tier in manual high-quality evaluations of 10 comprehensive code tasks across those languages.
+ '''), ModelTypeConst.LLM, TencentLLMModelCredential, TencentModel), @@ -74,7 +80,9 @@ def _initialize_model_info(): tencent_embedding_model_info = _create_model_info( 'hunyuan-embedding', - '腾讯混元 Embedding 接口,可以将文本转化为高质量的向量数据。向量维度为1024维。', + _(''' + Tencent's Hunyuan Embedding interface can convert text into high-quality vector data. The vector dimension is 1024. + '''), ModelTypeConst.EMBEDDING, TencentEmbeddingCredential, TencentEmbeddingModel @@ -84,14 +92,14 @@ def _initialize_model_info(): model_info_vision_list = [_create_model_info( 'hunyuan-vision', - '混元视觉模型', + _('Hunyuan vision model'), ModelTypeConst.IMAGE, TencentVisionModelCredential, TencentVision)] model_info_tti_list = [_create_model_info( 'hunyuan-dit', - '混元生图模型', + _('Hunyuan text-to-image model'), ModelTypeConst.TTI, TencentTTIModelCredential, TencentTextToImageModel)] @@ -122,6 +130,6 @@ class TencentModelProvider(IModelProvider): icon_data = get_file_content(icon_path) return ModelProvideInfo( provider='model_tencent_provider', - name='腾讯混元', + name=_('Tencent Hunyuan'), icon=icon_data ) diff --git a/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py index 5768d5f00..ba6e2235a 100644 --- a/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/vllm_model_provider/credential/llm.py @@ -8,10 +8,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +20,7 @@ class VLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -32,17 +33,17 @@ class VLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) try: model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key')) except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) exist = provider.get_model_info_by_name(model_list, model_name) if len(exist) == 0: - raise AppApiException(ValidCode.valid_error.value, "模型不存在,请先下载模型") + raise AppApiException(ValidCode.valid_error.value, _('The model does not exist, please download the model first')) model = provider.get_model(model_type, model_name, model_credential, **model_params) try: - res = model.invoke([HumanMessage(content='你好')]) + res =
model.invoke([HumanMessage(content=_('Hello'))]) print(res) except Exception as e: print(e) @@ -54,11 +55,11 @@ class VLLMModelCredential(BaseForm, BaseModelCredential): def build_model(self, model_info: Dict[str, object]): for key in ['api_key', 'model']: if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') + raise AppApiException(500, _('{key} is required').format(key=key)) self.api_key = model_info.get('api_key') return self - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def get_model_params_setting_form(self, model_name): diff --git a/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py b/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py index 7912fff96..aaeec9666 100644 --- a/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py +++ b/apps/setting/models_provider/impl/vllm_model_provider/vllm_model_provider.py @@ -10,19 +10,20 @@ from setting.models_provider.base_model_provider import IModelProvider, ModelPro from setting.models_provider.impl.vllm_model_provider.credential.llm import VLLMModelCredential from setting.models_provider.impl.vllm_model_provider.model.llm import VllmChatModel from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ v_llm_model_credential = VLLMModelCredential() model_info_list = [ - ModelInfo('facebook/opt-125m', 'Facebook的125M参数模型', ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), - ModelInfo('BAAI/Aquila-7B', 'BAAI的7B参数模型', ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), - ModelInfo('BAAI/AquilaChat-7B', 'BAAI的13B参数模型', ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), + ModelInfo('facebook/opt-125m', _('Facebook’s 125M parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), + ModelInfo('BAAI/Aquila-7B', _('BAAI’s 7B parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), + ModelInfo('BAAI/AquilaChat-7B', _('BAAI’s 13B parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel), ] model_info_manage = (ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info( ModelInfo( 'facebook/opt-125m', - 'Facebook的125M参数模型', + _('Facebook’s 125M parameter model'), ModelTypeConst.LLM, v_llm_model_credential, VllmChatModel)) .build()) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py index 58f70c02e..d3dbea285 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/embedding.py @@ -12,6 +12,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VolcanicEmbeddingCredential(BaseForm, BaseModelCredential): @@ -19,22 +20,22 @@ class VolcanicEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=True): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value,
f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -42,5 +43,5 @@ class VolcanicEmbeddingCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'api_key': super().encryption(model.get('api_key', ''))} - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py index 08a85a970..328f7b3ab 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/image.py @@ -9,9 +9,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VolcanicEngineImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.95, _min=0.1, _max=1.0, @@ -19,7 +20,7 @@ class VolcanicEngineImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -28,30 +29,30 @@ class VolcanicEngineImageModelParams(BaseForm): class VolcanicEngineImageModelCredential(BaseForm, BaseModelCredential): api_key = forms.PasswordInputField('API Key', required=True) - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key', 'api_base']: if key not in 
model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py index 0fb352422..5dcd60685 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/llm.py @@ -14,10 +14,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VolcanicEngineLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.3, _min=0.1, _max=1.0, @@ -25,7 +26,7 @@ class VolcanicEngineLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -39,23 +40,23 @@ class VolcanicEngineLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['access_key_id', 'secret_access_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.invoke([HumanMessage(content='你好')]) + res = model.invoke([HumanMessage(content=_('Hello'))]) print(res) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return 
True diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py index 37980306e..bcf2954ec 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/stt.py @@ -6,10 +6,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VolcanicEngineSTTModelCredential(BaseForm, BaseModelCredential): - volcanic_api_url = forms.TextInputField('API 域名', required=True, default_value='wss://openspeech.bytedance.com/api/v2/asr') + volcanic_api_url = forms.TextInputField('API Url', required=True, default_value='wss://openspeech.bytedance.com/api/v2/asr') volcanic_app_id = forms.TextInputField('App ID', required=True) volcanic_token = forms.PasswordInputField('Access Token', required=True) volcanic_cluster = forms.TextInputField('Cluster ID', required=True) @@ -18,12 +19,12 @@ class VolcanicEngineSTTModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['volcanic_api_url', 'volcanic_app_id', 'volcanic_token', 'volcanic_cluster']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -33,7 +34,7 @@ class VolcanicEngineSTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py index 98b78a1e9..5a68c238d 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tti.py @@ -6,12 +6,13 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VolcanicEngineTTIModelGeneralParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', - '宽、高与512差距过大,则出图效果不佳、延迟过长概率显著增加。超分前建议比例及对应宽高:width*height'), + TooltipLabel(_('Image size'), + _('If the gap between width, height and 512 is too large, the picture rendering effect will be poor and the probability of excessive delay will increase significantly. 
Recommended ratios and corresponding width and height before super-resolution: width*height')), required=True, default_value='512*512', option_list=[ @@ -35,12 +36,12 @@ class VolcanicEngineTTIModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['access_key', 'secret_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -50,7 +51,7 @@ if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py index 7f157b999..95adc4ffe 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/credential/tts.py @@ -6,29 +6,30 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class VolcanicEngineTTSModelGeneralParams(BaseForm): voice_type = forms.SingleSelect( - TooltipLabel('音色', '中文音色可支持中英文混合场景'), + TooltipLabel(_('timbre'), _('Chinese voices support mixed Chinese and English content')), required=True, default_value='BV002_streaming', text_field='value', value_field='value', option_list=[ - {'text': '灿灿 2.0', 'value': 'BV700_V2_streaming'}, - {'text': '炀炀', 'value': 'BV705_streaming'}, - {'text': '擎苍 2.0', 'value': 'BV701_V2_streaming'}, - {'text': '通用女声 2.0', 'value': 'BV001_V2_streaming'}, - {'text': '灿灿', 'value': 'BV700_streaming'}, - {'text': '超自然音色-梓梓2.0', 'value': 'BV406_V2_streaming'}, - {'text': '超自然音色-梓梓', 'value': 'BV406_streaming'}, - {'text': '超自然音色-燃燃2.0', 'value': 'BV407_V2_streaming'}, - {'text': '超自然音色-燃燃', 'value': 'BV407_streaming'}, - {'text': '通用女声', 'value': 'BV001_streaming'}, - {'text': '通用男声', 'value': 'BV002_streaming'}, + {'text': 'CanCan 2.0', 'value': 'BV700_V2_streaming'}, + {'text': 'Yangyang', 'value': 'BV705_streaming'}, + {'text': 'Qingcang 2.0', 'value': 'BV701_V2_streaming'}, + {'text': _('Universal female voice 2.0'), 'value': 'BV001_V2_streaming'}, + {'text': 'CanCan', 'value': 'BV700_streaming'}, + {'text': _('Supernatural timbre-ZiZi 2.0'), 'value': 'BV406_V2_streaming'}, + {'text': _('Supernatural timbre-ZiZi'), 'value': 'BV406_streaming'}, + {'text': _('Supernatural timbre-Ranran 2.0'), 'value': 'BV407_V2_streaming'}, + {'text': _('Supernatural timbre-Ranran'), 'value': 'BV407_streaming'}, + {'text': _('Universal female voice'), 'value': 'BV001_streaming'}, +
{'text': _('Universal male voice'), 'value': 'BV002_streaming'}, ]) speed_ratio = forms.SliderField( - TooltipLabel('语速', '[0.2,3],默认为1,通常保留一位小数即可'), + TooltipLabel(_('speaking speed'), _('[0.2,3], the default is 1, usually one decimal place is enough')), required=True, default_value=1, _min=0.2, _max=3, @@ -37,7 +38,7 @@ class VolcanicEngineTTSModelGeneralParams(BaseForm): class VolcanicEngineTTSModelCredential(BaseForm, BaseModelCredential): - volcanic_api_url = forms.TextInputField('API 域名', required=True, default_value='wss://openspeech.bytedance.com/api/v1/tts/ws_binary') + volcanic_api_url = forms.TextInputField('API Url', required=True, default_value='wss://openspeech.bytedance.com/api/v1/tts/ws_binary') volcanic_app_id = forms.TextInputField('App ID', required=True) volcanic_token = forms.PasswordInputField('Access Token', required=True) volcanic_cluster = forms.TextInputField('Cluster ID', required=True) @@ -46,12 +47,12 @@ class VolcanicEngineTTSModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['volcanic_api_url', 'volcanic_app_id', 'volcanic_token', 'volcanic_cluster']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -61,7 +62,7 @@ class VolcanicEngineTTSModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py index acb755bb3..c462d69a3 100644 --- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/tts.py @@ -21,6 +21,7 @@ import websockets from common.util.common import _remove_empty_lines from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tts import BaseTextToSpeech +from django.utils.translation import gettext_lazy as _ MESSAGE_TYPES = {11: "audio-only server response", 12: "frontend server response", 15: "error message from server"} MESSAGE_TYPE_SPECIFIC_FLAGS = {0: "no sequence number", 1: "sequence number > 0", @@ -72,7 +73,7 @@ class VolcanicEngineTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): ) def check_auth(self): - self.text_to_speech('你好') + self.text_to_speech(_('Hello')) def text_to_speech(self, text): request_json = { diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py index dc2c66094..e3dee63d3 100644 --- 
a/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py +++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/volcanic_engine_model_provider.py @@ -28,6 +28,7 @@ from setting.models_provider.impl.volcanic_engine_model_provider.model.tti impor from setting.models_provider.impl.volcanic_engine_model_provider.model.tts import VolcanicEngineTextToSpeech from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ volcanic_engine_llm_model_credential = OpenAILLMModelCredential() volcanic_engine_stt_model_credential = VolcanicEngineSTTModelCredential() @@ -37,12 +38,12 @@ volcanic_engine_tti_model_credential = VolcanicEngineTTIModelCredential() model_info_list = [ ModelInfo('ep-xxxxxxxxxx-yyyy', - '用户前往火山方舟的模型推理页面创建推理接入点,这里需要输入ep-xxxxxxxxxx-yyyy进行调用', + _('The user goes to the model inference page of Volcano Ark to create an inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'), ModelTypeConst.LLM, volcanic_engine_llm_model_credential, VolcanicEngineChatModel ), ModelInfo('ep-xxxxxxxxxx-yyyy', - '用户前往火山方舟的模型推理页面创建推理接入点,这里需要输入ep-xxxxxxxxxx-yyyy进行调用', + _('The user goes to the model inference page of Volcano Ark to create an inference access point. Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'), ModelTypeConst.IMAGE, volcanic_engine_image_model_credential, VolcanicEngineImage ), @@ -57,27 +58,27 @@ model_info_list = [ volcanic_engine_tts_model_credential, VolcanicEngineTextToSpeech ), ModelInfo('general_v2.0', - '通用2.0-文生图', + _('General 2.0 - Text-to-Image'), ModelTypeConst.TTI, volcanic_engine_tti_model_credential, VolcanicEngineTextToImage ), ModelInfo('general_v2.0_L', - '通用2.0Pro-文生图', + _('General 2.0 Pro - Text-to-Image'), ModelTypeConst.TTI, volcanic_engine_tti_model_credential, VolcanicEngineTextToImage ), ModelInfo('general_v1.4', - '通用1.4-文生图', + _('General 1.4 - Text-to-Image'), ModelTypeConst.TTI, volcanic_engine_tti_model_credential, VolcanicEngineTextToImage ), ModelInfo('anime_v1.3', - '动漫1.3.0-文生图', + _('Anime 1.3.0 - Text-to-Image'), ModelTypeConst.TTI, volcanic_engine_tti_model_credential, VolcanicEngineTextToImage ), ModelInfo('anime_v1.3.1', - '动漫1.3.1-文生图', + _('Anime 1.3.1 - Text-to-Image'), ModelTypeConst.TTI, volcanic_engine_tti_model_credential, VolcanicEngineTextToImage ), @@ -86,7 +87,7 @@ model_info_list = [ open_ai_embedding_credential = VolcanicEmbeddingCredential() model_info_embedding_list = [ ModelInfo('ep-xxxxxxxxxx-yyyy', - '用户前往火山方舟的模型推理页面创建推理接入点,这里需要输入ep-xxxxxxxxxx-yyyy进行调用', + _('The user goes to the model inference page of Volcano Ark to create an inference access point. 
Here, you need to enter ep-xxxxxxxxxx-yyyy to call it.'), ModelTypeConst.EMBEDDING, open_ai_embedding_credential, VolcanicEngineEmbeddingModel) ] @@ -111,7 +112,7 @@ class VolcanicEngineModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_volcanic_engine_provider', name='火山引擎', icon=get_file_content( + return ModelProvideInfo(provider='model_volcanic_engine_provider', name=_('Volcano Engine'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'volcanic_engine_model_provider', 'icon', 'volcanic_engine_icon_svg'))) diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py index 9b6c780ba..2715663f4 100644 --- a/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/embedding.py @@ -12,6 +12,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class QianfanEmbeddingCredential(BaseForm, BaseModelCredential): @@ -20,16 +21,16 @@ class QianfanEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) self.valid_form(model_credential) try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py index 9a24becb1..fa687ed9f 100644 --- a/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/wenxin_model_provider/credential/llm.py @@ -14,10 +14,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class WenxinLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.95, _min=0.1, _max=1.0, @@ -25,7 +26,7 @@ class WenxinLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), 
_('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=2, _max=100000, @@ -38,20 +39,20 @@ class WenxinLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) model = provider.get_model(model_type, model_name, model_credential, **model_params) model_info = [model.lower() for model in model.client.models()] if not model_info.__contains__(model_name.lower()): - raise AppApiException(ValidCode.valid_error.value, f'{model_name} 模型不支持') + raise AppApiException(ValidCode.valid_error.value, _('Model {model_name} is not supported').format(model_name=model_name)) for key in ['api_key', 'secret_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model.invoke( - [HumanMessage(content='你好')]) + [HumanMessage(content=_('Hello'))]) except Exception as e: raise e return True @@ -62,7 +63,7 @@ class WenxinLLMModelCredential(BaseForm, BaseModelCredential): def build_model(self, model_info: Dict[str, object]): for key in ['api_key', 'secret_key', 'model']: if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') + raise AppApiException(500, _('{key} is required').format(key=key)) self.api_key = model_info.get('api_key') self.secret_key = model_info.get('secret_key') return self diff --git a/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py b/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py index 7944d703e..f8d2b45f4 100644 --- a/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py +++ b/apps/setting/models_provider/impl/wenxin_model_provider/wenxin_model_provider.py @@ -16,40 +16,41 @@ from setting.models_provider.impl.wenxin_model_provider.credential.llm import We from setting.models_provider.impl.wenxin_model_provider.model.embedding import QianfanEmbeddings from setting.models_provider.impl.wenxin_model_provider.model.llm import QianfanChatModel from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ win_xin_llm_model_credential = WenxinLLMModelCredential() qianfan_embedding_credential = QianfanEmbeddingCredential() model_info_list = [ModelInfo('ERNIE-Bot-4', - 'ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。', + _('ERNIE-Bot-4 is a large language model independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('ERNIE-Bot', - 'ERNIE-Bot是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。', + _('ERNIE-Bot is a large language model independently developed by Baidu. 
It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('ERNIE-Bot-turbo', - 'ERNIE-Bot-turbo是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力,响应速度更快。', + _('ERNIE-Bot-turbo is a large language model independently developed by Baidu. It covers massive Chinese data, has stronger capabilities in dialogue Q&A, content creation and generation, and has a faster response speed.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('BLOOMZ-7B', - 'BLOOMZ-7B是业内知名的大语言模型,由BigScience研发并开源,能够以46种语言和13种编程语言输出文本。', + _('BLOOMZ-7B is a well-known large language model in the industry. It was developed and open sourced by BigScience and can output text in 46 languages and 13 programming languages.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('Llama-2-7b-chat', - 'Llama-2-7b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-7b-chat是高性能原生开源版本,适用于对话场景。', + _('Llama-2-7b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning and knowledge application. Llama-2-7b-chat is a high-performance native open source version suitable for conversation scenarios.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('Llama-2-13b-chat', - 'Llama-2-13b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-13b-chat是性能与效果均衡的原生开源版本,适用于对话场景。', + _('Llama-2-13b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning and knowledge application. Llama-2-13b-chat is a native open source version with balanced performance and effect, suitable for conversation scenarios.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('Llama-2-70b-chat', - 'Llama-2-70b-chat由Meta AI研发并开源,在编码、推理及知识应用等场景表现优秀,Llama-2-70b-chat是高精度效果的原生开源版本。', + _('Llama-2-70b-chat was developed by Meta AI and is open source. It performs well in scenarios such as coding, reasoning, and knowledge application. Llama-2-70b-chat is a native open source version with high-precision effects.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel), ModelInfo('Qianfan-Chinese-Llama-2-7B', - '千帆团队在Llama-2-7b基础上的中文增强版本,在CMMLU、C-EVAL等中文知识库上表现优异。', + _('The Chinese enhanced version developed by the Qianfan team based on Llama-2-7b has performed well on Chinese knowledge bases such as CMMLU and C-EVAL.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel) ] embedding_model_info = ModelInfo('Embedding-V1', - 'Embedding-V1是一个基于百度文心大模型技术的文本表示模型,可以将文本转化为用数值表示的向量形式,用于文本检索、信息推荐、知识挖掘等场景。 Embedding-V1提供了Embeddings接口,可以根据输入内容生成对应的向量表示。您可以通过调用该接口,将文本输入到模型中,获取到对应的向量表示,从而进行后续的文本处理和分析。', + _('Embedding-V1 is a text representation model based on Baidu Wenxin large model technology. It can convert text into a vector form represented by numerical values and can be used in text retrieval, information recommendation, knowledge mining and other scenarios. Embedding-V1 provides the Embeddings interface, which can generate corresponding vector representations based on input content. 
You can call this interface to input text into the model and obtain the corresponding vector representation for subsequent text processing and analysis.'), ModelTypeConst.EMBEDDING, qianfan_embedding_credential, QianfanEmbeddings) model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info( ModelInfo('ERNIE-Bot-4', - 'ERNIE-Bot-4是百度自行研发的大语言模型,覆盖海量中文数据,具有更强的对话问答、内容创作生成等能力。', + _('ERNIE-Bot-4 is a large language model independently developed by Baidu. It covers massive Chinese data and has stronger capabilities in dialogue Q&A, content creation and generation.'), ModelTypeConst.LLM, win_xin_llm_model_credential, QianfanChatModel)).append_model_info(embedding_model_info).append_default_model_info( @@ -62,6 +63,6 @@ class WenxinModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_wenxin_provider', name='千帆大模型', icon=get_file_content( + return ModelProvideInfo(provider='model_wenxin_provider', name=_('Qianfan Large Model'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'wenxin_model_provider', 'icon', 'azure_icon_svg'))) diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py index 085a33065..89a8e91d9 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/embedding.py @@ -12,7 +12,7 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class XFEmbeddingCredential(BaseForm, BaseModelCredential): @@ -20,16 +20,16 @@ class XFEmbeddingCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) self.valid_form(model_credential) try: model = provider.get_model(model_type, model_name, model_credential) - model.embed_query('你好') + model.embed_query(_('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -37,7 +37,7 @@ class XFEmbeddingCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} - base_url = forms.TextInputField('API 域名', required=True, default_value="https://emb-cn-huabei-1.xf-yun.com/") + base_url = forms.TextInputField('API Url', required=True, default_value="https://emb-cn-huabei-1.xf-yun.com/") spark_app_id = forms.TextInputField('APP ID', required=True) spark_api_key = forms.PasswordInputField("API Key", required=True) spark_api_secret = forms.PasswordInputField('API Secret', required=True) diff --git 
a/apps/setting/models_provider/impl/xf_model_provider/credential/image.py b/apps/setting/models_provider/impl/xf_model_provider/credential/image.py index e0486adb2..9e021db65 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/image.py @@ -10,10 +10,10 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.xf_model_provider.model.image import ImageMessage - +from django.utils.translation import gettext_lazy as _ class XunFeiImageModelCredential(BaseForm, BaseModelCredential): - spark_api_url = forms.TextInputField('API 域名', required=True, default_value='wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image') + spark_api_url = forms.TextInputField('API Url', required=True, default_value='wss://spark-api.cn-huabei-1.xf-yun.com/v2.1/image') spark_app_id = forms.TextInputField('APP ID', required=True) spark_api_key = forms.PasswordInputField("API Key", required=True) spark_api_secret = forms.PasswordInputField('API Secret', required=True) @@ -22,25 +22,25 @@ class XunFeiImageModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) cwd = os.path.dirname(os.path.abspath(__file__)) with open(f'{cwd}/img_1.png', 'rb') as f: - message_list = [ImageMessage(str(base64.b64encode(f.read()), 'utf-8')), HumanMessage('请概述这张图片')] + message_list = [ImageMessage(str(base64.b64encode(f.read()), 'utf-8')), HumanMessage(_('Please outline this picture'))] model.stream(message_list) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py index ae2cbea19..8bb78ba2c 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/llm.py @@ -14,10 +14,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode - +from django.utils.translation import gettext_lazy as _ class XunFeiLLMModelGeneralParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = 
forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.5, _min=0.1, _max=1.0, @@ -25,7 +25,7 @@ class XunFeiLLMModelGeneralParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=4096, _min=1, _max=100000, @@ -34,7 +34,7 @@ class XunFeiLLMModelGeneralParams(BaseForm): class XunFeiLLMModelProParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.5, _min=0.1, _max=1.0, @@ -42,7 +42,7 @@ class XunFeiLLMModelProParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=4096, _min=1, _max=100000, @@ -56,22 +56,22 @@ class XunFeiLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -79,7 +79,7 @@ class XunFeiLLMModelCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model: Dict[str, object]): return {**model, 'spark_api_secret': super().encryption(model.get('spark_api_secret', ''))} - spark_api_url = forms.TextInputField('API 域名', required=True) + spark_api_url = forms.TextInputField('API Url', required=True) spark_app_id = forms.TextInputField('APP ID', required=True) spark_api_key = forms.PasswordInputField("API Key", required=True) spark_api_secret = forms.PasswordInputField('API Secret', required=True) diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py b/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py index f93922800..cf8f74e1e 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/stt.py @@ -6,10 +6,11 @@ from common import forms from 
common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class XunFeiSTTModelCredential(BaseForm, BaseModelCredential): - spark_api_url = forms.TextInputField('API 域名', required=True, default_value='wss://iat-api.xfyun.cn/v2/iat') + spark_api_url = forms.TextInputField('API Url', required=True, default_value='wss://iat-api.xfyun.cn/v2/iat') spark_app_id = forms.TextInputField('APP ID', required=True) spark_api_key = forms.PasswordInputField("API Key", required=True) spark_api_secret = forms.PasswordInputField('API Secret', required=True) @@ -18,12 +19,12 @@ class XunFeiSTTModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -33,7 +34,7 @@ class XunFeiSTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py b/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py index 99f2c6cc5..a88856b3d 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py +++ b/apps/setting/models_provider/impl/xf_model_provider/credential/tts.py @@ -6,23 +6,24 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class XunFeiTTSModelGeneralParams(BaseForm): vcn = forms.SingleSelect( - TooltipLabel('发音人', '发音人,可选值:请到控制台添加试用或购买发音人,添加后即显示发音人参数值'), + TooltipLabel(_('Speaker'), _('Speaker, optional value: Please go to the console to add a trial or purchase speaker. 
After adding, the speaker parameter value will be displayed.')), required=True, default_value='xiaoyan', text_field='value', value_field='value', option_list=[ - {'text': '讯飞小燕', 'value': 'xiaoyan'}, - {'text': '讯飞许久', 'value': 'aisjiuxu'}, - {'text': '讯飞小萍', 'value': 'aisxping'}, - {'text': '讯飞小婧', 'value': 'aisjinger'}, - {'text': '讯飞许小宝', 'value': 'aisbabyxu'}, + {'text': _('iFlytek Xiaoyan'), 'value': 'xiaoyan'}, + {'text': _('iFlytek Xujiu'), 'value': 'aisjiuxu'}, + {'text': _('iFlytek Xiaoping'), 'value': 'aisxping'}, + {'text': _('iFlytek Xiaojing'), 'value': 'aisjinger'}, + {'text': _('iFlytek Xuxiaobao'), 'value': 'aisbabyxu'}, ]) speed = forms.SliderField( - TooltipLabel('语速', '语速,可选值:[0-100],默认为50'), + TooltipLabel(_('speaking speed'), _('Speech speed, optional value: [0-100], default is 50')), required=True, default_value=50, _min=1, _max=100, @@ -31,7 +32,7 @@ class XunFeiTTSModelGeneralParams(BaseForm): class XunFeiTTSModelCredential(BaseForm, BaseModelCredential): - spark_api_url = forms.TextInputField('API 域名', required=True, default_value='wss://tts-api.xfyun.cn/v2/tts') + spark_api_url = forms.TextInputField('API Url', required=True, default_value='wss://tts-api.xfyun.cn/v2/tts') spark_app_id = forms.TextInputField('APP ID', required=True) spark_api_key = forms.PasswordInputField("API Key", required=True) spark_api_secret = forms.PasswordInputField('API Secret', required=True) @@ -40,12 +41,12 @@ class XunFeiTTSModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['spark_api_url', 'spark_app_id', 'spark_api_key', 'spark_api_secret']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -55,7 +56,7 @@ class XunFeiTTSModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xf_model_provider/model/tts.py b/apps/setting/models_provider/impl/xf_model_provider/model/tts.py index 1db844df1..206e3cbca 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/model/tts.py +++ b/apps/setting/models_provider/impl/xf_model_provider/model/tts.py @@ -21,6 +21,7 @@ import websockets from common.util.common import _remove_empty_lines from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tts import BaseTextToSpeech +from django.utils.translation import gettext_lazy as _ max_kb = logging.getLogger("max_kb") @@ -97,7 +98,7 @@ class XFSparkTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): return url def check_auth(self): - self.text_to_speech("你好") + self.text_to_speech(_('Hello')) def text_to_speech(self, text): diff --git a/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py 
b/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py index c89456b04..c2962271e 100644 --- a/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py +++ b/apps/setting/models_provider/impl/xf_model_provider/xf_model_provider.py @@ -23,6 +23,7 @@ from setting.models_provider.impl.xf_model_provider.model.llm import XFChatSpark from setting.models_provider.impl.xf_model_provider.model.stt import XFSparkSpeechToText from setting.models_provider.impl.xf_model_provider.model.tts import XFSparkTextToSpeech from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ ssl._create_default_https_context = ssl.create_default_context() @@ -35,7 +36,7 @@ model_info_list = [ ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), ModelInfo('generalv3', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), ModelInfo('generalv2', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM), - ModelInfo('iat', '中英文识别', ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), + ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech), ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding) ] @@ -46,7 +47,7 @@ model_info_manage = ( .append_default_model_info( ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM)) .append_default_model_info( - ModelInfo('iat', '中英文识别', ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), + ModelInfo('iat', _('Chinese and English recognition'), ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText), ) .append_default_model_info( ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech)) @@ -62,6 +63,6 @@ class XunFeiModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_xf_provider', name='讯飞星火', icon=get_file_content( + return ModelProvideInfo(provider='model_xf_provider', name=_('iFlytek Spark'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'xf_model_provider', 'icon', 'xf_icon_svg'))) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py index 75e58ff53..3437938be 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/embedding.py @@ -6,6 +6,7 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode from setting.models_provider.impl.local_model_provider.model.embedding import LocalEmbedding +from django.utils.translation import gettext_lazy as _ class XinferenceEmbeddingModelCredential(BaseForm, BaseModelCredential): @@ -13,18 +14,18 @@ class XinferenceEmbeddingModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, 
_('{model_type} Model type is not supported').format(model_type=model_type)) try: model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'), 'embedding') except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) exist = provider.get_model_info_by_name(model_list, model_name) model: LocalEmbedding = provider.get_model(model_type, model_name, model_credential) if len(exist) == 0: model.start_down_model_thread() - raise AppApiException(ValidCode.model_not_fount, "模型不存在,请先下载模型") - model.embed_query('你好') + raise AppApiException(ValidCode.model_not_fount, _('The model does not exist, please download the model first')) + model.embed_query(_('Hello')) return True def encryption_dict(self, model_info: Dict[str, object]): @@ -33,8 +34,8 @@ class XinferenceEmbeddingModelCredential(BaseForm, BaseModelCredential): def build_model(self, model_info: Dict[str, object]): for key in ['model']: if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') + raise AppApiException(500, _('{key} is required').format(key=key)) return self - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py index addddae53..d8b187e6d 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/image.py @@ -9,9 +9,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ + class XinferenceImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +22,8 @@ class XinferenceImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -27,33 +31,35 @@ class XinferenceImageModelParams(BaseForm): precision=0) - class XinferenceImageModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not 
supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, + _('Verification failed, please check whether the parameters are correct: {error}').format( + error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py index 48e540aac..cd3628a23 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/llm.py @@ -9,9 +9,12 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ + class XinferenceLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), + _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.7, _min=0.1, _max=1.0, @@ -19,7 +22,8 @@ class XinferenceLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), + _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=800, _min=1, _max=100000, @@ -32,16 +36,19 @@ class XinferenceLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, + _('{model_type} Model type is not supported').format(model_type=model_type)) try: - model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'), model_type) + model_list = provider.get_base_model_list(model_credential.get('api_base'), model_credential.get('api_key'), + model_type) except Exception as e: - raise AppApiException(ValidCode.valid_error.value, "API 域名无效") + raise AppApiException(ValidCode.valid_error.value, _('API domain name is invalid')) exist = provider.get_model_info_by_name(model_list, model_name) if len(exist) == 0: - raise AppApiException(ValidCode.valid_error.value, "模型不存在,请先下载模型") + raise AppApiException(ValidCode.valid_error.value, + _('The model does not exist, please download the model first')) model = provider.get_model(model_type, model_name, model_credential, **model_params) - 
model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) return True def encryption_dict(self, model_info: Dict[str, object]): @@ -50,11 +57,11 @@ class XinferenceLLMModelCredential(BaseForm, BaseModelCredential): def build_model(self, model_info: Dict[str, object]): for key in ['api_key', 'model']: if key not in model_info: - raise AppApiException(500, f'{key} 字段为必填字段') + raise AppApiException(500, _('{key} is required').format(key=key)) self.api_key = model_info.get('api_key') return self - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def get_model_params_setting_form(self, model_name): diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py index 856e28fd7..3033c4091 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/reranker.py @@ -14,27 +14,28 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class XInferenceRerankerModelCredential(BaseForm, BaseModelCredential): def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=True): if not model_type == 'RERANKER': - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['server_url']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential) - model.compress_documents([Document(page_content='你好')], '你好') + model.compress_documents([Document(page_content=_('Hello'))], _('Hello')) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True @@ -42,6 +43,6 @@ class XInferenceRerankerModelCredential(BaseForm, BaseModelCredential): def encryption_dict(self, model_info: Dict[str, object]): return model_info - server_url = forms.TextInputField('API 域名', required=True) + server_url = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=False) diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py index 3f47be6fc..046b60fa4 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/stt.py @@ -6,21 +6,22 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm 
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class XInferenceSTTModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -30,7 +31,7 @@ class XInferenceSTTModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py index 35d5028a1..497af8991 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/tti.py @@ -9,11 +9,12 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class XinferenceTTIModelParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', '图像生成端点允许您根据文本提示创建原始图像。图像的尺寸可以为 1024x1024、1024x1792 或 1792x1024 像素。'), + TooltipLabel(_('Image size'), _('The image generation endpoint allows you to create raw images based on text prompts. The dimensions of the image can be 1024x1024, 1024x1792, or 1792x1024 pixels.')), required=True, default_value='1024x1024', option_list=[ @@ -26,7 +27,7 @@ class XinferenceTTIModelParams(BaseForm): ) quality = forms.SingleSelect( - TooltipLabel('图片质量', '默认情况下,图像以标准质量生成,您可以设置质量:“hd”以增强细节。方形、标准质量的图像生成速度最快。'), + TooltipLabel(_('Picture quality'), _('By default, images are generated in standard quality, you can set quality: "hd" to enhance detail. 
Square, standard quality images are generated fastest.')), required=True, default_value='standard', option_list=[ @@ -38,7 +39,7 @@ class XinferenceTTIModelParams(BaseForm): ) n = forms.SliderField( - TooltipLabel('图片数量', '您可以一次请求 1 个图像(通过发出并行请求来请求更多图像),或者使用 n 参数一次最多请求 10 个图像。'), + TooltipLabel(_('Number of pictures'), _('You can request 1 image at a time (requesting more images by making parallel requests), or up to 10 images at a time using the n parameter.')), required=True, default_value=1, _min=1, _max=10, @@ -47,19 +48,19 @@ class XinferenceTTIModelParams(BaseForm): class XinferenceTextToImageModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -70,7 +71,7 @@ class XinferenceTextToImageModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py b/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py index 883519b7e..7cbeed3ec 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/credential/tts.py @@ -5,40 +5,41 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class XInferenceTTSModelGeneralParams(BaseForm): # ['中文女', '中文男', '日语男', '粤语女', '英文女', '英文男', '韩语女'] voice = forms.SingleSelect( - TooltipLabel('音色', ''), + TooltipLabel(_('timbre'), ''), required=True, default_value='中文女', text_field='value', value_field='value', option_list=[ - {'text': '中文女', 'value': '中文女'}, - {'text': '中文男', 'value': '中文男'}, - {'text': '日语男', 'value': '日语男'}, - {'text': '粤语女', 'value': '粤语女'}, - {'text': '英文女', 'value': '英文女'}, - {'text': '英文男', 'value': '英文男'}, - {'text': '韩语女', 'value': '韩语女'}, + {'text': _('Chinese female'), 'value': '中文女'}, + {'text': _('Chinese male'), 'value': '中文男'}, + {'text': _('Japanese male'), 'value': '日语男'}, + {'text': _('Cantonese female'), 'value': '粤语女'}, + {'text': _('English female'), 'value': '英文女'}, + {'text': _('English male'), 'value': '英文男'}, + {'text': _('Korean female'), 'value': '韩语女'}, ]) class 
XInferenceTTSModelCredential(BaseForm, BaseModelCredential): - api_base = forms.TextInputField('API 域名', required=True) + api_base = forms.TextInputField('API Url', required=True) api_key = forms.PasswordInputField('API Key', required=True) def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider, raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_base', 'api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -48,7 +49,7 @@ class XInferenceTTSModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py b/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py index cf529fb48..a68f2896f 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/model/tts.py @@ -6,6 +6,7 @@ from common.config.tokenizer_manage_config import TokenizerManage from common.util.common import _remove_empty_lines from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tts import BaseTextToSpeech +from django.utils.translation import gettext_lazy as _ def custom_get_token_ids(text: str): @@ -40,7 +41,7 @@ class XInferenceTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): ) def check_auth(self): - self.text_to_speech('你好') + self.text_to_speech(_('Hello')) def text_to_speech(self, text): client = OpenAI( diff --git a/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py index a2846ae8c..2d406b271 100644 --- a/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py +++ b/apps/setting/models_provider/impl/xinference_model_provider/xinference_model_provider.py @@ -23,6 +23,8 @@ from setting.models_provider.impl.xinference_model_provider.model.stt import XIn from setting.models_provider.impl.xinference_model_provider.model.tti import XinferenceTextToImage from setting.models_provider.impl.xinference_model_provider.model.tts import XInferenceTextToSpeech from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ + xinference_llm_model_credential = XinferenceLLMModelCredential() xinference_stt_model_credential = XInferenceSTTModelCredential() @@ -33,175 +35,177 @@ xinference_tti_model_credential = XinferenceTextToImageModelCredential() model_info_list = [ ModelInfo( 'code-llama', - 'Code Llama 是一个专门用于代码生成的语言模型。', + _('Code Llama is a language model specifically designed for code generation.'), 
ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'code-llama-instruct', - 'Code Llama Instruct 是 Code Llama 的指令微调版本,专为执行特定任务而设计。', + _(''' +Code Llama Instruct is a fine-tuned version of Code Llama's instructions, designed to perform specific tasks. + '''), ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'code-llama-python', - 'Code Llama Python 是一个专门用于 Python 代码生成的语言模型。', + _('Code Llama Python is a language model specifically designed for Python code generation.'), ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'codeqwen1.5', - 'CodeQwen 1.5 是一个用于代码生成的语言模型,具有较高的性能。', + _('CodeQwen 1.5 is a language model for code generation with high performance.'), ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'codeqwen1.5-chat', - 'CodeQwen 1.5 Chat 是一个聊天模型版本的 CodeQwen 1.5。', + _('CodeQwen 1.5 Chat is a chat model version of CodeQwen 1.5.'), ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'deepseek', - 'Deepseek 是一个大规模语言模型,具有 130 亿参数。', + _('Deepseek is a large-scale language model with 13 billion parameters.'), ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'deepseek-chat', - 'Deepseek Chat 是一个聊天模型版本的 Deepseek。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'deepseek-coder', - 'Deepseek Coder 是一个专为代码生成设计的模型。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'deepseek-coder-instruct', - 'Deepseek Coder Instruct 是 Deepseek Coder 的指令微调版本,专为执行特定任务而设计。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'deepseek-vl-chat', - 'Deepseek VL Chat 是 Deepseek 的视觉语言聊天模型版本,能够处理图像和文本输入。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'gpt-3.5-turbo', - 'GPT-3.5 Turbo 是一个高效能的通用语言模型,适用于多种应用场景。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'gpt-4', - 'GPT-4 是一个强大的多模态模型,不仅支持文本输入,还支持图像输入。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'gpt-4-vision-preview', - 'GPT-4 Vision Preview 是 GPT-4 的视觉预览版本,支持图像输入。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'gpt4all', - 'GPT4All 是一个开源的多模态模型,支持文本和图像输入。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'llama2', - 'Llama2 是一个具有 700 亿参数的大规模语言模型,支持多种语言。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'llama2-chat', - 'Llama2 Chat 是一个聊天模型版本的 Llama2,支持多种语言。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'llama2-chat-32k', - 'Llama2 Chat 32K 是一个聊天模型版本的 Llama2,支持长达 32K 令牌的上下文。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen', - 'Qwen 是一个大规模语言模型,具有 130 亿参数。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen-chat', - 'Qwen Chat 是一个聊天模型版本的 Qwen。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen-chat-32k', - 'Qwen Chat 32K 是一个聊天模型版本的 Qwen,支持长达 32K 令牌的上下文。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen-code', - 'Qwen Code 是一个专门用于代码生成的语言模型。', + '', ModelTypeConst.LLM, 
xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen-code-chat', - 'Qwen Code Chat 是一个聊天模型版本的 Qwen Code。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen-vl', - 'Qwen VL 是 Qwen 的视觉语言模型版本,能够处理图像和文本输入。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen-vl-chat', - 'Qwen VL Chat 是 Qwen VL 的聊天模型版本,能够处理图像和文本输入。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel ), ModelInfo( 'qwen2-instruct', - 'Qwen2 Instruct 是 Qwen2 的指令微调版本,专为执行特定任务而设计。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel @@ -278,7 +282,7 @@ model_info_list = [ ), ModelInfo( 'minicpm-llama3-v-2_5', - 'MiniCPM-Llama3-V 2.5是MiniCPM-V系列中的最新型号,该模型基于SigLip-400M和Llama3-8B-Instruct构建,共有8B个参数', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel @@ -288,14 +292,14 @@ model_info_list = [ voice_model_info = [ ModelInfo( 'CosyVoice-300M-SFT', - 'CosyVoice-300M-SFT是一个小型的语音合成模型。', + '', ModelTypeConst.TTS, xinference_tts_model_credential, XInferenceTextToSpeech ), ModelInfo( 'Belle-whisper-large-v3-zh', - 'Belle Whisper Large V3 是一个中文大型语音识别模型。', + '', ModelTypeConst.STT, xinference_stt_model_credential, XInferenceSpeechToText @@ -459,67 +463,67 @@ xinference_embedding_model_credential = XinferenceEmbeddingModelCredential() # 生成embedding_model_info列表 embedding_model_info = [ - ModelInfo('bce-embedding-base_v1', 'BCE 嵌入模型的基础版本。', ModelTypeConst.EMBEDDING, + ModelInfo('bce-embedding-base_v1', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-base-en', 'BGE 英语基础版本的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-base-en', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-base-en-v1.5', 'BGE 英语基础版本 1.5 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-base-en-v1.5', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-base-zh', 'BGE 中文基础版本的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-base-zh', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-base-zh-v1.5', 'BGE 中文基础版本 1.5 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-base-zh-v1.5', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-large-en', 'BGE 英语大型版本的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-large-en', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-large-en-v1.5', 'BGE 英语大型版本 1.5 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-large-en-v1.5', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-large-zh', 'BGE 中文大型版本的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-large-zh', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-large-zh-noinstruct', 'BGE 中文大型版本的嵌入模型,无指令调整。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-large-zh-noinstruct', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-large-zh-v1.5', 'BGE 中文大型版本 1.5 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-large-zh-v1.5', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-m3', 'BGE M3 版本的嵌入模型。', ModelTypeConst.EMBEDDING, 
xinference_embedding_model_credential, + ModelInfo('bge-m3', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-small-en-v1.5', 'BGE 英语小型版本 1.5 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-small-en-v1.5', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-small-zh', 'BGE 中文小型版本的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-small-zh', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('bge-small-zh-v1.5', 'BGE 中文小型版本 1.5 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('bge-small-zh-v1.5', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('e5-large-v2', 'E5 大型版本 2 的嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('e5-large-v2', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('gte-base', 'GTE 基础版本的嵌入模型。', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + ModelInfo('gte-base', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('gte-large', 'GTE 大型版本的嵌入模型。', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + ModelInfo('gte-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('jina-embeddings-v2-base-en', 'Jina 嵌入模型的英语基础版本 2。', ModelTypeConst.EMBEDDING, + ModelInfo('jina-embeddings-v2-base-en', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('jina-embeddings-v2-base-zh', 'Jina 嵌入模型的中文基础版本 2。', ModelTypeConst.EMBEDDING, + ModelInfo('jina-embeddings-v2-base-zh', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('jina-embeddings-v2-small-en', 'Jina 嵌入模型的英语小型版本 2。', ModelTypeConst.EMBEDDING, + ModelInfo('jina-embeddings-v2-small-en', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('m3e-base', 'M3E 基础版本的嵌入模型。', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + ModelInfo('m3e-base', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('m3e-large', 'M3E 大型版本的嵌入模型。', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + ModelInfo('m3e-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('m3e-small', 'M3E 小型版本的嵌入模型。', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, + ModelInfo('m3e-small', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('multilingual-e5-large', '多语言大型版本的 E5 嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('multilingual-e5-large', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('text2vec-base-chinese', 'Text2Vec 的中文基础版本嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('text2vec-base-chinese', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('text2vec-base-chinese-paraphrase', 'Text2Vec 的中文基础版本的同义句嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('text2vec-base-chinese-paraphrase', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('text2vec-base-chinese-sentence', 'Text2Vec 的中文基础版本的句子嵌入模型。', ModelTypeConst.EMBEDDING, + 
ModelInfo('text2vec-base-chinese-sentence', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('text2vec-base-multilingual', 'Text2Vec 的多语言基础版本嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('text2vec-base-multilingual', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), - ModelInfo('text2vec-large-chinese', 'Text2Vec 的中文大型版本嵌入模型。', ModelTypeConst.EMBEDDING, + ModelInfo('text2vec-large-chinese', '', ModelTypeConst.EMBEDDING, xinference_embedding_model_credential, XinferenceEmbedding), ] rerank_list = [ModelInfo('bce-reranker-base_v1', - '发布新的重新排名器,建立在强大的 M3 和LLM (GEMMA 和 MiniCPM,实际上没那么大)骨干上,支持多语言处理和更大的输入,大幅提高 BEIR、C-MTEB/Retrieval 的排名性能、MIRACL、LlamaIndex 评估', + '', ModelTypeConst.RERANKER, XInferenceRerankerModelCredential(), XInferenceReranker)] model_info_manage = ( ModelInfoManage.builder() @@ -528,7 +532,7 @@ model_info_manage = ( .append_default_model_info(voice_model_info[0]) .append_default_model_info(voice_model_info[1]) .append_default_model_info(ModelInfo('phi3', - 'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。', + '', ModelTypeConst.LLM, xinference_llm_model_credential, XinferenceChatModel)) .append_model_info_list(embedding_model_info) diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py index c9092371e..24fcdc97a 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/image.py @@ -9,9 +9,10 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class ZhiPuImageModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.95, _min=0.1, _max=1.0, @@ -19,7 +20,7 @@ class ZhiPuImageModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -33,24 +34,24 @@ class ZhiPuImageModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) + res = model.stream([HumanMessage(content=[{"type": "text", "text": _('Hello')}])]) for 
chunk in res: print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py index 55d1fad67..237ad3711 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/llm.py @@ -14,10 +14,11 @@ from common import forms from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from django.utils.translation import gettext_lazy as _ class ZhiPuLLMModelParams(BaseForm): - temperature = forms.SliderField(TooltipLabel('温度', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'), + temperature = forms.SliderField(TooltipLabel(_('Temperature'), _('Higher values make the output more random, while lower values make it more focused and deterministic')), required=True, default_value=0.95, _min=0.1, _max=1.0, @@ -25,7 +26,7 @@ class ZhiPuLLMModelParams(BaseForm): precision=2) max_tokens = forms.SliderField( - TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'), + TooltipLabel(_('Output the maximum Tokens'), _('Specify the maximum number of tokens that the model can generate')), required=True, default_value=1024, _min=1, _max=100000, @@ -39,21 +40,21 @@ class ZhiPuLLMModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: model = provider.get_model(model_type, model_name, model_credential, **model_params) - model.invoke([HumanMessage(content='你好')]) + model.invoke([HumanMessage(content=_('Hello'))]) except Exception as e: if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py b/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py index 0b58fa65a..772003a27 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/credential/tti.py @@ -6,11 +6,12 @@ from common.exception.app_exception import AppApiException from common.forms import BaseForm, TooltipLabel from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode +from 
django.utils.translation import gettext_lazy as _ class ZhiPuTTIModelParams(BaseForm): size = forms.SingleSelect( - TooltipLabel('图片尺寸', - '图片尺寸,仅 cogview-3-plus 支持该参数。可选范围:[1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440],默认是1024x1024。'), + TooltipLabel(_('Image size'), + _('Image size, only cogview-3-plus supports this parameter. Optional range: [1024x1024,768x1344,864x1152,1344x768,1152x864,1440x720,720x1440], the default is 1024x1024.')), required=True, default_value='1024x1024', option_list=[ @@ -33,12 +34,12 @@ class ZhiPuTextToImageModelCredential(BaseForm, BaseModelCredential): raise_exception=False): model_type_list = provider.get_model_type_list() if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))): - raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持') + raise AppApiException(ValidCode.valid_error.value, _('{model_type} Model type is not supported').format(model_type=model_type)) for key in ['api_key']: if key not in model_credential: if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段') + raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key)) else: return False try: @@ -49,7 +50,7 @@ class ZhiPuTextToImageModelCredential(BaseForm, BaseModelCredential): if isinstance(e, AppApiException): raise e if raise_exception: - raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}') + raise AppApiException(ValidCode.valid_error.value, _('Verification failed, please check whether the parameters are correct: {error}').format(error=str(e))) else: return False return True diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py index e2c59b85a..15d213ffd 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/tti.py @@ -7,6 +7,7 @@ from zhipuai import ZhipuAI from common.config.tokenizer_manage_config import TokenizerManage from setting.models_provider.base_model_provider import MaxKBBaseModel from setting.models_provider.impl.base_tti import BaseTextToImage +from django.utils.translation import gettext_lazy as _ def custom_get_token_ids(text: str): @@ -45,7 +46,7 @@ class ZhiPuTextToImage(MaxKBBaseModel, BaseTextToImage): zhipuai_api_key=self.api_key, model_name=self.model, ) - chat.invoke([HumanMessage([{"type": "text", "text": "你好"}])]) + chat.invoke([HumanMessage([{"type": "text", "text": _('Hello')}])]) # self.generate_image('生成一个小猫图片') diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py b/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py index 6a934c086..bbe2f485d 100644 --- a/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py +++ b/apps/setting/models_provider/impl/zhipu_model_provider/zhipu_model_provider.py @@ -18,6 +18,7 @@ from setting.models_provider.impl.zhipu_model_provider.model.image import ZhiPuI from setting.models_provider.impl.zhipu_model_provider.model.llm import ZhipuChatModel from setting.models_provider.impl.zhipu_model_provider.model.tti import ZhiPuTextToImage from smartdoc.conf import PROJECT_DIR +from django.utils.translation import gettext_lazy as _ qwen_model_credential = ZhiPuLLMModelCredential() zhipu_image_model_credential = ZhiPuImageModelCredential() @@ -30,25 +31,25 @@ model_info_list = [ ] model_info_image_list = [ 
- ModelInfo('glm-4v-plus', '具有强大的多模态理解能力。能够同时理解多达五张图像,并支持视频内容理解', + ModelInfo('glm-4v-plus', _('Has strong multi-modal understanding capabilities. Able to understand up to five images simultaneously and supports video content understanding'), ModelTypeConst.IMAGE, zhipu_image_model_credential, ZhiPuImage), - ModelInfo('glm-4v', '专注于单图理解。适用于需要高效图像解析的场景', + ModelInfo('glm-4v', _('Focuses on single-image understanding. Suitable for scenarios requiring efficient image analysis'), ModelTypeConst.IMAGE, zhipu_image_model_credential, ZhiPuImage), - ModelInfo('glm-4v-flash', '专注于单图理解。适用于需要高效图像解析的场景(免费)', + ModelInfo('glm-4v-flash', _('Focuses on single-image understanding. Suitable for scenarios requiring efficient image analysis (free)'), ModelTypeConst.IMAGE, zhipu_image_model_credential, ZhiPuImage), ] model_info_tti_list = [ - ModelInfo('cogview-3', '根据用户文字描述快速、精准生成图像。分辨率支持1024x1024', + ModelInfo('cogview-3', _('Quickly and accurately generates images based on user text descriptions. Resolution supports 1024x1024'), ModelTypeConst.TTI, zhipu_tti_model_credential, ZhiPuTextToImage), - ModelInfo('cogview-3-plus', '根据用户文字描述生成高质量图像,支持多图片尺寸', + ModelInfo('cogview-3-plus', _('Generates high-quality images based on user text descriptions, supporting multiple image sizes'), ModelTypeConst.TTI, zhipu_tti_model_credential, ZhiPuTextToImage), - ModelInfo('cogview-3-flash', '根据用户文字描述生成高质量图像,支持多图片尺寸(免费)', + ModelInfo('cogview-3-flash', _('Generates high-quality images based on user text descriptions, supporting multiple image sizes (free)'), ModelTypeConst.TTI, zhipu_tti_model_credential, ZhiPuTextToImage), ] @@ -71,6 +72,6 @@ class ZhiPuModelProvider(IModelProvider): return model_info_manage def get_model_provide_info(self): - return ModelProvideInfo(provider='model_zhipu_provider', name='智谱AI', icon=get_file_content( + return ModelProvideInfo(provider='model_zhipu_provider', name=_('Zhipu AI'), icon=get_file_content( os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'zhipu_model_provider', 'icon', 'zhipuai_icon_svg'))) diff --git a/apps/setting/models_provider/tools.py b/apps/setting/models_provider/tools.py index e353acc35..150e3d400 100644 --- a/apps/setting/models_provider/tools.py +++ b/apps/setting/models_provider/tools.py @@ -12,6 +12,7 @@ from django.db.models import QuerySet from common.config.embedding_config import ModelManage from setting.models import Model from setting.models_provider import get_model +from django.utils.translation import gettext_lazy as _ def get_model_by_id(_id, user_id): @@ -19,9 +20,9 @@ def get_model_by_id(_id, user_id): # 手动关闭数据库连接 connection.close() if model is None: - raise Exception("模型不存在") + raise Exception(_('Model does not exist')) if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id): - raise Exception(f"无权限使用此模型:{model.name}") + raise Exception(_('No permission to use this model') + f": {model.name}") return model diff --git a/apps/setting/serializers/model_apply_serializers.py b/apps/setting/serializers/model_apply_serializers.py index 12b6fafd4..88609a18f 100644 --- a/apps/setting/serializers/model_apply_serializers.py +++ b/apps/setting/serializers/model_apply_serializers.py @@ -15,7 +15,7 @@ from common.config.embedding_config import ModelManage from common.util.field_message import ErrMessage from setting.models import Model from setting.models_provider import get_model - +from django.utils.translation import gettext_lazy as _ def get_embedding_model(model_id): model = QuerySet(Model).filter(id=model_id).first() @@ 
-29,26 +29,26 @@ def get_embedding_model(model_id): class EmbedDocuments(serializers.Serializer): texts = serializers.ListField(required=True, child=serializers.CharField(required=True, error_messages=ErrMessage.char( - "向量文本")), - error_messages=ErrMessage.list("向量文本列表")) + _('vector text'))), + error_messages=ErrMessage.list(_('vector text list'))) class EmbedQuery(serializers.Serializer): - text = serializers.CharField(required=True, error_messages=ErrMessage.char("向量文本")) + text = serializers.CharField(required=True, error_messages=ErrMessage.char(_('vector text'))) class CompressDocument(serializers.Serializer): - page_content = serializers.CharField(required=True, error_messages=ErrMessage.char("文本")) - metadata = serializers.DictField(required=False, error_messages=ErrMessage.dict("元数据")) + page_content = serializers.CharField(required=True, error_messages=ErrMessage.char(_('text'))) + metadata = serializers.DictField(required=False, error_messages=ErrMessage.dict(_('metadata'))) class CompressDocuments(serializers.Serializer): documents = CompressDocument(required=True, many=True) - query = serializers.CharField(required=True, error_messages=ErrMessage.char("查询query")) + query = serializers.CharField(required=True, error_messages=ErrMessage.char(_('query'))) class ModelApplySerializers(serializers.Serializer): - model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) + model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('model id'))) def embed_documents(self, instance, with_valid=True): if with_valid: diff --git a/apps/setting/serializers/provider_serializers.py b/apps/setting/serializers/provider_serializers.py index 25f464bcb..b6958d03d 100644 --- a/apps/setting/serializers/provider_serializers.py +++ b/apps/setting/serializers/provider_serializers.py @@ -27,6 +27,8 @@ from setting.models.model_management import Model, Status, PermissionType from setting.models_provider import get_model, get_model_credential from setting.models_provider.base_model_provider import ValidCode, DownModelChunkStatus from setting.models_provider.constants.model_provider_constants import ModelProvideConstants +from django.utils.translation import gettext_lazy as _ + def get_default_model_params_setting(provider, model_type, model_name): credential = get_model_credential(provider, model_type, model_name) @@ -71,20 +73,20 @@ class ModelPullManage: class ModelSerializer(serializers.Serializer): class Query(serializers.Serializer): - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('user id'))) name = serializers.CharField(required=False, max_length=64, - error_messages=ErrMessage.char("模型名称")) + error_messages=ErrMessage.char(_('model name'))) - model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型")) + model_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_('model type'))) - model_name = serializers.CharField(required=False, error_messages=ErrMessage.char("基础模型")) + model_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_('model name'))) - provider = serializers.CharField(required=False, error_messages=ErrMessage.char("供应商")) + provider = serializers.CharField(required=False, error_messages=ErrMessage.char(_('provider'))) - permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char("权限")) + permission_type = 
serializers.CharField(required=False, error_messages=ErrMessage.char(_('permission type'))) - create_user = serializers.CharField(required=False, error_messages=ErrMessage.char("创建者")) + create_user = serializers.CharField(required=False, error_messages=ErrMessage.char(_('create user'))) def list(self, with_valid): if with_valid: @@ -122,21 +124,25 @@ class ModelSerializer(serializers.Serializer): model_query_set.filter(**query_params).order_by("-create_time")] class Edit(serializers.Serializer): - user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid(_('user id'))) name = serializers.CharField(required=False, max_length=64, - error_messages=ErrMessage.char("模型名称")) + error_messages=ErrMessage.char(_("model name"))) - model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型")) + model_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_("model type"))) - permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char("权限"), validators=[ - validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), - message="权限只支持PUBLIC|PRIVATE", code=500) - ]) + permission_type = serializers.CharField(required=False, error_messages=ErrMessage.char(_("permission type")), + validators=[ + validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), + message=_( + "permissions only support PUBLIC|PRIVATE"), + code=500) + ]) - model_name = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型")) + model_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("model type"))) - credential = serializers.DictField(required=False, error_messages=ErrMessage.dict("认证信息")) + credential = serializers.DictField(required=False, + error_messages=ErrMessage.dict(_("certification information"))) def is_valid(self, model=None, raise_exception=False): super().is_valid(raise_exception=True) @@ -165,30 +171,36 @@ class ModelSerializer(serializers.Serializer): return credential, model_credential, provider_handler class Create(serializers.Serializer): - user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("user id"))) - name = serializers.CharField(required=True, max_length=64, error_messages=ErrMessage.char("模型名称")) + name = serializers.CharField(required=True, max_length=64, error_messages=ErrMessage.char(_("model name"))) - provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商")) + provider = serializers.CharField(required=True, error_messages=ErrMessage.char(_("provider"))) - model_type = serializers.CharField(required=True, error_messages=ErrMessage.char("模型类型")) + model_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("model type"))) - permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char("权限"), validators=[ - validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), - message="权限只支持PUBLIC|PRIVATE", code=500) - ]) + permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("permission type")), + validators=[ + validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"), + message=_( + "permissions only support PUBLIC|PRIVATE"), + code=500) + ]) - model_name = serializers.CharField(required=True, error_messages=ErrMessage.char("基础模型")) + 
model_name = serializers.CharField(required=True, error_messages=ErrMessage.char(_("model name"))) - model_params_form = serializers.ListField(required=False, default=list, error_messages=ErrMessage.char("参数配置")) + model_params_form = serializers.ListField(required=False, default=list, + error_messages=ErrMessage.char(_("parameter configuration"))) - credential = serializers.DictField(required=True, error_messages=ErrMessage.dict("认证信息")) + credential = serializers.DictField(required=True, + error_messages=ErrMessage.dict(_("certification information"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if QuerySet(Model).filter(user_id=self.data.get('user_id'), name=self.data.get('name')).exists(): - raise AppApiException(500, f'模型名称【{self.data.get("name")}】已存在') + raise AppApiException(500, _('Model name【{model_name}】already exists').format( + model_name=self.data.get("name"))) default_params = {item['field']: item['default_value'] for item in self.data.get('model_params_form')} ModelProvideConstants[self.data.get('provider')].value.is_valid_credential(self.data.get('model_type'), self.data.get('model_name'), @@ -241,7 +253,7 @@ class ModelSerializer(serializers.Serializer): class ModelParams(serializers.Serializer): id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -262,7 +274,7 @@ class ModelSerializer(serializers.Serializer): class ModelParamsForm(serializers.Serializer): id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -286,7 +298,7 @@ class ModelSerializer(serializers.Serializer): class Operate(serializers.Serializer): id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("模型id")) - user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id")) + user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("user id"))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -387,7 +399,7 @@ class ModelSerializer(serializers.Serializer): class ProviderSerializer(serializers.Serializer): - provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商")) + provider = serializers.CharField(required=True, error_messages=ErrMessage.char(_("provider"))) method = serializers.CharField(required=True, error_messages=ErrMessage.char("执行函数名称")) diff --git a/apps/setting/serializers/system_setting.py b/apps/setting/serializers/system_setting.py index a66b15805..9f1525bbf 100644 --- a/apps/setting/serializers/system_setting.py +++ b/apps/setting/serializers/system_setting.py @@ -13,6 +13,7 @@ from rest_framework import serializers from common.exception.app_exception import AppApiException from common.util.field_message import ErrMessage from setting.models.system_management import SystemSetting, SettingType +from django.utils.translation import gettext_lazy as _ class SystemSettingSerializer(serializers.Serializer): @@ -25,13 +26,13 @@ class 
SystemSettingSerializer(serializers.Serializer): return system_setting.meta class Create(serializers.Serializer): - email_host = serializers.CharField(required=True, error_messages=ErrMessage.char("SMTP 主机")) - email_port = serializers.IntegerField(required=True, error_messages=ErrMessage.char("SMTP 端口")) - email_host_user = serializers.CharField(required=True, error_messages=ErrMessage.char("发件人邮箱")) - email_host_password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码")) - email_use_tls = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否开启TLS")) - email_use_ssl = serializers.BooleanField(required=True, error_messages=ErrMessage.char("是否开启SSL")) - from_email = serializers.EmailField(required=True, error_messages=ErrMessage.char("发送人邮箱")) + email_host = serializers.CharField(required=True, error_messages=ErrMessage.char(_('SMTP host'))) + email_port = serializers.IntegerField(required=True, error_messages=ErrMessage.char(_('SMTP port'))) + email_host_user = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Sender\'s email'))) + email_host_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Password'))) + email_use_tls = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_('Whether to enable TLS'))) + email_use_ssl = serializers.BooleanField(required=True, error_messages=ErrMessage.char(_('Whether to enable SSL'))) + from_email = serializers.EmailField(required=True, error_messages=ErrMessage.char(_('Sender\'s email'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -45,7 +46,7 @@ class SystemSettingSerializer(serializers.Serializer): self.data.get("email_use_ssl") ).open() except Exception as e: - raise AppApiException(1004, "邮箱校验失败") + raise AppApiException(1004, _('Email verification failed')) def update_or_save(self): self.is_valid(raise_exception=True) diff --git a/apps/setting/serializers/team_serializers.py b/apps/setting/serializers/team_serializers.py index 46266bb35..6882f65ff 100644 --- a/apps/setting/serializers/team_serializers.py +++ b/apps/setting/serializers/team_serializers.py @@ -29,6 +29,7 @@ from setting.models import TeamMember, TeamMemberPermission, Team from smartdoc.conf import PROJECT_DIR from users.models.user import User from users.serializers.user_serializers import UserSerializer +from django.utils.translation import gettext_lazy as _ user_cache = cache.caches['user_cache'] @@ -38,39 +39,39 @@ def get_response_body_api(): type=openapi.TYPE_OBJECT, required=['id', 'username', 'email', 'role', 'is_active', 'team_id', 'member_id'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用"), - 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title="团队id", description="团队id"), - 'member_id': openapi.Schema(type=openapi.TYPE_STRING, title="成员id", description="成员id"), + 'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')), + 'email': openapi.Schema(type=openapi.TYPE_STRING, 
title=_('Email'), description=_('Email')), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'), description=_('Is active')), + 'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')), + 'member_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('member id'), description=_('member id')), } ) class TeamMemberPermissionOperate(ApiMixin, serializers.Serializer): - USE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("使用")) - MANAGE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("管理")) + USE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('use'))) + MANAGE = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_('manage'))) def get_request_body_api(self): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="类型", - description="操作权限USE,MANAGE权限", + title=_('type'), + description=_('Operation permissions USE, MANAGE permissions'), properties={ 'USE': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="使用权限", - description="使用权限 True|False"), + title=_('use permission'), + description=_('use permission True|False')), 'MANAGE': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="管理权限", - description="管理权限 True|False") + title=_('manage permission'), + description=_('manage permission True|False')) } ) class UpdateTeamMemberItemPermissionSerializer(ApiMixin, serializers.Serializer): - target_id = serializers.CharField(required=True, error_messages=ErrMessage.char("目标id")) - type = serializers.CharField(required=True, error_messages=ErrMessage.char("目标类型")) + target_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('target id'))) + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type'))) operate = TeamMemberPermissionOperate(required=True, many=False) def get_request_body_api(self): @@ -78,10 +79,10 @@ class UpdateTeamMemberItemPermissionSerializer(ApiMixin, serializers.Serializer) type=openapi.TYPE_OBJECT, required=['id', 'type', 'operate'], properties={ - 'target_id': openapi.Schema(type=openapi.TYPE_STRING, title="知识库/应用id", - description="知识库或者应用的id"), + 'target_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset id/application id'), + description=_('dataset id/application id')), 'type': openapi.Schema(type=openapi.TYPE_STRING, - title="类型", + title=_('type'), description="DATASET|APPLICATION", ), 'operate': TeamMemberPermissionOperate().get_request_body_api() @@ -100,7 +101,7 @@ class UpdateTeamMemberPermissionSerializer(ApiMixin, serializers.Serializer): os.path.join(PROJECT_DIR, "apps", "setting", 'sql', 'check_member_permission_target_exists.sql')), [json.dumps(permission_list), user_id, user_id]) if illegal_target_id_list is not None and len(illegal_target_id_list) > 0: - raise AppApiException(500, '不存在的 应用|知识库id[' + str(illegal_target_id_list) + ']') + raise AppApiException(500, _('Non-existent application|knowledge base id[') + str(illegal_target_id_list) + ']') def update_or_save(self, member_id: str): team_member_permission_list = self.data.get("team_member_permission_list") @@ -134,8 +135,8 @@ class UpdateTeamMemberPermissionSerializer(ApiMixin, serializers.Serializer): required=['id'], properties={ 'team_member_permission_list': - openapi.Schema(type=openapi.TYPE_ARRAY, title="权限数据", - description="权限数据", + openapi.Schema(type=openapi.TYPE_ARRAY, 
title=_('Permission data'), + description=_('Permission data'), items=UpdateTeamMemberItemPermissionSerializer().get_request_body_api() ), } @@ -143,7 +144,7 @@ class UpdateTeamMemberPermissionSerializer(ApiMixin, serializers.Serializer): class TeamMemberSerializer(ApiMixin, serializers.Serializer): - team_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("团队id")) + team_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('team id'))) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) @@ -152,8 +153,8 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): def get_bach_request_body_api(): return openapi.Schema( type=openapi.TYPE_ARRAY, - title="用户id列表", - description="用户id列表", + title=_('user id list'), + description=_('user id list'), items=openapi.Schema(type=openapi.TYPE_STRING) ) @@ -163,8 +164,8 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=['username_or_email'], properties={ - 'username_or_email': openapi.Schema(type=openapi.TYPE_STRING, title="用户名或者邮箱", - description="用户名或者邮箱"), + 'username_or_email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username or email'), + description=_('Username or email')), } ) @@ -194,11 +195,11 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): def to_member_model(self, add_user_id, team_member_user_id_list, use_user_id_list, user_id): if use_user_id_list.__contains__(add_user_id): if team_member_user_id_list.__contains__(add_user_id) or user_id == add_user_id: - raise AppApiException(500, "团队中已存在当前成员,不要重复添加") + raise AppApiException(500, _('The current members already exist in the team, do not add them again.')) else: return TeamMember(team_id=self.data.get("team_id"), user_id=add_user_id) else: - raise AppApiException(500, "不存在的用户") + raise AppApiException(500, _('User does not exist')) def add_member(self, username_or_email: str, with_valid=True): """ @@ -210,14 +211,14 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): if with_valid: self.is_valid(raise_exception=True) if username_or_email is None: - raise AppApiException(500, "用户名或者邮箱必填") + raise AppApiException(500, _('Username or email is required')) user = QuerySet(User).filter( Q(username=username_or_email) | Q(email=username_or_email)).first() if user is None: - raise AppApiException(500, "不存在的用户") + raise AppApiException(500, _('User does not exist')) if QuerySet(TeamMember).filter(Q(team_id=self.data.get('team_id')) & Q(user=user)).exists() or self.data.get( "team_id") == str(user.id): - raise AppApiException(500, "团队中已存在当前成员,不要重复添加") + raise AppApiException(500, _('The current members already exist in the team, do not add them again.')) TeamMember(team_id=self.data.get("team_id"), user=user).save() return self.list_member(with_valid=False) @@ -241,22 +242,22 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): def get_response_body_api(self): return get_api_response(openapi.Schema( - type=openapi.TYPE_ARRAY, title="成员列表", description="成员列表", + type=openapi.TYPE_ARRAY, title=_('member list'), description=_('member list'), items=UserSerializer().get_response_body_api() )) class Operate(ApiMixin, serializers.Serializer): # 团队 成员id - member_id = serializers.CharField(required=True, error_messages=ErrMessage.char("成员id")) + member_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('member id'))) # 团队id - team_id = serializers.CharField(required=True, 
error_messages=ErrMessage.char("团队id")) + team_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_('team id'))) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) if self.data.get('member_id') != 'root' and not QuerySet(TeamMember).filter( team_id=self.data.get('team_id'), id=self.data.get('member_id')).exists(): - raise AppApiException(500, "不存在的成员,请先添加成员") + raise AppApiException(500, _('The member does not exist, please add a member first')) return True @@ -290,7 +291,7 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): self.is_valid(raise_exception=True) member_id = self.data.get("member_id") if member_id == 'root': - raise AppApiException(500, "管理员权限不允许修改") + raise AppApiException(500, _('Administrator rights do not allow modification')) s = UpdateTeamMemberPermissionSerializer(data=member_permission) s.is_valid(user_id=self.data.get("team_id")) s.update_or_save(member_id) @@ -304,7 +305,7 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): self.is_valid(raise_exception=True) member_id = self.data.get("member_id") if member_id == 'root': - raise AppApiException(500, "无法移除团队管理员") + raise AppApiException(500, _('Unable to remove team admin')) # 删除成员权限 QuerySet(TeamMemberPermission).filter(member_id=member_id).delete() # 删除成员 @@ -317,4 +318,4 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='团队成员id')] + description=_('member id')),] diff --git a/apps/setting/serializers/valid_serializers.py b/apps/setting/serializers/valid_serializers.py index ee7315206..1ddd393b5 100644 --- a/apps/setting/serializers/valid_serializers.py +++ b/apps/setting/serializers/valid_serializers.py @@ -18,23 +18,27 @@ from common.models.db_model_manage import DBModelManage from common.util.field_message import ErrMessage from dataset.models import DataSet from users.models import User +from django.utils.translation import gettext_lazy as _ model_message_dict = { 'dataset': {'model': DataSet, 'count': 50, - 'message': '社区版最多支持 50 个知识库,如需拥有更多知识库,请联系我们(https://fit2cloud.com/)。'}, + 'message': _( + 'The community version supports up to 50 knowledge bases. If you need more knowledge bases, please contact us (https://fit2cloud.com/).')}, 'application': {'model': Application, 'count': 5, - 'message': '社区版最多支持 5 个应用,如需拥有更多应用,请联系我们(https://fit2cloud.com/)。'}, + 'message': _( + 'The community version supports up to 5 applications. If you need more applications, please contact us (https://fit2cloud.com/).')}, 'user': {'model': User, 'count': 2, - 'message': '社区版最多支持 2 个用户,如需拥有更多用户,请联系我们(https://fit2cloud.com/)。'} + 'message': _( + 'The community version supports up to 2 users. 
If you need more users, please contact us (https://fit2cloud.com/).')} } class ValidSerializer(serializers.Serializer): - valid_type = serializers.CharField(required=True, error_messages=ErrMessage.char("类型"), validators=[ + valid_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_('type')), validators=[ validators.RegexValidator(regex=re.compile("^application|dataset|user$"), message="类型只支持:application|dataset|user", code=500) ]) - valid_count = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("校验数量")) + valid_count = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_('check quantity'))) def valid(self, is_valid=True): if is_valid: diff --git a/apps/setting/swagger_api/provide_api.py b/apps/setting/swagger_api/provide_api.py index 7544fdf25..263b6c245 100644 --- a/apps/setting/swagger_api/provide_api.py +++ b/apps/setting/swagger_api/provide_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class ModelQueryApi(ApiMixin): @@ -18,20 +19,20 @@ class ModelQueryApi(ApiMixin): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='模型名称'), + description=_('name')), openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='模型类型'), + description=_('model type')), openapi.Parameter(name='model_name', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='基础模型名称'), + description=_('model name')), openapi.Parameter(name='provider', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='供应名称') + description=_('provider')), ] @@ -39,22 +40,25 @@ class ModelEditApi(ApiMixin): @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="调用函数所需要的参数", - description="调用函数所需要的参数", + title=_('parameters required to call the function'), + description=_('parameters required to call the function'), required=['provide', 'model_info'], properties={ 'name': openapi.Schema(type=openapi.TYPE_STRING, - title="模型名称", - description="模型名称"), + title=_('name'), + description=_('name')), 'model_type': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model type'), + description=_('model type')), 'model_name': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model name'), + description=_('model name')), + 'provider': openapi.Schema(type=openapi.TYPE_STRING, + title=_('provider'), + description=_('provider')), 'credential': openapi.Schema(type=openapi.TYPE_OBJECT, - title="模型证书信息", - description="模型证书信息") + title=_('model certificate information'), + description=_('model certificate information')) } ) @@ -64,27 +68,27 @@ class ModelCreateApi(ApiMixin): @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="调用函数所需要的参数", - description="调用函数所需要的参数", + title=_('parameters required to call the function'), + description=_('parameters required to call the function'), required=['provide', 'model_info'], properties={ 'name': openapi.Schema(type=openapi.TYPE_STRING, - title="模型名称", - description="模型名称"), + title=_('name'), + description=_('name')), 'provider': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), - 'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title="权限", + title=_('provider'), + description=_('provider')), + 
'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'), description="PUBLIC|PRIVATE"), 'model_type': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model type'), + description=_('model type')), 'model_name': openapi.Schema(type=openapi.TYPE_STRING, - title="供应商", - description="供应商"), + title=_('model name'), + description=_('model name')), 'credential': openapi.Schema(type=openapi.TYPE_OBJECT, - title="模型证书信息", - description="模型证书信息"), + title=_('model certificate information'), + description=_('model certificate information')), } ) @@ -98,7 +102,7 @@ class ProvideApi(ApiMixin): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='供应名称'), + description=_('provider')), ] @staticmethod @@ -107,10 +111,10 @@ class ProvideApi(ApiMixin): type=openapi.TYPE_OBJECT, required=['key', 'value'], properties={ - 'key': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型描述", - description="模型类型描述", default="大语言模型"), - 'value': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型值", - description="模型类型值", default="LLM"), + 'key': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type description'), + description=_('model type description'), default=_('large language model')), + 'value': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type value'), + description=_('model type value'), default="LLM"), } ) @@ -122,12 +126,12 @@ class ProvideApi(ApiMixin): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='供应名称'), + description=_('provider')), openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='模型类型'), + description=_('model type')), ] @staticmethod @@ -136,12 +140,12 @@ class ProvideApi(ApiMixin): type=openapi.TYPE_OBJECT, required=['name', 'desc', 'model_type'], properties={ - 'name': openapi.Schema(type=openapi.TYPE_STRING, title="模型名称", - description="模型名称", default="模型名称"), - 'desc': openapi.Schema(type=openapi.TYPE_STRING, title="模型描述", - description="模型描述", default="xxx模型"), - 'model_type': openapi.Schema(type=openapi.TYPE_STRING, title="模型类型值", - description="模型类型值", default="LLM"), + 'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('name'), + description=_('name'), default=_('name')), + 'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('model description'), + description=_('model description')), + 'model_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('model type value'), + description=_('model type value'), default="LLM"), } ) @@ -153,17 +157,17 @@ class ProvideApi(ApiMixin): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='供应名称'), + description=_('provider')), openapi.Parameter(name='model_type', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='模型类型'), + description=_('model type')), openapi.Parameter(name='model_name', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='模型名称'), + description=_('model name')), ] @staticmethod @@ -172,17 +176,17 @@ class ProvideApi(ApiMixin): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='供应商'), + description=_('provider')), openapi.Parameter(name='method', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='需要执行的函数'), + description=_('function that needs to be executed')), ] @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="调用函数所需要的参数", - 
description="调用函数所需要的参数", + title=_('parameters required to call the function'), + description=_('parameters required to call the function'), ) diff --git a/apps/setting/swagger_api/system_setting.py b/apps/setting/swagger_api/system_setting.py index 1246ff27d..282c20d20 100644 --- a/apps/setting/swagger_api/system_setting.py +++ b/apps/setting/swagger_api/system_setting.py @@ -9,69 +9,70 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class SystemSettingEmailApi(ApiMixin): @staticmethod def get_request_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="邮箱相关参数", - description="邮箱相关参数", + title=_('Email related parameters'), + description=_('Email related parameters'), required=['email_host', 'email_port', 'email_host_user', 'email_host_password', 'email_use_tls', 'email_use_ssl', 'from_email'], properties={ 'email_host': openapi.Schema(type=openapi.TYPE_STRING, - title="SMTP 主机", - description="SMTP 主机"), + title=_('SMTP host'), + description=_('SMTP host')), 'email_port': openapi.Schema(type=openapi.TYPE_NUMBER, - title="SMTP 端口", - description="SMTP 端口"), + title=_('SMTP port'), + description=_('SMTP port')), 'email_host_user': openapi.Schema(type=openapi.TYPE_STRING, - title="发件人邮箱", - description="发件人邮箱"), + title=_('Sender\'s email'), + description=_('Sender\'s email')), 'email_host_password': openapi.Schema(type=openapi.TYPE_STRING, - title="密码", - description="密码"), + title=_('Password'), + description=_('Password')), 'email_use_tls': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启TLS", - description="是否开启TLS"), + title=_('Whether to enable TLS'), + description=_('Whether to enable TLS')), 'email_use_ssl': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启SSL", - description="是否开启SSL"), + title=_('Whether to enable SSL'), + description=_('Whether to enable SSL')), 'from_email': openapi.Schema(type=openapi.TYPE_STRING, - title="发送人邮箱", - description="发送人邮箱") + title=_('Sender\'s email'), + description=_('Sender\'s email')) } ) @staticmethod def get_response_body_api(): return openapi.Schema(type=openapi.TYPE_OBJECT, - title="邮箱相关参数", - description="邮箱相关参数", + title=_('Email related parameters'), + description=_('Email related parameters'), required=['email_host', 'email_port', 'email_host_user', 'email_host_password', 'email_use_tls', 'email_use_ssl', 'from_email'], properties={ 'email_host': openapi.Schema(type=openapi.TYPE_STRING, - title="SMTP 主机", - description="SMTP 主机"), + title=_('SMTP host'), + description=_('SMTP host')), 'email_port': openapi.Schema(type=openapi.TYPE_NUMBER, - title="SMTP 端口", - description="SMTP 端口"), + title=_('SMTP port'), + description=_('SMTP port')), 'email_host_user': openapi.Schema(type=openapi.TYPE_STRING, - title="发件人邮箱", - description="发件人邮箱"), + title=_('Sender\'s email'), + description=_('Sender\'s email')), 'email_host_password': openapi.Schema(type=openapi.TYPE_STRING, - title="密码", - description="密码"), + title=_('Password'), + description=_('Password')), 'email_use_tls': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启TLS", - description="是否开启TLS"), + title=_('Whether to enable TLS'), + description=_('Whether to enable TLS')), 'email_use_ssl': openapi.Schema(type=openapi.TYPE_BOOLEAN, - title="是否开启SSL", - description="是否开启SSL"), + title=_('Whether to enable SSL'), + description=_('Whether to enable SSL')), 'from_email': openapi.Schema(type=openapi.TYPE_STRING, - title="发送人邮箱", - description="发送人邮箱") + title=_('Sender\'s 
email'), + description=_('Sender\'s email')) } ) diff --git a/apps/setting/swagger_api/valid_api.py b/apps/setting/swagger_api/valid_api.py index 4fad9e804..f5bc5c9a2 100644 --- a/apps/setting/swagger_api/valid_api.py +++ b/apps/setting/swagger_api/valid_api.py @@ -9,6 +9,7 @@ from drf_yasg import openapi from common.mixins.api_mixin import ApiMixin +from django.utils.translation import gettext_lazy as _ class ValidApi(ApiMixin): @@ -18,10 +19,10 @@ class ValidApi(ApiMixin): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='校验类型:application|dataset|user'), + description=_('Verification type: application|dataset|user')), openapi.Parameter(name='valid_count', in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='校验数量') + description=_('check quantity')) ] diff --git a/apps/setting/views/Team.py b/apps/setting/views/Team.py index 71710e3d6..deab032ea 100644 --- a/apps/setting/views/Team.py +++ b/apps/setting/views/Team.py @@ -16,25 +16,26 @@ from common.constants.permission_constants import PermissionConstants from common.response import result from setting.serializers.team_serializers import TeamMemberSerializer, get_response_body_api, \ UpdateTeamMemberPermissionSerializer +from django.utils.translation import gettext_lazy as _ class TeamMember(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取团队成员列表", - operation_id="获取团员成员列表", + @swagger_auto_schema(operation_summary=_('Get a list of team members'), + operation_id=_('Get a list of team members'), responses=result.get_api_response(get_response_body_api()), - tags=["团队"]) + tags=[_('team')]) @has_permissions(PermissionConstants.TEAM_READ) def get(self, request: Request): return result.success(TeamMemberSerializer(data={'team_id': str(request.user.id)}).list_member()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="添加成员", - operation_id="添加成员", + @swagger_auto_schema(operation_summary=_('Add member'), + operation_id=_('Add member'), request_body=TeamMemberSerializer().get_request_body_api(), - tags=["团队"]) + tags=[_('team')]) @has_permissions(PermissionConstants.TEAM_CREATE) def post(self, request: Request): team = TeamMemberSerializer(data={'team_id': str(request.user.id)}) @@ -44,10 +45,10 @@ class TeamMember(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="批量添加成员", - operation_id="批量添加成员", + @swagger_auto_schema(operation_summary=_('Add members in batches'), + operation_id=_('Add members in batches'), request_body=TeamMemberSerializer.get_bach_request_body_api(), - tags=["团队"]) + tags=[_('team')]) @has_permissions(PermissionConstants.TEAM_CREATE) def post(self, request: Request): return result.success( @@ -57,21 +58,21 @@ class TeamMember(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取团队成员权限", - operation_id="获取团队成员权限", + @swagger_auto_schema(operation_summary=_('Get team member permissions'), + operation_id=_('Get team member permissions'), manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(), - tags=["团队"]) + tags=[_('team')]) @has_permissions(PermissionConstants.TEAM_READ) def get(self, request: Request, member_id: str): return result.success(TeamMemberSerializer.Operate( data={'member_id': member_id, 'team_id': str(request.user.id)}).list_member_permission()) @action(methods=['PUT'], 
detail=False) - @swagger_auto_schema(operation_summary="修改团队成员权限", - operation_id="修改团队成员权限", + @swagger_auto_schema(operation_summary=_('Update team member permissions'), + operation_id=_('Update team member permissions'), request_body=UpdateTeamMemberPermissionSerializer().get_request_body_api(), manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(), - tags=["团队"] + tags=[_('team')] ) @has_permissions(PermissionConstants.TEAM_EDIT) def put(self, request: Request, member_id: str): @@ -79,10 +80,10 @@ class TeamMember(APIView): data={'member_id': member_id, 'team_id': str(request.user.id)}).edit(request.data)) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="移除成员", - operation_id="移除成员", + @swagger_auto_schema(operation_summary=_('Remove member'), + operation_id=_('Remove member'), manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(), - tags=["团队"] + tags=[_('team')] ) @has_permissions(PermissionConstants.TEAM_DELETE) def delete(self, request: Request, member_id: str): diff --git a/apps/setting/views/model.py b/apps/setting/views/model.py index 965f68b1b..d57a453f5 100644 --- a/apps/setting/views/model.py +++ b/apps/setting/views/model.py @@ -18,16 +18,17 @@ from common.util.common import query_params_to_single_dict from setting.models_provider.constants.model_provider_constants import ModelProvideConstants from setting.serializers.provider_serializers import ProviderSerializer, ModelSerializer, get_default_model_params_setting from setting.swagger_api.provide_api import ProvideApi, ModelCreateApi, ModelQueryApi, ModelEditApi +from django.utils.translation import gettext_lazy as _ class Model(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="创建模型", - operation_id="创建模型", + @swagger_auto_schema(operation_summary=_('Create model'), + operation_id=_('Create model'), request_body=ModelCreateApi.get_request_body_api() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) def post(self, request: Request): return result.success( @@ -35,10 +36,10 @@ class Model(APIView): with_valid=True)) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="下载模型,只试用与Ollama平台", - operation_id="下载模型,只试用与Ollama平台", + @swagger_auto_schema(operation_summary=_('Download model (only available for the Ollama platform)'), + operation_id=_('Download model (only available for the Ollama platform)'), request_body=ModelCreateApi.get_request_body_api() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) def put(self, request: Request): return result.success( @@ -46,10 +47,10 @@ class Model(APIView): with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型列表", - operation_id="获取模型列表", + @swagger_auto_schema(operation_summary=_('Get model list'), + operation_id=_('Get model list'), manual_parameters=ModelQueryApi.get_request_params_api() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): return result.success( @@ -61,9 +62,9 @@ class Model(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="查询模型meta信息,该接口不携带认证信息", - operation_id="查询模型meta信息,该接口不携带认证信息", - tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Query model meta information, this interface does not carry authentication information'), 
operation_id=_('Query model meta information, this interface does not carry authentication information'), + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request, model_id: str): return result.success( @@ -73,9 +74,9 @@ class Model(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="暂停模型下载", - operation_id="暂停模型下载", - tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Pause model download'), + operation_id=_('Pause model download'), + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) def put(self, request: Request, model_id: str): return result.success( @@ -85,20 +86,20 @@ class Model(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型参数表单", - operation_id="获取模型参数表单", + @swagger_auto_schema(operation_summary=_('Get model parameter form'), + operation_id=_('Get model parameter form'), manual_parameters=ProvideApi.ModelForm.get_request_params_api(), - tags=["模型"]) + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request, model_id: str): return result.success( ModelSerializer.ModelParams(data={'id': model_id, 'user_id': request.user.id}).get_model_params()) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="保存模型参数表单", - operation_id="保存模型参数表单", + @swagger_auto_schema(operation_summary=_('Save model parameter form'), + operation_id=_('Save model parameter form'), manual_parameters=ProvideApi.ModelForm.get_request_params_api(), - tags=["模型"]) + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def put(self, request: Request, model_id: str): return result.success( @@ -109,10 +110,10 @@ class Model(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改模型", - operation_id="修改模型", + @swagger_auto_schema(operation_summary=_('Update model'), + operation_id=_('Update model'), request_body=ModelEditApi.get_request_body_api() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_CREATE) def put(self, request: Request, model_id: str): return result.success( @@ -120,19 +121,19 @@ class Model(APIView): str(request.user.id))) @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除模型", - operation_id="删除模型", + @swagger_auto_schema(operation_summary=_('Delete model'), + operation_id=_('Delete model'), responses=result.get_default_response() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_DELETE) def delete(self, request: Request, model_id: str): return result.success( ModelSerializer.Operate(data={'id': model_id, 'user_id': request.user.id}).delete()) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="查询模型详细信息", - operation_id="查询模型详细信息", - tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Query model details'), + operation_id=_('Query model details'), + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request, model_id: str): return result.success( @@ -146,20 +147,20 @@ class Provide(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="调用供应商函数,获取表单数据", - operation_id="调用供应商函数,获取表单数据", + @swagger_auto_schema(operation_summary=_('Call the supplier function to obtain form 
data'), + operation_id=_('Call the supplier function to obtain form data'), manual_parameters=ProvideApi.get_request_params_api(), request_body=ProvideApi.get_request_body_api() - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def post(self, request: Request, provider: str, method: str): return result.success( ProviderSerializer(data={'provider': provider, 'method': method}).exec(request.data, with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型供应商数据", - operation_id="获取模型供应商列表" - , tags=["模型"]) + @swagger_auto_schema(operation_summary=_('Get a list of model suppliers'), + operation_id=_('Get a list of model suppliers') + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): model_type = request.query_params.get('model_type') @@ -178,11 +179,11 @@ class Provide(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型类型列表", - operation_id="获取模型类型类型列表", + @swagger_auto_schema(operation_summary=_('Get a list of model types'), + operation_id=_('Get a list of model types'), manual_parameters=ProvideApi.ModelTypeList.get_request_params_api(), responses=result.get_api_array_response(ProvideApi.ModelTypeList.get_response_body_api()) - , tags=["模型"]) + , tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): provider = request.query_params.get('provider') @@ -192,11 +193,11 @@ class Provide(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型列表", - operation_id="获取模型创建表单", + @swagger_auto_schema(operation_summary=_('Get the model creation form'), + operation_id=_('Get the model creation form'), manual_parameters=ProvideApi.ModelList.get_request_params_api(), responses=result.get_api_array_response(ProvideApi.ModelList.get_response_body_api()) - , tags=["模型"] + , tags=[_('model')] ) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): @@ -211,11 +212,11 @@ class Provide(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型默认参数", - operation_id="获取模型创建表单", + @swagger_auto_schema(operation_summary=_('Get model default parameters'), + operation_id=_('Get the model creation form'), manual_parameters=ProvideApi.ModelList.get_request_params_api(), responses=result.get_api_array_response(ProvideApi.ModelList.get_response_body_api()) - , tags=["模型"] + , tags=[_('model')] ) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): @@ -229,10 +230,10 @@ class Provide(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取模型创建表单", - operation_id="获取模型创建表单", + @swagger_auto_schema(operation_summary=_('Get the model creation form'), + operation_id=_('Get the model creation form'), manual_parameters=ProvideApi.ModelForm.get_request_params_api(), - tags=["模型"]) + tags=[_('model')]) @has_permissions(PermissionConstants.MODEL_READ) def get(self, request: Request): provider = request.query_params.get('provider') diff --git a/apps/setting/views/model_apply.py b/apps/setting/views/model_apply.py index 6bd0b548e..73fb699f0 100644 --- a/apps/setting/views/model_apply.py +++ b/apps/setting/views/model_apply.py @@ -14,35 +14,36 @@ from rest_framework.views import APIView from 
common.response import result from setting.serializers.model_apply_serializers import ModelApplySerializers +from django.utils.translation import gettext_lazy as _ class ModelApply(APIView): class EmbedDocuments(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="向量化文档", - operation_id="向量化文档", + @swagger_auto_schema(operation_summary=_('Vectorize documents'), + operation_id=_('Vectorize documents'), responses=result.get_default_response(), - tags=["模型"]) + tags=[_('model')]) def post(self, request: Request, model_id): return result.success( ModelApplySerializers(data={'model_id': model_id}).embed_documents(request.data)) class EmbedQuery(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="向量化文档", - operation_id="向量化文档", + @swagger_auto_schema(operation_summary=_('Vectorize documents'), + operation_id=_('Vectorize documents'), responses=result.get_default_response(), - tags=["模型"]) + tags=[_('model')]) def post(self, request: Request, model_id): return result.success( ModelApplySerializers(data={'model_id': model_id}).embed_query(request.data)) class CompressDocuments(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="重排序文档", - operation_id="重排序文档", + @swagger_auto_schema(operation_summary=_('Rerank documents'), + operation_id=_('Rerank documents'), responses=result.get_default_response(), - tags=["模型"]) + tags=[_('model')]) def post(self, request: Request, model_id): return result.success( ModelApplySerializers(data={'model_id': model_id}).compress_documents(request.data)) diff --git a/apps/setting/views/system_setting.py b/apps/setting/views/system_setting.py index e08a4702e..fd2224677 100644 --- a/apps/setting/views/system_setting.py +++ b/apps/setting/views/system_setting.py @@ -17,6 +17,7 @@ from common.constants.permission_constants import RoleConstants from common.response import result from setting.serializers.system_setting import SystemSettingSerializer from setting.swagger_api.system_setting import SystemSettingEmailApi +from django.utils.translation import gettext_lazy as _ class SystemSetting(APIView): @@ -24,9 +25,9 @@ class SystemSetting(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="创建或者修改邮箱设置", - operation_id="创建或者修改邮箱设置", - request_body=SystemSettingEmailApi.get_request_body_api(), tags=["邮箱设置"], + @swagger_auto_schema(operation_summary=_('Create or update email settings'), + operation_id=_('Create or update email settings'), + request_body=SystemSettingEmailApi.get_request_body_api(), tags=[_('Email settings')], responses=result.get_api_response(SystemSettingEmailApi.get_response_body_api())) @has_permissions(RoleConstants.ADMIN) def put(self, request: Request): @@ -35,11 +36,11 @@ class SystemSetting(APIView): data=request.data).update_or_save()) @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="测试邮箱设置", - operation_id="测试邮箱设置", + @swagger_auto_schema(operation_summary=_('Test email settings'), + operation_id=_('Test email settings'), request_body=SystemSettingEmailApi.get_request_body_api(), responses=result.get_default_response(), - tags=["邮箱设置"]) + tags=[_('Email settings')]) @has_permissions(RoleConstants.ADMIN) def post(self, request: Request): return result.success( @@ -47,10 +48,10 @@ class SystemSetting(APIView): data=request.data).is_valid()) @action(methods=['GET'], detail=False) - 
@swagger_auto_schema(operation_summary="获取邮箱设置", - operation_id="获取邮箱设置", + @swagger_auto_schema(operation_summary=_('Get email settings'), + operation_id=_('Get email settings'), responses=result.get_api_response(SystemSettingEmailApi.get_response_body_api()), - tags=["邮箱设置"]) + tags=[_('Email settings')]) @has_permissions(RoleConstants.ADMIN) def get(self, request: Request): return result.success( diff --git a/apps/setting/views/valid.py b/apps/setting/views/valid.py index f88c58927..c52b8905e 100644 --- a/apps/setting/views/valid.py +++ b/apps/setting/views/valid.py @@ -16,14 +16,15 @@ from common.constants.permission_constants import RoleConstants from common.response import result from setting.serializers.valid_serializers import ValidSerializer from setting.swagger_api.valid_api import ValidApi +from django.utils.translation import gettext_lazy as _ class Valid(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取校验结果", - operation_id="获取校验结果", + @swagger_auto_schema(operation_summary=_('Get verification results'), + operation_id=_('Get verification results'), manual_parameters=ValidApi.get_request_params_api(), responses=result.get_default_response() , tags=["校验"]) diff --git a/apps/smartdoc/conf.py b/apps/smartdoc/conf.py index 75b7482e6..4e7cbabf8 100644 --- a/apps/smartdoc/conf.py +++ b/apps/smartdoc/conf.py @@ -82,6 +82,7 @@ class Config(dict): "DB_PASSWORD": "Password123@postgres", "DB_ENGINE": "dj_db_conn_pool.backends.postgresql", "DB_MAX_OVERFLOW": 80, + 'LANGUAGE_CODE': 'en', # 向量模型 "EMBEDDING_MODEL_NAME": "shibing624/text2vec-base-chinese", "EMBEDDING_DEVICE": "cpu", diff --git a/apps/smartdoc/settings/base.py b/apps/smartdoc/settings/base.py index 785a3feb6..b6ff5161b 100644 --- a/apps/smartdoc/settings/base.py +++ b/apps/smartdoc/settings/base.py @@ -50,6 +50,7 @@ INSTALLED_APPS = [ ] MIDDLEWARE = [ + 'django.middleware.locale.LocaleMiddleware', 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', @@ -172,13 +173,31 @@ AUTH_PASSWORD_VALIDATORS = [ # Internationalization # https://docs.djangoproject.com/en/4.2/topics/i18n/ -LANGUAGE_CODE = 'en-us' - TIME_ZONE = CONFIG.get_time_zone() +# 启用国际化 USE_I18N = True -USE_TZ = False +# 启用本地化 +USE_L10N = True + +# 启用时区 +USE_TZ = True + +# 默认语言 +LANGUAGE_CODE = CONFIG.get("LANGUAGE_CODE") + +# 支持的语言 +LANGUAGES = [ + ('en', 'English'), + ('zh', '中文简体'), + ('zh-hant', '中文繁体') +] + +# 翻译文件路径 +LOCALE_PATHS = [ + os.path.join(BASE_DIR.parent, 'locales') +] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/4.2/howto/static-files/ diff --git a/apps/users/serializers/user_serializers.py b/apps/users/serializers/user_serializers.py index 600cf86c3..055bc4a4c 100644 --- a/apps/users/serializers/user_serializers.py +++ b/apps/users/serializers/user_serializers.py @@ -39,6 +39,7 @@ from function_lib.models.function import FunctionLib from setting.models import Team, SystemSetting, SettingType, Model, TeamMember, TeamMemberPermission from smartdoc.conf import PROJECT_DIR from users.models.user import User, password_encrypt, get_user_dynamics_permission +from django.utils.translation import gettext_lazy as _ user_cache = cache.caches['user_cache'] @@ -58,22 +59,23 @@ class SystemSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=[], properties={ - 'version': openapi.Schema(type=openapi.TYPE_STRING, title="系统版本号", 
description="系统版本号"), + 'version': openapi.Schema(type=openapi.TYPE_STRING, title=_("System version number"), + description=_("System version number")), } ) class LoginSerializer(ApiMixin, serializers.Serializer): username = serializers.CharField(required=True, - error_messages=ErrMessage.char("用户名")) + error_messages=ErrMessage.char(_("Username"))) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码")) + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password"))) def is_valid(self, *, raise_exception=False): """ 校验参数 - :param raise_exception: 是否抛出异常 只能是True - :return: 用户信息 + :param raise_exception: Whether to throw an exception can only be True + :return: User information """ super().is_valid(raise_exception=True) username = self.data.get("username") @@ -84,13 +86,13 @@ class LoginSerializer(ApiMixin, serializers.Serializer): if user is None: raise ExceptionCodeConstants.INCORRECT_USERNAME_AND_PASSWORD.value.to_app_api_exception() if not user.is_active: - raise AppApiException(1005, "用户已被禁用,请联系管理员!") + raise AppApiException(1005, _("The user has been disabled, please contact the administrator!")) return user def get_user_token(self): """ - 获取用户Token - :return: 用户Token(认证信息) + Get user token + :return: User Token (authentication information) """ user = self.is_valid() token = signing.dumps({'username': user.username, 'id': str(user.id), 'email': user.email, @@ -106,8 +108,8 @@ class LoginSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=['username', 'password'], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码") + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")) } ) @@ -122,36 +124,38 @@ class LoginSerializer(ApiMixin, serializers.Serializer): class RegisterSerializer(ApiMixin, serializers.Serializer): """ - 注册请求对象 + Register request object """ email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) username = serializers.CharField(required=True, - error_messages=ErrMessage.char("用户名"), + error_messages=ErrMessage.char(_("Username")), max_length=20, min_length=6, validators=[ validators.RegexValidator(regex=re.compile("^.{6,20}$"), - message="用户名字符数为 6-20 个字符") + message=_("Username must be 6-20 characters long")) ]) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) re_password = serializers.CharField(required=True, - error_messages=ErrMessage.char("确认密码"), + error_messages=ErrMessage.char(_("Confirm Password")), 
validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code"))) class Meta: model = User @@ -182,20 +186,18 @@ class RegisterSerializer(ApiMixin, serializers.Serializer): return True @valid_license(model=User, count=2, - message='社区版最多支持 2 个用户,如需拥有更多用户,请联系我们(https://fit2cloud.com/)。') + message=_( + "The community version supports up to 2 users. If you need more users, please contact us (https://fit2cloud.com/).")) @transaction.atomic def save(self, **kwargs): m = User( **{'id': uuid.uuid1(), 'email': self.data.get("email"), 'username': self.data.get("username"), 'role': RoleConstants.USER.name}) m.set_password(self.data.get("password")) - # 插入用户 m.save() - # 初始化用户团队 - Team(**{'user': m, 'name': m.username + '的团队'}).save() + Team(**{'user': m, 'name': m.username + _("team")}).save() email = self.data.get("email") code_cache_key = email + ":register" - # 删除验证码缓存 user_cache.delete(code_cache_key) @staticmethod @@ -204,11 +206,13 @@ class RegisterSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=['username', 'email', 'password', 're_password', 'code'], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", description="确认密码"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码") + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"), + description=_("Confirm Password")), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")) } ) @@ -219,16 +223,18 @@ class CheckCodeSerializer(ApiMixin, serializers.Serializer): """ email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code"))) type = serializers.CharField(required=True, - error_messages=ErrMessage.char("类型"), + error_messages=ErrMessage.char(_("Type")), validators=[ validators.RegexValidator(regex=re.compile("^register|reset_password$"), - message="类型只支持register|reset_password", code=500) + message=_( + "The type only supports register|reset_password"), + code=500) ]) def 
is_valid(self, *, raise_exception=False): @@ -247,40 +253,43 @@ class CheckCodeSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=['email', 'code', 'type'], properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"), - 'type': openapi.Schema(type=openapi.TYPE_STRING, title="类型", description="register|reset_password") + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Type"), description="register|reset_password") } ) def get_response_body_api(self): return get_api_response(openapi.Schema( type=openapi.TYPE_BOOLEAN, - title="是否成功", + title=_('Is it successful'), default=True, - description="错误提示")) + description=_('Error message'))) class RePasswordSerializer(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - code = serializers.CharField(required=True, error_messages=ErrMessage.char("验证码")) + code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Verification code"))) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + , message=_( + "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) - re_password = serializers.CharField(required=True, error_messages=ErrMessage.char("确认密码"), + re_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Confirm Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")] + , message=_( + "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))] ) class Meta: @@ -318,10 +327,12 @@ class RePasswordSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=['email', 'code', "password", 're_password'], properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", description="确认密码") + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), + description=_("Verification code")), + 'password': 
openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"), + description=_("Confirm Password")) } ) @@ -329,13 +340,13 @@ class RePasswordSerializer(ApiMixin, serializers.Serializer): class SendEmailSerializer(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=True - , error_messages=ErrMessage.char("邮箱"), + , error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - type = serializers.CharField(required=True, error_messages=ErrMessage.char("类型"), validators=[ + type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Type")), validators=[ validators.RegexValidator(regex=re.compile("^register|reset_password$"), - message="类型只支持register|reset_password", code=500) + message=_("The type only supports register|reset_password"), code=500) ]) class Meta: @@ -353,7 +364,8 @@ class SendEmailSerializer(ApiMixin, serializers.Serializer): code_cache_key_lock = code_cache_key + "_lock" ttl = user_cache.ttl(code_cache_key_lock) if ttl is not None: - raise AppApiException(500, f"{ttl.total_seconds()}秒内请勿重复发送邮件") + raise AppApiException(500, _("Do not send emails again within {seconds} seconds").format( + seconds=int(ttl.total_seconds()))) return True def send(self): @@ -379,7 +391,8 @@ class SendEmailSerializer(ApiMixin, serializers.Serializer): system_setting = QuerySet(SystemSetting).filter(type=SettingType.EMAIL.value).first() if system_setting is None: user_cache.delete(code_cache_key_lock) - raise AppApiException(1004, "邮箱服务未设置,请联系管理员到【邮箱设置】中设置邮箱服务。") + raise AppApiException(1004, + _("The email service has not been set up. 
Please contact the administrator to set up the email service in [Email Settings].")) try: connection = EmailBackend(system_setting.meta.get("email_host"), system_setting.meta.get('email_port'), @@ -390,14 +403,15 @@ class SendEmailSerializer(ApiMixin, serializers.Serializer): system_setting.meta.get('email_use_ssl') ) # 发送邮件 - send_mail(f'【智能知识库问答系统-{"用户注册" if state == "register" else "修改密码"}】', + send_mail(_('【Intelligent knowledge base question and answer system-{action}】').format( + action=_('User registration') if state == 'register' else _('Change password')), '', html_message=f'{content.replace("${code}", code)}', from_email=system_setting.meta.get('from_email'), recipient_list=[email], fail_silently=False, connection=connection) except Exception as e: user_cache.delete(code_cache_key_lock) - raise AppApiException(500, f"{str(e)}邮件发送失败") + raise AppApiException(500, f"{str(e)}" + _("Email sending failed")) user_cache.set(code_cache_key, code, timeout=datetime.timedelta(minutes=30)) return True @@ -406,8 +420,8 @@ class SendEmailSerializer(ApiMixin, serializers.Serializer): type=openapi.TYPE_OBJECT, required=['email', 'type'], properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'type': openapi.Schema(type=openapi.TYPE_STRING, title="类型", description="register|reset_password") + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_('Email')), + 'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('Type'), description="register|reset_password") } ) @@ -436,12 +450,12 @@ class UserProfile(ApiMixin): type=openapi.TYPE_OBJECT, required=['id', 'username', 'email', 'role', 'is_active'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用"), - "permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title="权限列表", description="权限列表", + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active")), + "permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Permissions"), description=_("Permissions"), items=openapi.Schema(type=openapi.TYPE_STRING)) } ) @@ -458,11 +472,11 @@ class UserSerializer(ApiMixin, serializers.ModelSerializer): type=openapi.TYPE_OBJECT, required=['id', 'username', 'email', 'role', 'is_active'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title="是否可用", description="是否可用") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", 
description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")), + 'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active")) } ) @@ -475,7 +489,7 @@ class UserSerializer(ApiMixin, serializers.ModelSerializer): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=True, - description='邮箱或者用户名')] + description=_("Email or username"))] @staticmethod def get_response_body_api(): @@ -483,9 +497,10 @@ class UserSerializer(ApiMixin, serializers.ModelSerializer): type=openapi.TYPE_OBJECT, required=['username', 'email', 'id'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title='用户主键id', description="用户主键id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title='ID', description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), + description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")) } ) @@ -493,7 +508,8 @@ class UserSerializer(ApiMixin, serializers.ModelSerializer): if with_valid: self.is_valid(raise_exception=True) email_or_username = self.data.get('email_or_username') - return [{'id': user_model.id, 'username': user_model.username, 'email': user_model.email} for user_model in + return [{'id': user_model.id, 'username': user_model.username, 'email': user_model.email} for user_model + in QuerySet(User).filter(Q(username=email_or_username) | Q(email=email_or_username))] def listByType(self, type, user_id): @@ -524,8 +540,8 @@ class UserSerializer(ApiMixin, serializers.ModelSerializer): for app in user_list if app.user.id != user_id ] users = [ - {'id': 'all', 'username': '全部'}, - {'id': user_id, 'username': '我的'} + {'id': 'all', 'username': _('All')}, + {'id': user_id, 'username': _('Me')} ] users.extend(other_users) return users @@ -544,16 +560,16 @@ class UserInstanceSerializer(ApiMixin, serializers.ModelSerializer): required=['id', 'username', 'email', 'phone', 'is_active', 'role', 'nick_name', 'create_time', 'update_time'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title="用户id", description="用户id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否激活", description="是否激活"), - 'role': openapi.Schema(type=openapi.TYPE_STRING, title="角色", description="角色"), - 'source': openapi.Schema(type=openapi.TYPE_STRING, title="来源", description="来源"), - 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名"), - 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title="创建时间", description="修改时间"), - 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title="修改时间", description="修改时间") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title="ID", description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")), + 'email': 
openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"), description=_("Is active")), + 'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")), + 'source': openapi.Schema(type=openapi.TYPE_STRING, title=_("Source"), description=_("Source")), + 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")), + 'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Create time"), description=_("Create time")), + 'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Update time"), description=_("Update time")) } ) @@ -563,15 +579,14 @@ class UserInstanceSerializer(ApiMixin, serializers.ModelSerializer): in_=openapi.IN_PATH, type=openapi.TYPE_STRING, required=True, - description='用户名id') - - ] + description='ID') + ] class UserManageSerializer(serializers.Serializer): class Query(ApiMixin, serializers.Serializer): email_or_username = serializers.CharField(required=False, allow_null=True, - error_messages=ErrMessage.char("邮箱或者用户名")) + error_messages=ErrMessage.char(_('Email or username'))) @staticmethod def get_request_params_api(): @@ -579,7 +594,7 @@ class UserManageSerializer(serializers.Serializer): in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False, - description='邮箱或者用户名')] + description=_("Email or username"))] @staticmethod def get_response_body_api(): @@ -587,9 +602,10 @@ class UserManageSerializer(serializers.Serializer): type=openapi.TYPE_OBJECT, required=['username', 'email', 'id'], properties={ - 'id': openapi.Schema(type=openapi.TYPE_STRING, title='用户主键id', description="用户主键id"), - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址") + 'id': openapi.Schema(type=openapi.TYPE_STRING, title='ID', description="ID"), + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), + description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")) } ) @@ -618,28 +634,28 @@ class UserManageSerializer(serializers.Serializer): class UserInstance(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=True, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) username = serializers.CharField(required=True, - error_messages=ErrMessage.char("用户名"), - max_length=20, - min_length=6, - validators=[ - validators.RegexValidator(regex=re.compile("^.{6,20}$"), - message="用户名字符数为 6-20 个字符") - ]) - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), - validators=[validators.RegexValidator(regex=re.compile( - "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" - "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")]) + error_messages=ErrMessage.char(_("Username")), + max_length=20, + min_length=6, + validators=[ + validators.RegexValidator(regex=re.compile("^.{6,20}$"), + message=_('Username must be 6-20 characters long')) + ]) + password = serializers.CharField(required=True, 
error_messages=ErrMessage.char(_("Password")), + validators=[validators.RegexValidator(regex=re.compile( + "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" + "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") + , message=_("The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) - nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char("姓名"), max_length=64, + nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64, + allow_null=True, allow_blank=True) + phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20, allow_null=True, allow_blank=True) - phone = serializers.CharField(required=False, error_messages=ErrMessage.char("手机号"), max_length=20, - allow_null=True, allow_blank=True) def is_valid(self, *, raise_exception=True): super().is_valid(raise_exception=True) @@ -658,56 +674,61 @@ class UserManageSerializer(serializers.Serializer): type=openapi.TYPE_OBJECT, required=['username', 'email', 'password'], properties={ - 'username': openapi.Schema(type=openapi.TYPE_STRING, title="用户名", description="用户名"), - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱地址"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"), - 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名") + 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), + description=_("Username")), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")), + 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")), + 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")) } ) class UserEditInstance(ApiMixin, serializers.Serializer): email = serializers.EmailField( required=False, - error_messages=ErrMessage.char("邮箱"), + error_messages=ErrMessage.char(_("Email")), validators=[validators.EmailValidator(message=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.message, code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)]) - nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char("姓名"), max_length=64, + nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64, allow_null=True, allow_blank=True) - phone = serializers.CharField(required=False, error_messages=ErrMessage.char("手机号"), max_length=20, + phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20, allow_null=True, allow_blank=True) - is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("是否可用")) + is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char(_("Is active"))) def is_valid(self, *, user_id=None, raise_exception=False): super().is_valid(raise_exception=True) if self.data.get('email') is not None and QuerySet(User).filter(email=self.data.get('email')).exclude( id=user_id).exists(): - raise AppApiException(1004, "邮箱已经被使用") + raise AppApiException(1004, _('Email is already in use')) @staticmethod def get_request_body_api(): return openapi.Schema( 
type=openapi.TYPE_OBJECT, properties={ - 'email': openapi.Schema(type=openapi.TYPE_STRING, title="邮箱", description="邮箱"), - 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title="姓名", description="姓名"), - 'phone': openapi.Schema(type=openapi.TYPE_STRING, title="手机号", description="手机号"), - 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"), + 'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")), + 'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")), + 'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")), + 'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"), + description=_("Is active")), } ) class RePasswordInstance(ApiMixin, serializers.Serializer): - password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"), + password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="密码长度6-20个字符,必须字母、数字、特殊字符组合")]) - re_password = serializers.CharField(required=True, error_messages=ErrMessage.char("确认密码"), + , message=_( + "The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))]) + re_password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Confirm Password")), validators=[validators.RegexValidator(regex=re.compile( "^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)" "(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$") - , message="确认密码长度6-20个字符,必须字母、数字、特殊字符组合")] + , message=_( + "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))] ) @staticmethod @@ -716,9 +737,10 @@ class UserManageSerializer(serializers.Serializer): type=openapi.TYPE_OBJECT, required=['password', 're_password'], properties={ - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="确认密码", - description="确认密码"), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Confirm Password"), + description=_("Confirm Password")), } ) @@ -728,7 +750,8 @@ class UserManageSerializer(serializers.Serializer): raise ExceptionCodeConstants.PASSWORD_NOT_EQ_RE_PASSWORD.value.to_app_api_exception() @valid_license(model=User, count=2, - message='社区版最多支持 2 个用户,如需拥有更多用户,请联系我们(https://fit2cloud.com/)。') + message=_( + 'The community version supports up to 2 users. 
If you need more users, please contact us (https://fit2cloud.com/).')) @transaction.atomic def save(self, instance, with_valid=True): if with_valid: @@ -742,16 +765,16 @@ class UserManageSerializer(serializers.Serializer): is_active=True) user.save() # 初始化用户团队 - Team(**{'user': user, 'name': user.username + '的团队'}).save() + Team(**{'user': user, 'name': user.username + _('team')}).save() return UserInstanceSerializer(user).data class Operate(serializers.Serializer): - id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("用户id")) + id = serializers.UUIDField(required=True, error_messages=ErrMessage.char("ID")) def is_valid(self, *, raise_exception=False): super().is_valid(raise_exception=True) if not QuerySet(User).filter(id=self.data.get('id')).exists(): - raise AppApiException(1004, "用户不存在") + raise AppApiException(1004, _('User does not exist')) @transaction.atomic def delete(self, with_valid=True): @@ -759,7 +782,7 @@ class UserManageSerializer(serializers.Serializer): self.is_valid(raise_exception=True) user = QuerySet(User).filter(id=self.data.get('id')).first() if user.role == RoleConstants.ADMIN.name: - raise AppApiException(1004, "无法删除管理员") + raise AppApiException(1004, _('Unable to delete administrator')) user_id = self.data.get('id') team_member_list = QuerySet(TeamMember).filter(Q(user_id=user_id) | Q(team_id=user_id)) @@ -796,7 +819,7 @@ class UserManageSerializer(serializers.Serializer): user = QuerySet(User).filter(id=self.data.get('id')).first() if user.role == RoleConstants.ADMIN.name and 'is_active' in instance and instance.get( 'is_active') is not None: - raise AppApiException(1004, "不能修改管理员状态") + raise AppApiException(1004, _('Cannot modify administrator status')) update_keys = ['email', 'nick_name', 'phone', 'is_active'] for update_key in update_keys: if update_key in instance and instance.get(update_key) is not None: diff --git a/apps/users/views/user.py b/apps/users/views/user.py index 9dc91f962..c8b7380e8 100644 --- a/apps/users/views/user.py +++ b/apps/users/views/user.py @@ -23,17 +23,17 @@ from smartdoc.settings import JWT_AUTH from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \ RePasswordSerializer, \ SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer - +from django.utils.translation import gettext_lazy as _ user_cache = cache.caches['user_cache'] token_cache = cache.caches['token_cache'] class Profile(APIView): @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取MaxKB相关信息", - operation_id="获取MaxKB相关信息", + @swagger_auto_schema(operation_summary=_("Get MaxKB related information"), + operation_id=_("Get MaxKB related information"), responses=result.get_api_response(SystemSerializer.get_response_body_api()), - tags=['系统参数']) + tags=[_('System parameters')]) def get(self, request: Request): return result.success(SystemSerializer.get_profile()) @@ -42,10 +42,10 @@ class User(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取当前用户信息", - operation_id="获取当前用户信息", + @swagger_auto_schema(operation_summary=_("Get current user information"), + operation_id=_("Get current user information"), responses=result.get_api_response(UserProfile.get_response_body_api()), - tags=['用户']) + tags=[_("User")]) @has_permissions(PermissionConstants.USER_READ) def get(self, request: Request): return result.success(UserProfile.get_user_profile(request.user))
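
The hunks above all apply one pattern: user-facing literals that used to be hard-coded Chinese are wrapped in gettext_lazy at class-definition time (serializer error_messages, validator messages, swagger_auto_schema metadata). A minimal, self-contained sketch of why the lazy form matters there — the serializer, field name and messages below are illustrative assumptions, not code from this patch:

# Sketch: gettext_lazy defers the catalog lookup until the string is rendered,
# so values baked into class bodies at import time still follow the active language.
import django
from django.conf import settings
from django.utils import translation
from django.utils.translation import gettext_lazy as _

settings.configure(USE_I18N=True, LANGUAGE_CODE="en")
django.setup()

from rest_framework import serializers


class ExampleSerializer(serializers.Serializer):
    # error_messages is evaluated once, when the class body runs; gettext_lazy
    # stores a lazy proxy here instead of freezing the text in one language.
    email = serializers.EmailField(
        required=True,
        error_messages={"required": _("Email is required")},
    )


lazy_message = _("Email is required")
with translation.override("zh-hans"):
    # Prints the zh-hans translation when a compiled catalog provides one,
    # otherwise falls back to the msgid itself.
    print(str(lazy_message))
with translation.override("en"):
    print(str(lazy_message))

A plain gettext call would translate once, at import time, under whatever language happens to be active when the module loads; that is why the hunk above imports gettext_lazy as _ rather than gettext.
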
@@ -54,11 +54,11 @@ class User(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取用户列表", - operation_id="获取用户列表", + @swagger_auto_schema(operation_summary=_("Get user list"), + operation_id=_("Get user list"), manual_parameters=UserSerializer.Query.get_request_params_api(), responses=result.get_api_array_response(UserSerializer.Query.get_response_body_api()), - tags=['用户']) + tags=[_("User")]) @has_permissions(PermissionConstants.USER_READ) def get(self, request: Request): return result.success( @@ -69,20 +69,20 @@ class ResetCurrentUserPasswordView(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="修改当前用户密码", - operation_id="修改当前用户密码", + @swagger_auto_schema(operation_summary=_("Modify current user password"), + operation_id=_("Modify current user password"), request_body=openapi.Schema( type=openapi.TYPE_OBJECT, required=['email', 'code', "password", 're_password'], properties={ - 'code': openapi.Schema(type=openapi.TYPE_STRING, title="验证码", description="验证码"), - 'password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", description="密码"), - 're_password': openapi.Schema(type=openapi.TYPE_STRING, title="密码", - description="密码") + 'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), description=_("Verification code")), + 'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")), + 're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), + description=_("Password")) } ), responses=RePasswordSerializer().get_response_body_api(), - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): data = {'email': request.user.email} data.update(request.data) @@ -90,7 +90,7 @@ class ResetCurrentUserPasswordView(APIView): if serializer_obj.reset_password(): token_cache.delete(request.META.get('HTTP_AUTHORIZATION')) return result.success(True) - return result.error("修改密码失败") + return result.error(_("Failed to change password")) class SendEmailToCurrentUserView(APIView): @@ -98,10 +98,10 @@ class SendEmailToCurrentUserView(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="发送邮件到当前用户", - operation_id="发送邮件到当前用户", + @swagger_auto_schema(operation_summary=_("Send email to current user"), + operation_id=_("Send email to current user"), responses=SendEmailSerializer().get_response_body_api(), - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): serializer_obj = SendEmailSerializer(data={'email': request.user.email, 'type': "reset_password"}) if serializer_obj.is_valid(raise_exception=True): @@ -113,10 +113,10 @@ class Logout(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="登出", - operation_id="登出", + @swagger_auto_schema(operation_summary=_("Sign out"), + operation_id=_("Sign out"), responses=SendEmailSerializer().get_response_body_api(), - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): token_cache.delete(request.META.get('HTTP_AUTHORIZATION')) return result.success(True) @@ -125,12 +125,12 @@ class Logout(APIView): class Login(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="登录", - operation_id="登录", + @swagger_auto_schema(operation_summary=_("Log in"), + operation_id=_("Log in"), 
request_body=LoginSerializer().get_request_body_api(), responses=LoginSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): login_request = LoginSerializer(data=request.data) # 校验请求参数 @@ -144,29 +144,29 @@ class Register(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="用户注册", - operation_id="用户注册", + @swagger_auto_schema(operation_summary=_("User registration"), + operation_id=_("User registration"), request_body=RegisterSerializer().get_request_body_api(), responses=RegisterSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): serializer_obj = RegisterSerializer(data=request.data) if serializer_obj.is_valid(raise_exception=True): serializer_obj.save() - return result.success("注册成功") + return result.success(_("Registration successful")) class RePasswordView(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="修改密码", - operation_id="修改密码", + @swagger_auto_schema(operation_summary=_("Change password"), + operation_id=_("Change password"), request_body=RePasswordSerializer().get_request_body_api(), responses=RePasswordSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): serializer_obj = RePasswordSerializer(data=request.data) return result.success(serializer_obj.reset_password()) @@ -176,12 +176,12 @@ class CheckCode(APIView): @action(methods=['POST'], detail=False) @permission_classes((AllowAny,)) - @swagger_auto_schema(operation_summary="校验验证码是否正确", - operation_id="校验验证码是否正确", + @swagger_auto_schema(operation_summary=_("Check whether the verification code is correct"), + operation_id=_("Check whether the verification code is correct"), request_body=CheckCodeSerializer().get_request_body_api(), responses=CheckCodeSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): return result.success(CheckCodeSerializer(data=request.data).is_valid(raise_exception=True)) @@ -189,12 +189,12 @@ class CheckCode(APIView): class SendEmail(APIView): @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="发送邮件", - operation_id="发送邮件", + @swagger_auto_schema(operation_summary=_("Send email"), + operation_id=_("Send email"), request_body=SendEmailSerializer().get_request_body_api(), responses=SendEmailSerializer().get_response_body_api(), security=[], - tags=['用户']) + tags=[_("User")]) def post(self, request: Request): serializer_obj = SendEmailSerializer(data=request.data) if serializer_obj.is_valid(raise_exception=True): @@ -205,11 +205,11 @@ class UserManage(APIView): authentication_classes = [TokenAuth] @action(methods=['POST'], detail=False) - @swagger_auto_schema(operation_summary="添加用户", - operation_id="添加用户", + @swagger_auto_schema(operation_summary=_("Add user"), + operation_id=_("Add user"), request_body=UserManageSerializer.UserInstance.get_request_body_api(), responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()), - tags=["用户管理"] + tags=[_("User management")] ) @has_permissions(ViewPermission( [RoleConstants.ADMIN], @@ -222,9 +222,9 @@ class UserManage(APIView): authentication_classes = [TokenAuth] @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取用户分页列表", - operation_id="获取用户分页列表", - tags=["用户管理"], + 
@swagger_auto_schema(operation_summary=_("Get user paginated list"), + operation_id=_("Get user paginated list"), + tags=[_("User management")], manual_parameters=UserManageSerializer.Query.get_request_params_api(), responses=result.get_page_api_response(UserInstanceSerializer.get_response_body_api()), ) @@ -242,12 +242,12 @@ class UserManage(APIView): authentication_classes = [TokenAuth] @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改密码", - operation_id="修改密码", + @swagger_auto_schema(operation_summary=_("Change password"), + operation_id=_("Change password"), manual_parameters=UserInstanceSerializer.get_request_params_api(), request_body=UserManageSerializer.RePasswordInstance.get_request_body_api(), responses=result.get_default_response(), - tags=["用户管理"]) + tags=[_("User management")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN], [PermissionConstants.USER_READ], @@ -260,11 +260,11 @@ class UserManage(APIView): authentication_classes = [TokenAuth] @action(methods=['DELETE'], detail=False) - @swagger_auto_schema(operation_summary="删除用户", - operation_id="删除用户", + @swagger_auto_schema(operation_summary=_("Delete user"), + operation_id=_("Delete user"), manual_parameters=UserInstanceSerializer.get_request_params_api(), responses=result.get_default_response(), - tags=["用户管理"]) + tags=[_("User management")]) @has_permissions(ViewPermission( [RoleConstants.ADMIN], [PermissionConstants.USER_READ], @@ -273,11 +273,11 @@ class UserManage(APIView): return result.success(UserManageSerializer.Operate(data={'id': user_id}).delete(with_valid=True)) @action(methods=['GET'], detail=False) - @swagger_auto_schema(operation_summary="获取用户信息", - operation_id="获取用户信息", + @swagger_auto_schema(operation_summary=_("Get user information"), + operation_id=_("Get user information"), manual_parameters=UserInstanceSerializer.get_request_params_api(), responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()), - tags=["用户管理"] + tags=[_("User management")] ) @has_permissions(ViewPermission( [RoleConstants.ADMIN], @@ -287,12 +287,12 @@ class UserManage(APIView): return result.success(UserManageSerializer.Operate(data={'id': user_id}).one(with_valid=True)) @action(methods=['PUT'], detail=False) - @swagger_auto_schema(operation_summary="修改用户信息", - operation_id="修改用户信息", + @swagger_auto_schema(operation_summary=_("Update user information"), + operation_id=_("Update user information"), manual_parameters=UserInstanceSerializer.get_request_params_api(), request_body=UserManageSerializer.UserEditInstance.get_request_body_api(), responses=result.get_api_response(UserInstanceSerializer.get_response_body_api()), - tags=["用户管理"] + tags=[_("User management")] ) @has_permissions(ViewPermission( [RoleConstants.ADMIN], @@ -306,11 +306,11 @@ class UserManage(APIView): class UserListView(APIView): authentication_classes = [TokenAuth] - @swagger_auto_schema(operation_summary="通过类型获取用户列表", - operation_id="通过类型获取用户列表", + @swagger_auto_schema(operation_summary=_("Get user list by type"), + operation_id=_("Get user list by type"), manual_parameters=UserSerializer.Query.get_request_params_api(), responses=result.get_api_array_response(UserSerializer.Query.get_response_body_api()), - tags=['用户']) + tags=[_("User")]) @has_permissions(PermissionConstants.USER_READ) def get(self, request: Request, type): return result.success(UserSerializer().listByType(type, request.user.id)) diff --git a/ui/src/components/ai-chat/component/chat-input-operate/index.vue 
b/ui/src/components/ai-chat/component/chat-input-operate/index.vue index 47385a370..fff14e882 100644 --- a/ui/src/components/ai-chat/component/chat-input-operate/index.vue +++ b/ui/src/components/ai-chat/component/chat-input-operate/index.vue @@ -396,7 +396,7 @@ const startRecording = async () => { }, (err: any) => { MsgAlert( - `提示`, + t('common.tip'), `

该功能需要使用麦克风,浏览器禁止不安全页面录音,解决方案如下:
1、可开启 https 解决;
2、若无 https 配置则需要修改浏览器安全配置,Chrome 设置如下:
@@ -413,7 +413,7 @@ const startRecording = async () => { ) } catch (error) { MsgAlert( - `提示`, + t('common.tip'), `

该功能需要使用麦克风,浏览器禁止不安全页面录音,解决方案如下:
1、可开启 https 解决;
2、若无 https 配置则需要修改浏览器安全配置,Chrome 设置如下:
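
On the Django side, every string wrapped in _() in the backend hunks only renders as something other than its English msgid once a compiled message catalog supplies a translation. A short sketch of the standard catalog round-trip — the locale codes are illustrative assumptions, not taken from this patch:

# Sketch: regenerate and compile Django message catalogs so that strings
# marked with gettext_lazy resolve at runtime. Run inside a project that
# already has a locale directory on LOCALE_PATHS.
import django
from django.conf import settings
from django.core.management import call_command

settings.configure()  # a real project configures this via its settings module
django.setup()

# Scan source files for _() / gettext_lazy() markers and update
# locale/<lang>/LC_MESSAGES/django.po with one msgid per marked string.
call_command("makemessages", locale=["zh_Hans", "zh_Hant", "en"])

# Once translators fill in the msgstr entries, compile the .po files into
# the .mo files Django loads when resolving the lazy strings.
call_command("compilemessages")
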
diff --git a/ui/src/components/app-table/index.vue b/ui/src/components/app-table/index.vue index 1b773081f..812148df0 100644 --- a/ui/src/components/app-table/index.vue +++ b/ui/src/components/app-table/index.vue @@ -15,8 +15,8 @@ clearable /> - 创建 - 取消 + {{$t('common.create')}} + {{$t('common.cancel')}}

diff --git a/ui/src/components/dynamics-form/items/JsonInput.vue b/ui/src/components/dynamics-form/items/JsonInput.vue index e7bfb3f2b..f5bec2baa 100644 --- a/ui/src/components/dynamics-form/items/JsonInput.vue +++ b/ui/src/components/dynamics-form/items/JsonInput.vue @@ -31,7 +31,7 @@ /> diff --git a/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue b/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue index 98d88b9b1..eae016df4 100644 --- a/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue +++ b/ui/src/components/dynamics-form/items/complex/ArrayObjectCard.vue @@ -14,7 +14,7 @@ v-bind="attr" :parent_field="formField.field + '.' + index" > - + diff --git a/ui/src/components/generate-related-dialog/index.vue b/ui/src/components/generate-related-dialog/index.vue index 310d0d8c5..937c48d4e 100644 --- a/ui/src/components/generate-related-dialog/index.vue +++ b/ui/src/components/generate-related-dialog/index.vue @@ -28,64 +28,11 @@
- - - -
- - {{ item.name }} - 公用 - -
- - - -
- - -
- - {{ item.name }} - {{ - $t('views.application.applicationForm.form.aiModel.unavailable') - }} -
- - - -
-
-
+ :options="modelOptions" + >
@@ -94,9 +41,9 @@ @@ -109,8 +56,6 @@ import documentApi from '@/api/document' import paragraphApi from '@/api/paragraph' import datasetApi from '@/api/dataset' import useStore from '@/stores' -import { relatedObject } from '@/utils/utils' -import type { Provider } from '@/api/type/model' import { groupBy } from 'lodash' import { MsgSuccess } from '@/utils/message' import { t } from '@/locales' @@ -129,7 +74,6 @@ const loading = ref(false) const dialogVisible = ref(false) const modelOptions = ref(null) -const providerOptions = ref>([]) const idList = ref([]) const apiType = ref('') // 文档document或段落paragraph @@ -143,7 +87,6 @@ const rules = reactive({ }) const open = (ids: string[], type: string) => { - getProvider() getModel() idList.value = ids apiType.value = type @@ -190,19 +133,6 @@ function getModel() { }) } -function getProvider() { - loading.value = true - model - .asyncGetProvider() - .then((res: any) => { - providerOptions.value = res?.data - loading.value = false - }) - .catch(() => { - loading.value = false - }) -} - defineExpose({ open }) diff --git a/ui/src/enums/model.ts b/ui/src/enums/model.ts index 625376710..41484b1bc 100644 --- a/ui/src/enums/model.ts +++ b/ui/src/enums/model.ts @@ -1,3 +1,4 @@ +import { t } from '@/locales' export enum PermissionType { PRIVATE = '私有', PUBLIC = '公用' @@ -8,11 +9,11 @@ export enum PermissionDesc { } export enum modelType { - EMBEDDING = '向量模型', - LLM = '大语言模型', - STT = '语音识别', - TTS = '语音合成', - IMAGE = '图片理解', - TTI = '图片生成', - RERANKER = '重排模型' + EMBEDDING = t('views.template.model.EMBEDDING'), + LLM = t('views.template.model.LLM'), + STT = t('views.template.model.STT'), + TTS = t('views.template.model.TTS'), + IMAGE = t('views.template.model.IMAGE'), + TTI = t('views.template.model.TTI'), + RERANKER = t('views.template.model.RERANKER') } diff --git a/ui/src/layout/components/breadcrumb/index.vue b/ui/src/layout/components/breadcrumb/index.vue index c380d373d..7f07cb6ad 100644 --- a/ui/src/layout/components/breadcrumb/index.vue +++ b/ui/src/layout/components/breadcrumb/index.vue @@ -94,7 +94,8 @@ diff --git a/ui/src/layout/components/top-bar/avatar/APIKeyDialog.vue b/ui/src/layout/components/top-bar/avatar/APIKeyDialog.vue index 39d67970f..6dfcfc7af 100644 --- a/ui/src/layout/components/top-bar/avatar/APIKeyDialog.vue +++ b/ui/src/layout/components/top-bar/avatar/APIKeyDialog.vue @@ -21,7 +21,7 @@ - {{ $t('views.applicationOverview.appInfo.APIKeyDialog.creatApiKey') }} + {{ $t('common.create') }} @@ -34,47 +34,28 @@ - + - + - +