feat: i18n (#2039)

This commit is contained in:
shaohuzhang1 2025-01-20 14:41:26 +08:00 committed by GitHub
parent 9267002baa
commit e350a221c9
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
18 changed files with 1539 additions and 1554 deletions

View File

@ -0,0 +1,19 @@
# Generated by Django 4.2.15 on 2025-01-20 03:20
from django.db import migrations, models
import smartdoc.conf
class Migration(migrations.Migration):
    """Add a per-token ``language`` field to ApplicationAccessToken.

    Auto-generated by Django; stores the UI language preference used when
    serving the embedded/shared application to a given access token.
    """

    dependencies = [
        ('application', '0023_application_stt_autosend'),
    ]

    operations = [
        migrations.AddField(
            model_name='applicationaccesstoken',
            name='language',
            # NOTE(review): the default references the UNBOUND method
            # Config.get_language_code — Django invokes callables as
            # default() with no arguments, so this would raise TypeError
            # unless Django serialized a bound/class-level callable here.
            # Verify against the model definition, which uses the bound
            # CONFIG.get_language_code.
            field=models.CharField(default=smartdoc.conf.Config.get_language_code, max_length=10, verbose_name='语言'),
        ),
    ]

View File

@ -13,6 +13,7 @@ from django.db import models
from application.models import Application
from common.mixins.app_model_mixin import AppModelMixin
from smartdoc.const import CONFIG
from users.models import User
@ -45,6 +46,8 @@ class ApplicationAccessToken(AppModelMixin):
, default=list)
show_source = models.BooleanField(default=False, verbose_name="是否显示知识来源")
language = models.CharField(max_length=10, verbose_name="语言", default=CONFIG.get_language_code)
class Meta:
db_table = "application_access_token"

View File

@ -336,6 +336,8 @@ class ApplicationSerializer(serializers.Serializer):
show_source = serializers.BooleanField(required=False,
error_messages=ErrMessage.boolean(
_("Whether to display knowledge sources")))
language = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char(_("language")))
def edit(self, instance: Dict, with_valid=True):
if with_valid:
@ -358,6 +360,8 @@ class ApplicationSerializer(serializers.Serializer):
application_access_token.white_list = instance.get('white_list')
if 'show_source' in instance and instance.get('show_source') is not None:
application_access_token.show_source = instance.get('show_source')
if 'language' in instance and instance.get('language') is not None:
application_access_token.language = instance.get('language')
application_access_token.save()
application_setting_model = DBModelManage.get_model('application_setting')
xpack_cache = DBModelManage.get_model('xpack_cache')
@ -980,6 +984,7 @@ class ApplicationSerializer(serializers.Serializer):
'file_upload_setting': application.file_upload_setting,
'work_flow': application.work_flow,
'show_source': application_access_token.show_source,
'language': application_access_token.language,
**application_setting_dict})
@transaction.atomic

View File

@ -165,6 +165,9 @@ class ApplicationApi(ApiMixin):
'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN,
title=_("Whether to display knowledge sources"),
description=_("Whether to display knowledge sources")),
'language': openapi.Schema(type=openapi.TYPE_STRING,
title=_("language"),
description=_("language"))
}
)
@ -282,24 +285,24 @@ class ApplicationApi(ApiMixin):
properties={
'prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_("Prompt word"),
description=_("Prompt word"),
default=_("""
Known information:
{data}
Answer requirements:
- If you don't know the answer or don't get the answer, please answer "No relevant information found in the knowledge base, it is recommended to consult relevant technical support or refer to official documents for operation".
- Avoid mentioning that you got the knowledge from <data></data>.
- Please keep the answer consistent with the description in <data></data>.
- Please use markdown syntax to optimize the format of the answer.
- Please return the image link, link address and script language in <data></data> completely.
- Please answer in the same language as the question.
Question:
{question}
""")),
default=_(("Known information:\n"
"{data}\n"
"Answer requirements:\n"
"- If you don't know the answer or don't get the answer, please answer \"No relevant information found in the knowledge base, it is recommended to consult relevant technical support or refer to official documents for operation\".\n"
"- Avoid mentioning that you got the knowledge from <data></data>.\n"
"- Please keep the answer consistent with the description in <data></data>.\n"
"- Please use markdown syntax to optimize the format of the answer.\n"
"- Please return the image link, link address and script language in <data></data> completely.\n"
"- Please answer in the same language as the question.\n"
"Question:\n"
"{question}"))),
'system': openapi.Schema(type=openapi.TYPE_STRING, title=_("System prompt words (role)"),
description=_("System prompt words (role)")),
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_("No citation segmentation prompt"),
default="{question}", description=_("No citation segmentation prompt"))
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING,
title=_("No citation segmentation prompt"),
default="{question}",
description=_("No citation segmentation prompt"))
}
)
@ -323,22 +326,29 @@ Question:
required=['name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"), description=_("Application Name")),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"), description=_("Application Description")),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"), description=_("Model id")),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of multi-round conversations"),
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
description=_("Application Name")),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
description=_("Application Description")),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
description=_("Model id")),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
title=_("Number of multi-round conversations"),
description=_("Number of multi-round conversations")),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"), description=_("Opening remarks")),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
description=_("Opening remarks")),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
title=_("List of associated knowledge base IDs"), description=_("List of associated knowledge base IDs")),
title=_("List of associated knowledge base IDs"),
description=_("List of associated knowledge base IDs")),
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
description=_("Problem Optimization"), default=True),
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
description=_("Application Type SIMPLE | WORK_FLOW")),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title=_('Question optimization tips'),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
title=_('Question optimization tips'),
description=_("Question optimization tips"),
default=_(
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),

View File

@ -7,11 +7,11 @@
@desc:
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
class AppModelMixin(models.Model):
    """Abstract model base that timestamps every row.

    Fix: the block contained two competing definitions for each field — a
    translated pair (``_('Create time')``) and a hard-coded Chinese pair
    (``"创建时间"``) — so the later assignment silently overrode the earlier
    one. Kept the gettext pair: it matches the ``gettext_lazy`` import and
    the i18n purpose of this change.
    """

    # auto_now_add: set once at INSERT time; auto_now: refreshed on every save().
    create_time = models.DateTimeField(verbose_name=_('Create time'), auto_now_add=True)
    update_time = models.DateTimeField(verbose_name=_('Update time'), auto_now=True)

    class Meta:
        # Abstract: contributes fields to subclasses, creates no table itself.
        abstract = True

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -48,9 +48,7 @@ model_info_list = [ModelInfo('gte-rerank',
_('CosyVoice is based on a new generation of large generative speech models, which can predict emotions, intonation, rhythm, etc. based on context, and has better anthropomorphic effects.'),
ModelTypeConst.TTS, aliyun_bai_lian_tts_model_credential, AliyunBaiLianTextToSpeech),
ModelInfo('text-embedding-v1',
_('''
Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data.
'''),
_("Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data."),
ModelTypeConst.EMBEDDING, aliyun_bai_lian_embedding_model_credential,
AliyunBaiLianEmbedding),
ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,

View File

@ -13,6 +13,7 @@ from setting.models_provider.impl.aws_bedrock_model_provider.model.llm import Be
from smartdoc.conf import PROJECT_DIR
from django.utils.translation import gettext_lazy as _
def _create_model_info(model_name, description, model_type, credential_class, model_class):
return ModelInfo(
name=model_name,
@ -46,18 +47,14 @@ def _initialize_model_info():
),
_create_model_info(
'anthropic.claude-3-haiku-20240307-v1:0',
_('''
The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-instant responsiveness. The model can answer simple queries and requests quickly. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text output, and provides 200K context windows.
'''),
_("The Claude 3 Haiku is Anthropic's fastest and most compact model, with near-instant responsiveness. The model can answer simple queries and requests quickly. Customers will be able to build seamless AI experiences that mimic human interactions. Claude 3 Haiku can process images and return text output, and provides 200K context windows."),
ModelTypeConst.LLM,
BedrockLLMModelCredential,
BedrockModel
),
_create_model_info(
'anthropic.claude-3-sonnet-20240229-v1:0',
_('''
The Claude 3 Sonnet model from Anthropic strikes the ideal balance between intelligence and speed, especially when it comes to handling enterprise workloads. This model offers maximum utility while being priced lower than competing products, and it's been engineered to be a solid choice for deploying AI at scale.
'''),
_("The Claude 3 Sonnet model from Anthropic strikes the ideal balance between intelligence and speed, especially when it comes to handling enterprise workloads. This model offers maximum utility while being priced lower than competing products, and it's been engineered to be a solid choice for deploying AI at scale."),
ModelTypeConst.LLM,
BedrockLLMModelCredential,
BedrockModel
@ -78,9 +75,7 @@ The Claude 3 Sonnet model from Anthropic strikes the ideal balance between intel
),
_create_model_info(
'amazon.titan-text-premier-v1:0',
_('''
Titan Text Premier is the most powerful and advanced model in the Titan Text series, designed to deliver exceptional performance for a variety of enterprise applications. With its cutting-edge features, it delivers greater accuracy and outstanding results, making it an excellent choice for organizations looking for a top-notch text processing solution.
'''),
_("Titan Text Premier is the most powerful and advanced model in the Titan Text series, designed to deliver exceptional performance for a variety of enterprise applications. With its cutting-edge features, it delivers greater accuracy and outstanding results, making it an excellent choice for organizations looking for a top-notch text processing solution."),
ModelTypeConst.LLM,
BedrockLLMModelCredential,
BedrockModel

View File

@ -54,49 +54,36 @@ model_info_list = [
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:0.5b',
_('''
Compared with previous versions, qwen 1.5 0.5b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 500 million parameters.
'''),
_("Compared with previous versions, qwen 1.5 0.5b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 500 million parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:1.8b',
_('''
Compared with previous versions, qwen 1.5 1.8b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 1.8 billion parameters.
'''),
_("Compared with previous versions, qwen 1.5 1.8b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 1.8 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:4b',
_('''
Compared with previous versions, qwen 1.5 4b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 4 billion parameters.
'''),
_("Compared with previous versions, qwen 1.5 4b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 4 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:7b',
_('''
Compared with previous versions, qwen 1.5 7b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 7 billion parameters.
'''),
_("Compared with previous versions, qwen 1.5 7b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 7 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:14b',
_('''Compared with previous versions, qwen 1.5 14b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 14 billion parameters.'''),
_("Compared with previous versions, qwen 1.5 14b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 14 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:32b',
_('''Compared with previous versions, qwen 1.5 32b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 32 billion parameters.'''),
_("Compared with previous versions, qwen 1.5 32b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 32 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:72b',
_('''
Compared with previous versions, qwen 1.5 72b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 72 billion parameters.'''),
_("Compared with previous versions, qwen 1.5 72b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 72 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen:110b',
_('''
Compared with previous versions, qwen 1.5 110b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 110 billion parameters.
'''),
_("Compared with previous versions, qwen 1.5 110b has significantly enhanced the model's alignment with human preferences and its multi-language processing capabilities. Models of all sizes support a context length of 32768 tokens. 110 billion parameters."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'qwen2:72b-instruct',
@ -140,9 +127,7 @@ Compared with previous versions, qwen 1.5 72b has significantly enhanced the mod
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
ModelInfo(
'phi3',
_('''
Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open model.
'''),
_("Phi-3 Mini is Microsoft's 3.8B parameter, lightweight, state-of-the-art open model."),
ModelTypeConst.LLM, ollama_llm_model_credential, OllamaChatModel),
]
ollama_embedding_model_credential = OllamaEmbeddingModelCredential()

View File

@ -54,25 +54,19 @@ def _initialize_model_info():
TencentModel),
_create_model_info(
'hunyuan-role',
_('''
Hunyuan's latest version of the role-playing model, a role-playing model launched by Hunyuan's official fine-tuning training, is based on the Hunyuan model combined with the role-playing scene data set for additional training, and has better basic effects in role-playing scenes.
'''),
_("Hunyuan's latest version of the role-playing model, a role-playing model launched by Hunyuan's official fine-tuning training, is based on the Hunyuan model combined with the role-playing scene data set for additional training, and has better basic effects in role-playing scenes."),
ModelTypeConst.LLM,
TencentLLMModelCredential,
TencentModel),
_create_model_info(
'hunyuan-functioncall',
_('''
Hunyuan's latest MOE architecture FunctionCall model has been trained with high-quality FunctionCall data and has a context window of 32K, leading in multiple dimensions of evaluation indicators.
'''),
_("Hunyuan's latest MOE architecture FunctionCall model has been trained with high-quality FunctionCall data and has a context window of 32K, leading in multiple dimensions of evaluation indicators."),
ModelTypeConst.LLM,
TencentLLMModelCredential,
TencentModel),
_create_model_info(
'hunyuan-code',
_('''
Hunyuan's latest code generation model, after training the base model with 200B high-quality code data, and iterating on high-quality SFT data for half a year, the context long window length has been increased to 8K, and it ranks among the top in the automatic evaluation indicators of code generation in the five major languages; the five major languages In the manual high-quality evaluation of 10 comprehensive code tasks that consider all aspects, the performance is in the first echelon.
'''),
_("Hunyuan's latest code generation model, after training the base model with 200B high-quality code data, and iterating on high-quality SFT data for half a year, the context long window length has been increased to 8K, and it ranks among the top in the automatic evaluation indicators of code generation in the five major languages; the five major languages In the manual high-quality evaluation of 10 comprehensive code tasks that consider all aspects, the performance is in the first echelon."),
ModelTypeConst.LLM,
TencentLLMModelCredential,
TencentModel),
@ -80,9 +74,7 @@ def _initialize_model_info():
tencent_embedding_model_info = _create_model_info(
'hunyuan-embedding',
_('''
Tencent's Hunyuan Embedding interface can convert text into high-quality vector data. The vector dimension is 1024 dimensions.
'''),
_("Tencent's Hunyuan Embedding interface can convert text into high-quality vector data. The vector dimension is 1024 dimensions."),
ModelTypeConst.EMBEDDING,
TencentEmbeddingCredential,
TencentEmbeddingModel

View File

@ -82,7 +82,7 @@ class Config(dict):
"DB_PASSWORD": "Password123@postgres",
"DB_ENGINE": "dj_db_conn_pool.backends.postgresql",
"DB_MAX_OVERFLOW": 80,
'LANGUAGE_CODE': 'en',
'LANGUAGE_CODE': 'zh-CN',
# 向量模型
"EMBEDDING_MODEL_NAME": "shibing624/text2vec-base-chinese",
"EMBEDDING_DEVICE": "cpu",
@ -117,6 +117,9 @@ class Config(dict):
}
}
def get_language_code(self):
return self.get('LANGUAGE_CODE', 'zh-CN')
def __init__(self, *args):
super().__init__(*args)

View File

@ -0,0 +1,19 @@
# Generated by Django 4.2.15 on 2025-01-20 03:19
from django.db import migrations, models
import smartdoc.conf
class Migration(migrations.Migration):
    """Add a per-user ``language`` field to the User model.

    Auto-generated by Django; stores each account's preferred UI language,
    updated via the language-switch endpoint.
    """

    dependencies = [
        ('users', '0004_alter_user_email'),
    ]

    operations = [
        migrations.AddField(
            model_name='user',
            name='language',
            # NOTE(review): same concern as the application-app migration —
            # this is the unbound Config.get_language_code, which Django
            # calls as default() with no arguments; confirm it resolves to
            # a zero-argument callable at runtime.
            field=models.CharField(default=smartdoc.conf.Config.get_language_code, max_length=10, verbose_name='语言'),
        ),
    ]

View File

@ -20,6 +20,8 @@ from smartdoc.conf import PROJECT_DIR
__all__ = ["User", "password_encrypt", 'get_user_dynamics_permission']
from smartdoc.const import CONFIG
def password_encrypt(raw_password):
"""
@ -71,6 +73,7 @@ class User(AppModelMixin):
role = models.CharField(max_length=150, verbose_name="角色")
source = models.CharField(max_length=10, verbose_name="来源", default="LOCAL")
is_active = models.BooleanField(default=True)
language = models.CharField(max_length=10, verbose_name="语言", default=CONFIG.get_language_code)
create_time = models.DateTimeField(verbose_name="创建时间", auto_now_add=True, null=True)
update_time = models.DateTimeField(verbose_name="修改时间", auto_now=True, null=True)

View File

@ -268,6 +268,19 @@ class CheckCodeSerializer(ApiMixin, serializers.Serializer):
description=_('Error message')))
class SwitchLanguageSerializer(serializers.Serializer):
    """Validates and applies a user's UI-language switch request."""

    user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(_('user id')))
    language = serializers.CharField(required=True, error_messages=ErrMessage.char(_('language')))

    def switch(self):
        """Persist the requested language on the user row.

        Raises:
            AppApiException: 500 when the language is not one of the
                locales the UI ships translations for.
        """
        self.is_valid(raise_exception=True)
        language = self.data.get('language')
        support_language_list = ['zh-CN', 'zh-Hant', 'en-US']
        # Idiomatic membership test (was: support_language_list.__contains__(language)).
        if language not in support_language_list:
            raise AppApiException(500, _('language only support:') + ','.join(support_language_list))
        QuerySet(User).filter(id=self.data.get('user_id')).update(language=language)
class RePasswordSerializer(ApiMixin, serializers.Serializer):
email = serializers.EmailField(
required=True,
@ -405,10 +418,10 @@ class SendEmailSerializer(ApiMixin, serializers.Serializer):
# 发送邮件
send_mail(_('【Intelligent knowledge base question and answer system-{action}').format(
action=_('User registration') if state == 'register' else _('Change password')),
'',
html_message=f'{content.replace("${code}", code)}',
from_email=system_setting.meta.get('from_email'),
recipient_list=[email], fail_silently=False, connection=connection)
'',
html_message=f'{content.replace("${code}", code)}',
from_email=system_setting.meta.get('from_email'),
recipient_list=[email], fail_silently=False, connection=connection)
except Exception as e:
user_cache.delete(code_cache_key_lock)
raise AppApiException(500, f"{str(e)}" + _("Email sending failed"))
@ -442,7 +455,8 @@ class UserProfile(ApiMixin):
permission_list += [p.value for p in get_permission_list_by_role(RoleConstants[user.role])]
return {'id': user.id, 'username': user.username, 'email': user.email, 'role': user.role,
'permissions': [str(p) for p in permission_list],
'is_edit_password': user.password == 'd880e722c47a34d8e9fce789fc62389d' if user.role == 'ADMIN' else False}
'is_edit_password': user.password == 'd880e722c47a34d8e9fce789fc62389d' if user.role == 'ADMIN' else False,
'language': user.language}
@staticmethod
def get_response_body_api():
@ -455,7 +469,8 @@ class UserProfile(ApiMixin):
'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")),
'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is active"), description=_("Is active")),
"permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Permissions"), description=_("Permissions"),
"permissions": openapi.Schema(type=openapi.TYPE_ARRAY, title=_("Permissions"),
description=_("Permissions"),
items=openapi.Schema(type=openapi.TYPE_STRING))
}
)
@ -564,12 +579,15 @@ class UserInstanceSerializer(ApiMixin, serializers.ModelSerializer):
'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
'email': openapi.Schema(type=openapi.TYPE_STRING, title=_("Email"), description=_("Email")),
'phone': openapi.Schema(type=openapi.TYPE_STRING, title=_("Phone"), description=_("Phone")),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"), description=_("Is active")),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is active"),
description=_("Is active")),
'role': openapi.Schema(type=openapi.TYPE_STRING, title=_("Role"), description=_("Role")),
'source': openapi.Schema(type=openapi.TYPE_STRING, title=_("Source"), description=_("Source")),
'nick_name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Name"), description=_("Name")),
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Create time"), description=_("Create time")),
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Update time"), description=_("Update time"))
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Create time"),
description=_("Create time")),
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_("Update time"),
description=_("Update time"))
}
)
@ -581,7 +599,8 @@ class UserInstanceSerializer(ApiMixin, serializers.ModelSerializer):
required=True,
description='ID')
]
]
class UserManageSerializer(serializers.Serializer):
class Query(ApiMixin, serializers.Serializer):
@ -639,23 +658,25 @@ class UserManageSerializer(serializers.Serializer):
code=ExceptionCodeConstants.EMAIL_FORMAT_ERROR.value.code)])
username = serializers.CharField(required=True,
error_messages=ErrMessage.char(_("Username")),
max_length=20,
min_length=6,
validators=[
validators.RegexValidator(regex=re.compile("^.{6,20}$"),
message=_('Username must be 6-20 characters long'))
])
error_messages=ErrMessage.char(_("Username")),
max_length=20,
min_length=6,
validators=[
validators.RegexValidator(regex=re.compile("^.{6,20}$"),
message=_(
'Username must be 6-20 characters long'))
])
password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")),
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
, message=_("The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
validators=[validators.RegexValidator(regex=re.compile(
"^(?![a-zA-Z]+$)(?![A-Z0-9]+$)(?![A-Z_!@#$%^&*`~.()-+=]+$)(?![a-z0-9]+$)(?![a-z_!@#$%^&*`~()-+=]+$)"
"(?![0-9_!@#$%^&*`~()-+=]+$)[a-zA-Z0-9_!@#$%^&*`~.()-+=]{6,20}$")
, message=_(
"The password must be 6-20 characters long and must be a combination of letters, numbers, and special characters."))])
nick_name = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Name")), max_length=64,
allow_null=True, allow_blank=True)
phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20,
allow_null=True, allow_blank=True)
phone = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Phone")), max_length=20,
allow_null=True, allow_blank=True)
def is_valid(self, *, raise_exception=True):
super().is_valid(raise_exception=True)

View File

@ -6,6 +6,7 @@ app_name = "user"
urlpatterns = [
path('profile', views.Profile.as_view()),
path('user', views.User.as_view(), name="profile"),
path('user/language', views.SwitchUserLanguageView.as_view(), name='language'),
path('user/list', views.User.Query.as_view()),
path('user/login', views.Login.as_view(), name='login'),
path('user/logout', views.Logout.as_view(), name='logout'),

View File

@ -22,8 +22,10 @@ from common.response import result
from smartdoc.settings import JWT_AUTH
from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \
RePasswordSerializer, \
SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer
SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer, \
SwitchLanguageSerializer
from django.utils.translation import gettext_lazy as _
user_cache = cache.caches['user_cache']
token_cache = cache.caches['token_cache']
@ -65,6 +67,27 @@ class User(APIView):
UserSerializer.Query(data={'email_or_username': request.query_params.get('email_or_username')}).list())
class SwitchUserLanguageView(APIView):
    # POST /user/language — switch the authenticated user's UI language.
    authentication_classes = [TokenAuth]

    @action(methods=['POST'], detail=False)
    @swagger_auto_schema(operation_summary=_("Switch Language"),
                         operation_id=_("Switch Language"),
                         request_body=openapi.Schema(
                             type=openapi.TYPE_OBJECT,
                             required=['language'],
                             properties={
                                 'language': openapi.Schema(type=openapi.TYPE_STRING, title=_("language"),
                                                            description=_("language")),
                             }
                         ),
                         # NOTE(review): response schema is borrowed from
                         # RePasswordSerializer — presumably a generic
                         # success/message envelope; confirm it matches
                         # what this endpoint actually returns.
                         responses=RePasswordSerializer().get_response_body_api(),
                         tags=[_("User")])
    def post(self, request: Request):
        """Switch the current user's language to the one in the request body."""
        # user_id is taken from the authenticated request, never from the
        # client payload, so a user can only change their own language.
        data = {**request.data, 'user_id': request.user.id}
        # switch() returns None, so success() wraps an empty payload.
        return result.success(SwitchLanguageSerializer(data=data).switch())
class ResetCurrentUserPasswordView(APIView):
authentication_classes = [TokenAuth]
@ -75,8 +98,10 @@ class ResetCurrentUserPasswordView(APIView):
type=openapi.TYPE_OBJECT,
required=['email', 'code', "password", 're_password'],
properties={
'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"), description=_("Verification code")),
'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")),
'code': openapi.Schema(type=openapi.TYPE_STRING, title=_("Verification code"),
description=_("Verification code")),
'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"),
description=_("Password")),
're_password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"),
description=_("Password"))
}