feat: optimize simple application page parameters (#1182)

shaohuzhang1 2024-09-14 21:48:45 +08:00 committed by GitHub
parent 0b64e7a56c
commit b0f443f3d2
16 changed files with 306 additions and 150 deletions

View File

@ -37,6 +37,8 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
"最大携带知识库段落长度"))
# 模板
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
system = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("系统提示词(角色)"))
# 补齐问题
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("补齐问题"))
# 未查询到引用分段
@ -59,6 +61,7 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
prompt: str,
padding_problem_text: str = None,
no_references_setting=None,
system=None,
**kwargs) -> List[BaseMessage]:
"""
@ -71,6 +74,7 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
:param padding_problem_text 用户修改文本
:param kwargs: 其他参数
:param no_references_setting: 无引用分段设置
:param system 系统提示词(角色)
:return:
"""
pass

View File

@ -9,6 +9,7 @@
from typing import List, Dict
from langchain.schema import BaseMessage, HumanMessage
from langchain_core.messages import SystemMessage
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
from application.chat_pipeline.step.generate_human_message_step.i_generate_human_message_step import \
@ -27,6 +28,7 @@ class BaseGenerateHumanMessageStep(IGenerateHumanMessageStep):
prompt: str,
padding_problem_text: str = None,
no_references_setting=None,
system=None,
**kwargs) -> List[BaseMessage]:
prompt = prompt if (paragraph_list is not None and len(paragraph_list) > 0) else no_references_setting.get(
'value')
@ -35,6 +37,11 @@ class BaseGenerateHumanMessageStep(IGenerateHumanMessageStep):
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))]
if system is not None and len(system) > 0:
return [SystemMessage(system), *flat_map(history_message),
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list,
no_references_setting)]
return [*flat_map(history_message),
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list,
no_references_setting)]
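
For reference, a minimal standalone sketch (not part of the diff) of how the new system parameter changes the assembled message list. The build_messages helper and the sample strings are illustrative; SystemMessage and HumanMessage are the langchain_core classes imported above.

from typing import List, Optional
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage

def build_messages(system: Optional[str], history: List[BaseMessage], prompt: str) -> List[BaseMessage]:
    # Mirrors the new branch above: prepend a SystemMessage only when `system` is a non-empty string.
    if system is not None and len(system) > 0:
        return [SystemMessage(system), *history, HumanMessage(prompt)]
    return [*history, HumanMessage(prompt)]

# With a role prompt the model receives [system, *history, human]; without one the
# list is exactly what the previous implementation produced.
print(build_messages("You are the MaxKB assistant.", [], "Known information: ...\nQuestion: What is MaxKB?"))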

View File

@ -29,6 +29,8 @@ class IResetProblemStep(IBaseChatPipelineStep):
error_messages=ErrMessage.list("历史对答"))
# 大语言模型
chat_model = ModelField(required=False, allow_null=True, error_messages=ErrMessage.base("大语言模型"))
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
error_messages=ErrMessage.char("问题补全提示词"))
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
return self.InstanceSerializer
@ -47,5 +49,6 @@ class IResetProblemStep(IBaseChatPipelineStep):
@abstractmethod
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None,
problem_optimization_prompt=None,
**kwargs):
pass

View File

@ -21,6 +21,7 @@ prompt = (
class BaseResetProblemStep(IResetProblemStep):
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None,
problem_optimization_prompt=None,
**kwargs) -> str:
if chat_model is None:
self.context['message_tokens'] = 0
@ -30,8 +31,9 @@ class BaseResetProblemStep(IResetProblemStep):
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))]
reset_prompt = problem_optimization_prompt if problem_optimization_prompt else prompt
message_list = [*flat_map(history_message),
HumanMessage(content=prompt.format(**{'question': problem_text}))]
HumanMessage(content=reset_prompt.replace('{question}', problem_text))]
response = chat_model.invoke(message_list)
padding_problem = problem_text
if response.content.__contains__("<data>") and response.content.__contains__('</data>'):
@ -39,6 +41,9 @@ class BaseResetProblemStep(IResetProblemStep):
response.content.index('<data>') + 6:response.content.index('</data>')]
if padding_problem_data is not None and len(padding_problem_data.strip()) > 0:
padding_problem = padding_problem_data
elif len(response.content) > 0:
padding_problem = response.content
try:
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(padding_problem)
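
Side note, sketched outside the diff: str.format raises on any other brace token a user types into a custom optimization prompt, which is presumably why the new code substitutes the {question} placeholder with a plain str.replace. The strings below are illustrative.

template = 'JSON example: {"a": 1} -> rewrite ({question}) inside <data></data>'

# str.format chokes on the unknown {"a": 1} field and raises before producing any output.
try:
    template.format(**{'question': 'What is MaxKB?'})
except (KeyError, ValueError) as error:
    print('format failed:', error)

# str.replace only touches the literal {question} marker, so arbitrary user templates are safe.
print(template.replace('{question}', 'What is MaxKB?'))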

View File

@ -0,0 +1,18 @@
# Generated by Django 4.2.15 on 2024-09-13 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0013_application_tts_type'),
]
operations = [
migrations.AddField(
model_name='application',
name='problem_optimization_prompt',
field=models.CharField(blank=True, default='()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中', max_length=102400, null=True, verbose_name='问题优化提示词'),
),
]

View File

@ -35,7 +35,7 @@ def get_dataset_setting_dict():
def get_model_setting_dict():
return {'prompt': Application.get_default_model_prompt()}
return {'prompt': Application.get_default_model_prompt(), 'no_references_prompt': '{question}'}
class Application(AppModelMixin):
@ -54,8 +54,13 @@ class Application(AppModelMixin):
work_flow = models.JSONField(verbose_name="工作流数据", default=dict)
type = models.CharField(verbose_name="应用类型", choices=ApplicationTypeChoices.choices,
default=ApplicationTypeChoices.SIMPLE, max_length=256)
tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True)
stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True)
problem_optimization_prompt = models.CharField(verbose_name="问题优化提示词", max_length=102400, blank=True,
null=True,
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中")
tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False,
blank=True, null=True)
stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False,
blank=True, null=True)
tts_model_enable = models.BooleanField(verbose_name="语音合成模型是否启用", default=False)
stt_model_enable = models.BooleanField(verbose_name="语音识别模型是否启用", default=False)
tts_type = models.CharField(verbose_name="语音播放类型", max_length=20, default="BROWSER")

View File

@ -120,7 +120,12 @@ class DatasetSettingSerializer(serializers.Serializer):
class ModelSettingSerializer(serializers.Serializer):
prompt = serializers.CharField(required=True, max_length=2048, error_messages=ErrMessage.char("提示词"))
prompt = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
error_messages=ErrMessage.char("提示词"))
system = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
error_messages=ErrMessage.char("角色提示词"))
no_references_prompt = serializers.CharField(required=True, max_length=102400, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("无引用分段提示词"))
class ApplicationWorkflowSerializer(serializers.Serializer):
@ -174,7 +179,7 @@ class ApplicationSerializer(serializers.Serializer):
error_messages=ErrMessage.char("应用描述"))
model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("模型"))
multiple_rounds_dialogue = serializers.BooleanField(required=True, error_messages=ErrMessage.char("多轮对话"))
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("会话次数"))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char("开场白"))
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
@ -185,6 +190,8 @@ class ApplicationSerializer(serializers.Serializer):
model_setting = ModelSettingSerializer(required=True)
# 问题补全
problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全"))
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
error_messages=ErrMessage.char("问题补全提示词"))
# 应用类型
type = serializers.CharField(required=True, error_messages=ErrMessage.char("应用类型"),
validators=[
@ -364,8 +371,8 @@ class ApplicationSerializer(serializers.Serializer):
error_messages=ErrMessage.char("应用描述"))
model_id = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char("模型"))
multiple_rounds_dialogue = serializers.BooleanField(required=False,
error_messages=ErrMessage.boolean("多轮会话"))
dialogue_number = serializers.IntegerField(required=False,
error_messages=ErrMessage.boolean("多轮会话"))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char("开场白"))
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
@ -430,13 +437,14 @@ class ApplicationSerializer(serializers.Serializer):
def to_application_model(user_id: str, application: Dict):
return Application(id=uuid.uuid1(), name=application.get('name'), desc=application.get('desc'),
prologue=application.get('prologue'),
dialogue_number=3 if application.get('multiple_rounds_dialogue') else 0,
dialogue_number=application.get('dialogue_number', 0),
user_id=user_id, model_id=application.get('model_id'),
dataset_setting=application.get('dataset_setting'),
model_setting=application.get('model_setting'),
problem_optimization=application.get('problem_optimization'),
type=ApplicationTypeChoices.SIMPLE,
model_params_setting=application.get('model_params_setting', {}),
problem_optimization_prompt=application.get('problem_optimization_prompt', None),
work_flow={}
)
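
An illustrative sketch (not part of the commit) of how a create payload changes under to_application_model; the values are invented.

# Before: multi-turn dialogue was a boolean switch, silently mapped to 3 rounds of history.
legacy_payload = {'name': 'demo', 'multiple_rounds_dialogue': True}

# After: callers state the number of history rounds directly (0 disables history), and may
# optionally override the question-optimization prompt.
payload = {
    'name': 'demo',
    'dialogue_number': 3,
    'problem_optimization': True,
    'problem_optimization_prompt': 'Rewrite ({question}) and wrap the result in <data></data> tags',
}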

View File

@ -60,6 +60,17 @@ class ChatInfo:
self.chat_record_list: List[ChatRecord] = []
self.work_flow_version = work_flow_version
@staticmethod
def get_no_references_setting(dataset_setting, model_setting):
no_references_setting = dataset_setting.get(
'no_references_setting', {
'status': 'ai_questioning',
'value': '{question}'})
if no_references_setting.get('status') == 'ai_questioning':
no_references_prompt = model_setting.get('no_references_prompt', '{question}')
no_references_setting['value'] = no_references_prompt if len(no_references_prompt) > 0 else "{question}"
return no_references_setting
def to_base_pipeline_manage_params(self):
dataset_setting = self.application.dataset_setting
model_setting = self.application.model_setting
@ -80,8 +91,13 @@ class ChatInfo:
'history_chat_record': self.chat_record_list,
'chat_id': self.chat_id,
'dialogue_number': self.application.dialogue_number,
'problem_optimization_prompt': self.application.problem_optimization_prompt if self.application.problem_optimization_prompt is not None and len(
self.application.problem_optimization_prompt) > 0 else '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
'prompt': model_setting.get(
'prompt') if 'prompt' in model_setting else Application.get_default_model_prompt(),
'prompt') if 'prompt' in model_setting and len(model_setting.get(
'prompt')) > 0 else Application.get_default_model_prompt(),
'system': model_setting.get(
'system', None),
'model_id': model_id,
'problem_optimization': self.application.problem_optimization,
'stream': True,
@ -89,11 +105,7 @@ class ChatInfo:
self.application.model_params_setting.keys()) == 0 else self.application.model_params_setting,
'search_mode': self.application.dataset_setting.get(
'search_mode') if 'search_mode' in self.application.dataset_setting else 'embedding',
'no_references_setting': self.application.dataset_setting.get(
'no_references_setting') if 'no_references_setting' in self.application.dataset_setting else {
'status': 'ai_questioning',
'value': '{question}',
},
'no_references_setting': self.get_no_references_setting(self.application.dataset_setting, model_setting),
'user_id': self.application.user_id
}
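
A quick standalone check (outside the diff) of what the new get_no_references_setting resolves to; the function body is copied from above and the dictionaries are sample inputs.

def get_no_references_setting(dataset_setting, model_setting):
    no_references_setting = dataset_setting.get(
        'no_references_setting', {'status': 'ai_questioning', 'value': '{question}'})
    if no_references_setting.get('status') == 'ai_questioning':
        no_references_prompt = model_setting.get('no_references_prompt', '{question}')
        no_references_setting['value'] = no_references_prompt if len(no_references_prompt) > 0 else '{question}'
    return no_references_setting

# 'ai_questioning' pulls its value from model_setting.no_references_prompt, falling back to {question};
# 'designated_answer' keeps whatever fixed answer the dataset setting already carries.
print(get_no_references_setting({}, {'no_references_prompt': 'Answer directly: {question}'}))
print(get_no_references_setting(
    {'no_references_setting': {'status': 'designated_answer', 'value': 'No match found.'}}, {}))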

View File

@ -40,15 +40,15 @@ class ApplicationApi(ApiMixin):
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['id', 'name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'user_id', 'status', 'create_time',
required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'user_id', 'status', 'create_time',
'update_time'],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"),
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
"multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
description="是否开启多轮对话"),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
description="多轮对话次数"),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
title="示例列表", description="示例列表"),
@ -164,8 +164,8 @@ class ApplicationApi(ApiMixin):
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
"multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
description="是否开启多轮对话"),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
description="多轮对话次数"),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
@ -176,7 +176,22 @@ class ApplicationApi(ApiMixin):
description="是否开启问题优化", default=True),
'icon': openapi.Schema(type=openapi.TYPE_STRING, title="icon",
description="icon", default="/ui/favicon.ico"),
'work_flow': ApplicationApi.WorkFlow.get_request_body_api()
'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型",
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"),
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词',
description="问题优化提示词",
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中"),
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID",
description="文字转语音模型ID"),
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id",
description="语音转文字模型id"),
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启",
description="语音转文字是否开启"),
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音是否开启",
description="文字转语音是否开启"),
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型",
description="文字转语音类型")
}
)
@ -248,6 +263,11 @@ class ApplicationApi(ApiMixin):
'\n问题:'
'\n{question}')),
'system': openapi.Schema(type=openapi.TYPE_STRING, title="系统提示词(角色)",
description="系统提示词(角色)"),
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING, title="无引用分段提示词",
default="{question}", description="无引用分段提示词")
}
)
@ -267,14 +287,14 @@ class ApplicationApi(ApiMixin):
def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'dataset_setting', 'model_setting',
'problem_optimization'],
required=['name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
'problem_optimization', 'stt_model_enable', 'tts_model_enable', 'tts_type'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
"multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
description="是否开启多轮对话"),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
description="多轮对话次数"),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
@ -284,8 +304,20 @@ class ApplicationApi(ApiMixin):
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化",
description="是否开启问题优化", default=True),
'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型",
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW")
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词',
description="问题优化提示词",
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中"),
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID",
description="文字转语音模型ID"),
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id",
description="语音转文字模型id"),
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字是否开启",
description="语音转文字是否开启"),
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音是否开启",
description="文字转语音是否开启"),
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型",
description="文字转语音类型")
}
)
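
For orientation, a hedged example of a full create request body described by this schema; every value below is invented and the UUIDs are placeholders.

application_body = {
    'name': 'demo',
    'desc': 'Simple application',
    'model_id': '<model-uuid>',
    'dialogue_number': 1,
    'dataset_id_list': [],
    'dataset_setting': {
        'search_mode': 'embedding',
        'no_references_setting': {'status': 'ai_questioning', 'value': '{question}'},
    },
    'model_setting': {
        'prompt': 'Known information: {data}\nQuestion: {question}',
        'system': 'You are a helpful assistant.',   # new: role (system) prompt
        'no_references_prompt': '{question}',       # new: prompt used when nothing is retrieved
    },
    'problem_optimization': True,
    'problem_optimization_prompt': 'Rewrite ({question}) and wrap the result in <data></data> tags',
    'type': 'SIMPLE',
    'stt_model_enable': False,
    'tts_model_enable': False,
    'tts_type': 'BROWSER',
}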

View File

@ -4,12 +4,13 @@ interface ApplicationFormType {
name?: string
desc?: string
model_id?: string
multiple_rounds_dialogue?: boolean
dialogue_number?: number
prologue?: string
dataset_id_list?: string[]
dataset_setting?: any
model_setting?: any
problem_optimization?: boolean
problem_optimization_prompt?: string
icon?: string | undefined
type?: string
work_flow?: any

View File

@ -104,8 +104,10 @@ export default {
}
},
prompt: {
defaultPrompt:
'已知信息:\n{data}\n回答要求\n- 请使用简洁且专业的语言来回答用户的问题。\n- 如果你不知道答案,请回答“没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作”。\n- 避免提及你是从已知信息中获得的知识。\n- 请保证答案与已知信息中描述的一致。\n- 请使用 Markdown 语法优化答案的格式。\n- 已知信息中的图片、链接地址和脚本语言请直接返回。\n- 请使用与问题相同的语言来回答。\n问题\n{question}',
defaultPrompt: `已知信息:{data}
{question}
- 使`,
defaultPrologue:
'您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?'
}

View File

@ -61,10 +61,7 @@
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.aiModel.label')"
prop="model_id"
>
<el-form-item :label="$t('views.application.applicationForm.form.aiModel.label')">
<template #label>
<div class="flex-between">
<span>{{ $t('views.application.applicationForm.form.aiModel.label') }}</span>
@ -151,47 +148,51 @@
</template>
</el-select>
</el-form-item>
<el-form-item label="角色设定">
<el-input
v-model="applicationForm.model_setting.system"
:rows="6"
type="textarea"
maxlength="2048"
placeholder="你是 xxx 小助手"
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.prompt.label')"
prop="model_setting.prompt"
prop="model_setting.no_references_prompt"
:rules="{
required: applicationForm.model_id,
message: '请输入提示词',
trigger: 'blur'
}"
>
<template #label>
<div class="flex align-center">
<div class="flex-between mr-4">
<span
>{{ $t('views.application.applicationForm.form.prompt.label') }}
<span class="danger">*</span></span
>
(无引用知识库)
<span class="danger" v-if="applicationForm.model_id">*</span>
</span>
</div>
<el-tooltip effect="dark" placement="right">
<template #content
>{{
$t('views.application.applicationForm.form.prompt.tooltip', {
data: '{data}',
question: '{question}'
})
}}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-input
v-model="applicationForm.model_setting.prompt"
v-model="applicationForm.model_setting.no_references_prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
placeholder="{question}"
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.multipleRoundsDialogue')"
@click.prevent
>
<el-switch
size="small"
v-model="applicationForm.multiple_rounds_dialogue"
></el-switch>
<el-form-item label="历史聊天记录" @click.prevent>
<el-input-number
v-model="applicationForm.dialogue_number"
:min="0"
:value-on-clear="0"
controls-position="right"
class="w-full"
/>
</el-form-item>
<el-form-item
label="$t('views.application.applicationForm.form.relatedKnowledgeBase')"
@ -260,6 +261,34 @@
</el-row>
</div>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.prompt.label')"
prop="model_setting.prompt"
:rules="{
required: applicationForm.model_id,
message: '请输入提示词',
trigger: 'blur'
}"
>
<template #label>
<div class="flex align-center">
<div class="flex-between mr-4">
<span>
{{ $t('views.application.applicationForm.form.prompt.label') }}
(引用知识库)
<span class="danger" v-if="applicationForm.model_id">*</span>
</span>
</div>
</div>
</template>
<el-input
v-model="applicationForm.model_setting.prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
/>
</el-form-item>
<el-form-item :label="$t('views.application.applicationForm.form.prologue')">
<MdEditor
class="prologue-md-editor"
@ -269,25 +298,7 @@
:footers="[]"
/>
</el-form-item>
<el-form-item @click.prevent>
<template #label>
<div class="flex align-center">
<span class="mr-4">{{
$t('views.application.applicationForm.form.problemOptimization.label')
}}</span>
<el-tooltip
effect="dark"
:content="
$t('views.application.applicationForm.form.problemOptimization.tooltip')
"
placement="right"
>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="applicationForm.problem_optimization"></el-switch>
</el-form-item>
<el-form-item>
<template #label>
<div class="flex-between">
@ -372,8 +383,8 @@
</div>
</template>
<el-radio-group v-model="applicationForm.tts_type">
<el-radio label="BROWSER">浏览器播放(免费)</el-radio>
<el-radio label="TTS">TTS模型</el-radio>
<el-radio value="BROWSER">浏览器播放(免费)</el-radio>
<el-radio value="TTS">TTS模型</el-radio>
</el-radio-group>
<el-select
v-if="applicationForm.tts_type === 'TTS'"
@ -541,7 +552,7 @@ const applicationForm = ref<ApplicationFormType>({
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 1,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@ -555,10 +566,14 @@ const applicationForm = ref<ApplicationFormType>({
}
},
model_setting: {
prompt: defaultPrompt
prompt: defaultPrompt,
system: '你是 xxx 小助手',
no_references_prompt: '{question}'
},
model_params_setting: {},
problem_optimization: false,
problem_optimization_prompt:
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
stt_model_id: '',
tts_model_id: '',
stt_model_enable: false,
@ -574,20 +589,6 @@ const rules = reactive<FormRules<ApplicationFormType>>({
message: t('views.application.applicationForm.form.appName.placeholder'),
trigger: 'blur'
}
],
model_id: [
{
required: false,
message: t('views.application.applicationForm.form.aiModel.placeholder'),
trigger: 'change'
}
],
'model_setting.prompt': [
{
required: true,
message: t('views.application.applicationForm.form.prompt.placeholder'),
trigger: 'blur'
}
]
})
const modelOptions = ref<any>(null)
@ -623,11 +624,11 @@ const openAIParamSettingDialog = () => {
}
const openParamSettingDialog = () => {
ParamSettingDialogRef.value?.open(applicationForm.value.dataset_setting)
ParamSettingDialogRef.value?.open(applicationForm.value)
}
function refreshParam(data: any) {
applicationForm.value.dataset_setting = data
applicationForm.value = { ...applicationForm.value, ...data }
}
function refreshForm(data: any) {
@ -666,6 +667,8 @@ function getDetail() {
applicationForm.value.stt_model_id = res.data.stt_model
applicationForm.value.tts_model_id = res.data.tts_model
applicationForm.value.tts_type = res.data.tts_type
applicationForm.value.model_setting.no_references_prompt =
res.data.model_setting.no_references_prompt || ''
})
}

View File

@ -104,7 +104,7 @@ const applicationForm = ref<ApplicationFormType>({
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 1,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@ -118,9 +118,19 @@ const applicationForm = ref<ApplicationFormType>({
}
},
model_setting: {
prompt: defaultPrompt
prompt: defaultPrompt,
system: '你是 xxx 小助手',
no_references_prompt: '{question}'
},
model_params_setting: {},
problem_optimization: false,
problem_optimization_prompt:
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
stt_model_id: '',
tts_model_id: '',
stt_model_enable: false,
tts_model_enable: false,
tts_type: 'BROWSER',
type: 'SIMPLE'
})
@ -147,7 +157,7 @@ watch(dialogVisible, (bool) => {
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 1,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@ -161,9 +171,18 @@ watch(dialogVisible, (bool) => {
}
},
model_setting: {
prompt: defaultPrompt
prompt: defaultPrompt,
system: '你是 xxx 小助手',
no_references_prompt: '{question}'
},
model_params_setting: {},
problem_optimization: false,
problem_optimization_prompt: '',
stt_model_id: '',
tts_model_id: '',
stt_model_enable: false,
tts_model_enable: false,
tts_type: 'BROWSER',
type: 'SIMPLE'
}
applicationFormRef.value?.clearValidate()

View File

@ -14,7 +14,11 @@
<el-form-item
:label="$t('views.application.applicationForm.dialogues.selectSearchMode')"
>
<el-radio-group v-model="form.search_mode" class="card__radio" @change="changeHandle">
<el-radio-group
v-model="form.dataset_setting.search_mode"
class="card__radio"
@change="changeHandle"
>
<el-card
shadow="never"
class="mb-16"
@ -32,7 +36,7 @@
<el-card
shadow="never"
class="mb-16"
:class="form.search_mode === 'keywords' ? 'active' : ''"
:class="form.dataset_setting.search_mode === 'keywords' ? 'active' : ''"
>
<el-radio value="keywords" size="large">
<p class="mb-4">
@ -43,7 +47,10 @@
}}</el-text>
</el-radio>
</el-card>
<el-card shadow="never" :class="form.search_mode === 'blend' ? 'active' : ''">
<el-card
shadow="never"
:class="form.dataset_setting.search_mode === 'blend' ? 'active' : ''"
>
<el-radio value="blend" size="large">
<p class="mb-4">
{{ $t('views.application.applicationForm.dialogues.hybridSearch') }}
@ -69,7 +76,7 @@
</div>
</template>
<el-input-number
v-model="form.similarity"
v-model="form.dataset_setting.similarity"
:min="0"
:max="form.search_mode === 'blend' ? 2 : 1"
:precision="3"
@ -98,7 +105,7 @@
<el-form-item :label="$t('views.application.applicationForm.dialogues.maxCharacters')">
<el-slider
v-model="form.max_paragraph_char_number"
v-model="form.dataset_setting.max_paragraph_char_number"
show-input
:show-input-controls="false"
:min="500"
@ -119,34 +126,23 @@
:hide-required-asterisk="true"
>
<el-radio-group
v-model="form.no_references_setting.status"
class="radio-block mb-16"
v-model="form.dataset_setting.no_references_setting.status"
class="radio-block"
>
<div>
<el-radio value="ai_questioning">
<p>
{{ $t('views.application.applicationForm.dialogues.continueQuestioning') }}
</p>
<el-form-item
v-if="form.no_references_setting.status === 'ai_questioning'"
:label="$t('views.application.applicationForm.form.prompt.label')"
prop="ai_questioning"
>
<el-input
v-model="noReferencesform.ai_questioning"
:rows="2"
type="textarea"
maxlength="2048"
:placeholder="defaultValue['ai_questioning']"
/>
</el-form-item>
</el-radio>
</div>
<div class="mt-8">
<div>
<el-radio value="designated_answer">
<p>{{ $t('views.application.applicationForm.dialogues.provideAnswer') }}</p>
<el-form-item
v-if="form.no_references_setting.status === 'designated_answer'"
v-if="
form.dataset_setting.no_references_setting.status === 'designated_answer'
"
prop="designated_answer"
>
<el-input
@ -162,6 +158,29 @@
</el-radio-group>
</el-form>
</el-form-item>
<el-form-item @click.prevent v-if="!isWorkflowType">
<template #label>
<div class="flex align-center">
<span class="mr-4">{{
$t('views.application.applicationForm.form.problemOptimization.label')
}}</span>
</div>
</template>
<el-switch size="small" v-model="form.problem_optimization"></el-switch>
</el-form-item>
<el-form-item
v-if="form.problem_optimization"
:label="$t('views.application.applicationForm.form.prompt.label')"
>
<el-input
v-model="form.problem_optimization_prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
/>
</el-form-item>
</el-form>
</div>
</el-scrollbar>
@ -195,15 +214,21 @@ const defaultValue = {
designated_answer: t('views.application.applicationForm.dialogues.designated_answer')
}
const defaultPrompt = `()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中`
const form = ref<any>({
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: '{question}'
}
dataset_setting: {
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: '{question}'
}
},
problem_optimization: false,
problem_optimization_prompt: defaultPrompt
})
const noReferencesform = ref<any>({
@ -236,14 +261,18 @@ const isWorkflowType = ref(false)
watch(dialogVisible, (bool) => {
if (!bool) {
form.value = {
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: ''
}
dataset_setting: {
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: '{question}'
}
},
problem_optimization: false,
problem_optimization_prompt: ''
}
noReferencesform.value = {
ai_questioning: defaultValue['ai_questioning'],
@ -255,9 +284,16 @@ watch(dialogVisible, (bool) => {
const open = (data: any, type?: string) => {
isWorkflowType.value = isWorkFlow(type)
form.value = { ...form.value, ...cloneDeep(data) }
noReferencesform.value[form.value.no_references_setting.status] =
form.value.no_references_setting.value
form.value = {
dataset_setting: { ...data.dataset_setting },
problem_optimization: data.problem_optimization,
problem_optimization_prompt: data.problem_optimization_prompt
}
if (!isWorkflowType.value) {
noReferencesform.value[form.value.dataset_setting.no_references_setting.status] =
form.value.dataset_setting.no_references_setting.value
}
dialogVisible.value = true
}
@ -270,8 +306,8 @@ const submit = async (formEl: FormInstance | undefined) => {
if (!formEl) return
await formEl.validate((valid, fields) => {
if (valid) {
form.value.no_references_setting.value =
noReferencesform.value[form.value.no_references_setting.status]
form.value.dataset_setting.no_references_setting.value =
noReferencesform.value[form.value.dataset_setting.no_references_setting.status]
emit('refresh', form.value)
dialogVisible.value = false
}
@ -281,9 +317,9 @@ const submit = async (formEl: FormInstance | undefined) => {
function changeHandle(val: string) {
if (val === 'keywords') {
form.value.similarity = 0
form.value.dataset_setting.similarity = 0
} else {
form.value.similarity = 0.6
form.value.dataset_setting.similarity = 0.6
}
}

View File

@ -22,6 +22,7 @@
:gutter="8"
style="margin-bottom: 8px"
v-for="(reranker_reference, index) in form_data.reranker_reference_list"
:key="index"
>
<el-col :span="22">
<el-form-item
@ -212,7 +213,7 @@ const form = {
const providerOptions = ref<Array<Provider>>([])
const modelOptions = ref<any>(null)
const openParamSettingDialog = () => {
ParamSettingDialogRef.value?.open(form_data.value.dataset_setting, 'WORK_FLOW')
ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW')
}
const deleteCondition = (index: number) => {
const list = cloneDeep(props.nodeModel.properties.node_data.reranker_reference_list)
@ -242,7 +243,7 @@ const form_data = computed({
}
})
function refreshParam(data: any) {
set(props.nodeModel.properties.node_data, 'reranker_setting', data)
set(props.nodeModel.properties.node_data, 'reranker_setting', data.dataset_setting)
}
function getModel() {
if (id) {

View File

@ -159,11 +159,11 @@ const datasetList = ref<any>([])
const datasetLoading = ref(false)
function refreshParam(data: any) {
set(props.nodeModel.properties.node_data, 'dataset_setting', data)
set(props.nodeModel.properties.node_data, 'dataset_setting', data.dataset_setting)
}
const openParamSettingDialog = () => {
ParamSettingDialogRef.value?.open(form_data.value.dataset_setting, 'WORK_FLOW')
ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW')
}
function removeDataset(id: any) {