Merge remote-tracking branch 'origin/main'

liqiang-fit2cloud 2024-09-18 14:56:55 +08:00
commit 47e24d87a6
68 changed files with 873 additions and 453 deletions

View File

@ -37,6 +37,8 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
"最大携带知识库段落长度"))
# Prompt template
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
system = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("系统提示词(角色)"))
# Padded (optimized) question
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("补齐问题"))
# No referenced paragraphs found
@ -59,6 +61,7 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
prompt: str,
padding_problem_text: str = None,
no_references_setting=None,
system=None,
**kwargs) -> List[BaseMessage]:
"""
@ -71,6 +74,7 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
:param padding_problem_text: user-revised question text
:param kwargs: other parameters
:param no_references_setting: settings used when no referenced paragraphs are found
:param system: system prompt (role)
:return:
"""
pass

View File

@ -9,6 +9,7 @@
from typing import List, Dict
from langchain.schema import BaseMessage, HumanMessage
from langchain_core.messages import SystemMessage
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
from application.chat_pipeline.step.generate_human_message_step.i_generate_human_message_step import \
@ -27,6 +28,7 @@ class BaseGenerateHumanMessageStep(IGenerateHumanMessageStep):
prompt: str,
padding_problem_text: str = None,
no_references_setting=None,
system=None,
**kwargs) -> List[BaseMessage]:
prompt = prompt if (paragraph_list is not None and len(paragraph_list) > 0) else no_references_setting.get(
'value')
@ -35,6 +37,11 @@ class BaseGenerateHumanMessageStep(IGenerateHumanMessageStep):
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))]
if system is not None and len(system) > 0:
return [SystemMessage(system), *flat_map(history_message),
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list,
no_references_setting)]
return [*flat_map(history_message),
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list,
no_references_setting)]
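Editorial sketch, not part of the diff: with a non-empty system value, the list returned above is simply the system prompt, then the flattened history pairs, then the current human message. Assuming langchain is installed and using hypothetical placeholder content:

from langchain.schema import AIMessage, HumanMessage
from langchain_core.messages import SystemMessage

def flat_map(pairs):
    # Flatten [[human, ai], ...] history pairs into a single message list
    return [message for pair in pairs for message in pair]

history_message = [[HumanMessage("previous question"), AIMessage("previous answer")]]
system = "你是 xxx 小助手"
current = HumanMessage("已知信息: ...\n问题: ...")

messages = [SystemMessage(system), *flat_map(history_message), current]
# -> [SystemMessage, HumanMessage, AIMessage, HumanMessage]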

View File

@ -29,6 +29,8 @@ class IResetProblemStep(IBaseChatPipelineStep):
error_messages=ErrMessage.list("历史对答"))
# Large language model
chat_model = ModelField(required=False, allow_null=True, error_messages=ErrMessage.base("大语言模型"))
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
error_messages=ErrMessage.char("问题补全提示词"))
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
return self.InstanceSerializer
@ -47,5 +49,6 @@ class IResetProblemStep(IBaseChatPipelineStep):
@abstractmethod
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None,
problem_optimization_prompt=None,
**kwargs):
pass

View File

@ -21,6 +21,7 @@ prompt = (
class BaseResetProblemStep(IResetProblemStep):
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None,
problem_optimization_prompt=None,
**kwargs) -> str:
if chat_model is None:
self.context['message_tokens'] = 0
@ -30,8 +31,9 @@ class BaseResetProblemStep(IResetProblemStep):
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))]
reset_prompt = problem_optimization_prompt if problem_optimization_prompt else prompt
message_list = [*flat_map(history_message),
HumanMessage(content=prompt.format(**{'question': problem_text}))]
HumanMessage(content=reset_prompt.replace('{question}', problem_text))]
response = chat_model.invoke(message_list)
padding_problem = problem_text
if response.content.__contains__("<data>") and response.content.__contains__('</data>'):
@ -39,6 +41,9 @@ class BaseResetProblemStep(IResetProblemStep):
response.content.index('<data>') + 6:response.content.index('</data>')]
if padding_problem_data is not None and len(padding_problem_data.strip()) > 0:
padding_problem = padding_problem_data
elif len(response.content) > 0:
padding_problem = response.content
try:
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(padding_problem)
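Editorial note, not part of the diff: switching from str.format to str.replace matters because the optimization prompt is now user-configurable, and str.format raises on any literal braces other than {question}. A tiny illustration with a hypothetical custom prompt:

# Hypothetical user-configured prompt that contains literal braces besides {question}
custom_prompt = '输出 JSON,格式为 {"data": "..."}。问题:{question}'
question = "MaxKB 支持哪些文档类型?"

# custom_prompt.format(question=question)        # would raise KeyError: '"data"'
padded = custom_prompt.replace('{question}', question)  # only the placeholder is substituted
print(padded)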

View File

@ -16,9 +16,6 @@ from application.flow.i_step_node import INode, NodeResult
class IStarNode(INode):
type = 'start-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer] | None:
return None
def _run(self):
return self.execute(**self.flow_params_serializer.data)

View File

@ -15,11 +15,16 @@ from application.flow.step_node.start_node.i_start_node import IStarNode
class BaseStartStepNode(IStarNode):
def execute(self, question, **kwargs) -> NodeResult:
history_chat_record = self.flow_params_serializer.data.get('history_chat_record', [])
history_context = [{'question': chat_record.problem_text, 'answer': chat_record.answer_text} for chat_record in
history_chat_record]
chat_id = self.flow_params_serializer.data.get('chat_id')
"""
Start node: initialize global variables
"""
return NodeResult({'question': question},
{'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'start_time': time.time()})
{'time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'start_time': time.time(),
'history_context': history_context, 'chat_id': str(chat_id)})
def get_details(self, index: int, **kwargs):
global_fields = []

View File

@ -0,0 +1,18 @@
# Generated by Django 4.2.15 on 2024-09-13 18:57
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('application', '0013_application_tts_type'),
]
operations = [
migrations.AddField(
model_name='application',
name='problem_optimization_prompt',
field=models.CharField(blank=True, default='()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中', max_length=102400, null=True, verbose_name='问题优化提示词'),
),
]

View File

@ -35,7 +35,7 @@ def get_dataset_setting_dict():
def get_model_setting_dict():
return {'prompt': Application.get_default_model_prompt()}
return {'prompt': Application.get_default_model_prompt(), 'no_references_prompt': '{question}'}
class Application(AppModelMixin):
@ -54,8 +54,13 @@ class Application(AppModelMixin):
work_flow = models.JSONField(verbose_name="工作流数据", default=dict)
type = models.CharField(verbose_name="应用类型", choices=ApplicationTypeChoices.choices,
default=ApplicationTypeChoices.SIMPLE, max_length=256)
tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True)
stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False, blank=True, null=True)
problem_optimization_prompt = models.CharField(verbose_name="问题优化提示词", max_length=102400, blank=True,
null=True,
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中")
tts_model = models.ForeignKey(Model, related_name='tts_model_id', on_delete=models.SET_NULL, db_constraint=False,
blank=True, null=True)
stt_model = models.ForeignKey(Model, related_name='stt_model_id', on_delete=models.SET_NULL, db_constraint=False,
blank=True, null=True)
tts_model_enable = models.BooleanField(verbose_name="语音合成模型是否启用", default=False)
stt_model_enable = models.BooleanField(verbose_name="语音识别模型是否启用", default=False)
tts_type = models.CharField(verbose_name="语音播放类型", max_length=20, default="BROWSER")

View File

@ -120,7 +120,12 @@ class DatasetSettingSerializer(serializers.Serializer):
class ModelSettingSerializer(serializers.Serializer):
prompt = serializers.CharField(required=True, max_length=2048, error_messages=ErrMessage.char("提示词"))
prompt = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
error_messages=ErrMessage.char("提示词"))
system = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
error_messages=ErrMessage.char("角色提示词"))
no_references_prompt = serializers.CharField(required=True, max_length=102400, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("无引用分段提示词"))
class ApplicationWorkflowSerializer(serializers.Serializer):
@ -174,7 +179,7 @@ class ApplicationSerializer(serializers.Serializer):
error_messages=ErrMessage.char("应用描述"))
model_id = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("模型"))
multiple_rounds_dialogue = serializers.BooleanField(required=True, error_messages=ErrMessage.char("多轮对话"))
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.char("会话次数"))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char("开场白"))
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
@ -185,6 +190,8 @@ class ApplicationSerializer(serializers.Serializer):
model_setting = ModelSettingSerializer(required=True)
# Question optimization
problem_optimization = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("问题补全"))
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
error_messages=ErrMessage.char("问题补全提示词"))
# Application type
type = serializers.CharField(required=True, error_messages=ErrMessage.char("应用类型"),
validators=[
@ -364,8 +371,8 @@ class ApplicationSerializer(serializers.Serializer):
error_messages=ErrMessage.char("应用描述"))
model_id = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char("模型"))
multiple_rounds_dialogue = serializers.BooleanField(required=False,
error_messages=ErrMessage.boolean("多轮会话"))
dialogue_number = serializers.IntegerField(required=False,
error_messages=ErrMessage.boolean("多轮会话"))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char("开场白"))
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
@ -430,13 +437,14 @@ class ApplicationSerializer(serializers.Serializer):
def to_application_model(user_id: str, application: Dict):
return Application(id=uuid.uuid1(), name=application.get('name'), desc=application.get('desc'),
prologue=application.get('prologue'),
dialogue_number=3 if application.get('multiple_rounds_dialogue') else 0,
dialogue_number=application.get('dialogue_number', 0),
user_id=user_id, model_id=application.get('model_id'),
dataset_setting=application.get('dataset_setting'),
model_setting=application.get('model_setting'),
problem_optimization=application.get('problem_optimization'),
type=ApplicationTypeChoices.SIMPLE,
model_params_setting=application.get('model_params_setting', {}),
problem_optimization_prompt=application.get('problem_optimization_prompt', None),
work_flow={}
)
@ -601,7 +609,8 @@ class ApplicationSerializer(serializers.Serializer):
if with_valid:
self.is_valid(raise_exception=True)
application = QuerySet(Application).filter(id=self.data.get("application_id")).first()
return FunctionLibSerializer.Query(data={'user_id': application.user_id}).list(with_valid=True)
return FunctionLibSerializer.Query(data={'user_id': application.user_id, 'is_active': True}).list(
with_valid=True)
def get_function_lib(self, function_lib_id, with_valid=True):
if with_valid:

View File

@ -60,6 +60,17 @@ class ChatInfo:
self.chat_record_list: List[ChatRecord] = []
self.work_flow_version = work_flow_version
@staticmethod
def get_no_references_setting(dataset_setting, model_setting):
no_references_setting = dataset_setting.get(
'no_references_setting', {
'status': 'ai_questioning',
'value': '{question}'})
if no_references_setting.get('status') == 'ai_questioning':
no_references_prompt = model_setting.get('no_references_prompt', '{question}')
no_references_setting['value'] = no_references_prompt if len(no_references_prompt) > 0 else "{question}"
return no_references_setting
def to_base_pipeline_manage_params(self):
dataset_setting = self.application.dataset_setting
model_setting = self.application.model_setting
@ -80,8 +91,13 @@ class ChatInfo:
'history_chat_record': self.chat_record_list,
'chat_id': self.chat_id,
'dialogue_number': self.application.dialogue_number,
'problem_optimization_prompt': self.application.problem_optimization_prompt if self.application.problem_optimization_prompt is not None and len(
self.application.problem_optimization_prompt) > 0 else '()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
'prompt': model_setting.get(
'prompt') if 'prompt' in model_setting else Application.get_default_model_prompt(),
'prompt') if 'prompt' in model_setting and len(model_setting.get(
'prompt')) > 0 else Application.get_default_model_prompt(),
'system': model_setting.get(
'system', None),
'model_id': model_id,
'problem_optimization': self.application.problem_optimization,
'stream': True,
@ -89,11 +105,7 @@ class ChatInfo:
self.application.model_params_setting.keys()) == 0 else self.application.model_params_setting,
'search_mode': self.application.dataset_setting.get(
'search_mode') if 'search_mode' in self.application.dataset_setting else 'embedding',
'no_references_setting': self.application.dataset_setting.get(
'no_references_setting') if 'no_references_setting' in self.application.dataset_setting else {
'status': 'ai_questioning',
'value': '{question}',
},
'no_references_setting': self.get_no_references_setting(self.application.dataset_setting, model_setting),
'user_id': self.application.user_id
}
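Editorial sketch of the new resolution logic above, written as a standalone function with illustrative inputs: when the dataset's no-reference status is 'ai_questioning', the prompt now comes from model_setting.no_references_prompt and falls back to '{question}' when empty.

def get_no_references_setting(dataset_setting, model_setting):
    # Standalone mirror of ChatInfo.get_no_references_setting, for illustration only
    no_references_setting = dataset_setting.get(
        'no_references_setting', {'status': 'ai_questioning', 'value': '{question}'})
    if no_references_setting.get('status') == 'ai_questioning':
        no_references_prompt = model_setting.get('no_references_prompt', '{question}')
        no_references_setting['value'] = no_references_prompt if len(no_references_prompt) > 0 else '{question}'
    return no_references_setting

print(get_no_references_setting({}, {'no_references_prompt': ''}))
# -> {'status': 'ai_questioning', 'value': '{question}'}
print(get_no_references_setting(
    {'no_references_setting': {'status': 'designated_answer', 'value': '未找到相关内容'}},
    {'no_references_prompt': '请直接回答:{question}'}))
# -> the designated answer is kept untouched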

View File

@ -40,15 +40,15 @@ class ApplicationApi(ApiMixin):
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['id', 'name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'user_id', 'status', 'create_time',
required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'user_id', 'status', 'create_time',
'update_time'],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description="主键id"),
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
"multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
description="是否开启多轮对话"),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
description="多轮对话次数"),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
'example': openapi.Schema(type=openapi.TYPE_ARRAY, items=openapi.Schema(type=openapi.TYPE_STRING),
title="示例列表", description="示例列表"),
@ -164,8 +164,8 @@ class ApplicationApi(ApiMixin):
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
"multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
description="是否开启多轮对话"),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
description="多轮对话次数"),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
@ -176,7 +176,22 @@ class ApplicationApi(ApiMixin):
description="是否开启问题优化", default=True),
'icon': openapi.Schema(type=openapi.TYPE_STRING, title="icon",
description="icon", default="/ui/favicon.ico"),
'work_flow': ApplicationApi.WorkFlow.get_request_body_api()
'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型",
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"),
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词',
description="问题优化提示词",
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中"),
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID",
description="文字转语音模型ID"),
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id",
description="语音转文字模型id"),
'stt_model_enable': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="语音转文字是否开启",
description="语音转文字是否开启"),
'tts_model_enable': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="文字转语音是否开启",
description="文字转语音是否开启"),
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型",
description="文字转语音类型")
}
)
@ -248,6 +263,11 @@ class ApplicationApi(ApiMixin):
'\n问题:'
'\n{question}')),
'system': openapi.Schema(type=openapi.TYPE_STRING, title="系统提示词(角色)",
description="系统提示词(角色)"),
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING, title="无引用分段提示词",
default="{question}", description="无引用分段提示词")
}
)
@ -267,14 +287,14 @@ class ApplicationApi(ApiMixin):
def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['name', 'desc', 'model_id', 'multiple_rounds_dialogue', 'dataset_setting', 'model_setting',
'problem_optimization'],
required=['name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
'problem_optimization', 'stt_model_enable', 'tts_model_enable', 'tts_type'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING, title="应用名称", description="应用名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="应用描述", description="应用描述"),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title="模型id", description="模型id"),
"multiple_rounds_dialogue": openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否开启多轮对话",
description="是否开启多轮对话"),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER, title="多轮对话次数",
description="多轮对话次数"),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title="开场白", description="开场白"),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
@ -284,8 +304,20 @@ class ApplicationApi(ApiMixin):
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="问题优化",
description="是否开启问题优化", default=True),
'type': openapi.Schema(type=openapi.TYPE_STRING, title="应用类型",
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW")
description="应用类型 简易:SIMPLE|工作流:WORK_FLOW"),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING, title='问题优化提示词',
description="问题优化提示词",
default="()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中"),
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音模型ID",
description="文字转语音模型ID"),
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title="语音转文字模型id",
description="语音转文字模型id"),
'stt_model_enable': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="语音转文字是否开启",
description="语音转文字是否开启"),
'tts_model_enable': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="文字转语音是否开启",
description="文字转语音是否开启"),
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title="文字转语音类型",
description="文字转语音类型")
}
)

View File

@ -19,26 +19,41 @@ class XlsSplitHandle(BaseParseTableHandle):
def handle(self, file, get_buffer, save_image):
buffer = get_buffer(file)
try:
wb = xlrd.open_workbook(file_contents=buffer)
wb = xlrd.open_workbook(file_contents=buffer, formatting_info=True)
result = []
sheets = wb.sheets()
for sheet in sheets:
# Get the ranges of merged cells
merged_cells = sheet.merged_cells
data = []
paragraphs = []
rows = iter([sheet.row_values(i) for i in range(sheet.nrows)])
if not rows: continue
ti = next(rows)
for r in rows:
l = []
for i, c in enumerate(r):
if not c:
continue
t = str(ti[i]) if i < len(ti) else ""
t += (": " if t else "") + str(c)
l.append(t)
l = "; ".join(l)
if sheet.name.lower().find("sheet") < 0:
l += " ——" + sheet.name
paragraphs.append({'title': '', 'content': l})
# Use the first row as the header row
headers = [sheet.cell_value(0, col_idx) for col_idx in range(sheet.ncols)]
# Iterate over each row starting from the second (skip the header row)
for row_idx in range(1, sheet.nrows):
row_data = {}
for col_idx in range(sheet.ncols):
cell_value = sheet.cell_value(row_idx, col_idx)
# If the cell is empty, check whether it falls inside a merged range
if cell_value == "":
# Check whether the current cell is inside a merged range
for (rlo, rhi, clo, chi) in merged_cells:
if rlo <= row_idx < rhi and clo <= col_idx < chi:
# Use the value of the top-left cell of the merged range
cell_value = sheet.cell_value(rlo, clo)
break
# Store the value in the row dict, keyed by the column header
row_data[headers[col_idx]] = cell_value
data.append(row_data)
for row in data:
row_output = "; ".join([f"{key}: {value}" for key, value in row.items()])
paragraphs.append({'title': '', 'content': row_output})
result.append({'name': sheet.name, 'paragraphs': paragraphs})
except BaseException as e:
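Editorial sketch, not part of the diff: xlrd only populates sheet.merged_cells when the workbook is opened with formatting_info=True, and each entry is a half-open (rlo, rhi, clo, chi) range, which is why the bounds check above uses <= on the low edge and < on the high edge. A standalone version of the lookup, with a hypothetical file path:

import xlrd  # xlrd 2.x reads .xls only; formatting_info=True is required for merged_cells

def resolve_cell(sheet, row_idx, col_idx):
    # Return the cell value, backfilling blanks from the top-left cell of a merged range
    value = sheet.cell_value(row_idx, col_idx)
    if value == "":
        for rlo, rhi, clo, chi in sheet.merged_cells:
            if rlo <= row_idx < rhi and clo <= col_idx < chi:
                return sheet.cell_value(rlo, clo)
    return value

# wb = xlrd.open_workbook('example.xls', formatting_info=True)  # hypothetical file
# print(resolve_cell(wb.sheets()[0], 3, 0))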

View File

@ -17,6 +17,35 @@ class XlsxSplitHandle(BaseParseTableHandle):
return True
return False
def fill_merged_cells(self, sheet, image_dict):
data = []
# Use the first row as the header row
headers = [cell.value for cell in sheet[1]]
# Iterate over each row starting from the second
for row in sheet.iter_rows(min_row=2, values_only=False):
row_data = {}
for col_idx, cell in enumerate(row):
cell_value = cell.value
# If the cell is empty and lies inside a merged range, take the merged range's value
if cell_value is None:
for merged_range in sheet.merged_cells.ranges:
if cell.coordinate in merged_range:
cell_value = sheet[merged_range.min_row][merged_range.min_col - 1].value
break
image = image_dict.get(cell_value, None)
if image is not None:
cell_value = f'![](/api/image/{image.id})'
# Store the value in the row dict, keyed by the column header
row_data[headers[col_idx]] = cell_value
data.append(row_data)
return data
def handle(self, file, get_buffer, save_image):
buffer = get_buffer(file)
try:
@ -30,25 +59,13 @@ class XlsxSplitHandle(BaseParseTableHandle):
for sheetname in wb.sheetnames:
paragraphs = []
ws = wb[sheetname]
rows = list(ws.rows)
if not rows: continue
ti = list(rows[0])
for r in list(rows[1:]):
l = []
for i, c in enumerate(r):
if not c.value:
continue
t = str(ti[i].value) if i < len(ti) else ""
content = str(c.value)
image = image_dict.get(content, None)
if image is not None:
content = f'![](/api/image/{image.id})'
t += (": " if t else "") + content
l.append(t)
l = "; ".join(l)
if sheetname.lower().find("sheet") < 0:
l += " ——" + sheetname
paragraphs.append({'title': '', 'content': l})
data = self.fill_merged_cells(ws, image_dict)
for row in data:
row_output = "; ".join([f"{key}: {value}" for key, value in row.items()])
paragraphs.append({'title': '', 'content': row_output})
result.append({'name': sheetname, 'paragraphs': paragraphs})
except BaseException as e:
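Editorial sketch of the openpyxl counterpart used by fill_merged_cells above, without the image substitution and with a hypothetical workbook path:

from openpyxl import load_workbook

def rows_as_dicts(sheet):
    headers = [cell.value for cell in sheet[1]]        # first row supplies the column headers
    rows = []
    for row in sheet.iter_rows(min_row=2):
        record = {}
        for col_idx, cell in enumerate(row):
            value = cell.value
            if value is None:                          # blank cell: may belong to a merged range
                for merged_range in sheet.merged_cells.ranges:
                    if cell.coordinate in merged_range:
                        value = sheet.cell(merged_range.min_row, merged_range.min_col).value
                        break
            record[headers[col_idx]] = value
        rows.append(record)
    return rows

# wb = load_workbook('example.xlsx')                   # hypothetical file
# for row in rows_as_dicts(wb.active):
#     print("; ".join(f"{key}: {value}" for key, value in row.items()))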

View File

@ -23,6 +23,7 @@ urlpatterns = [
path('dataset/<str:dataset_id>/document/_bach', views.Document.Batch.as_view()),
path('dataset/<str:dataset_id>/document/batch_hit_handling', views.Document.BatchEditHitHandling.as_view()),
path('dataset/<str:dataset_id>/document/<int:current_page>/<int:page_size>', views.Document.Page.as_view()),
path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>', views.Document.Operate.as_view(),
name="document_operate"),
path('dataset/document/split', views.Document.Split.as_view(),
@ -34,7 +35,6 @@ urlpatterns = [
name="document_export"),
path('dataset/<str:dataset_id>/document/<str:document_id>/sync', views.Document.SyncWeb.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/refresh', views.Document.Refresh.as_view()),
path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph', views.Paragraph.as_view()),
path(
'dataset/<str:dataset_id>/document/<str:document_id>/paragraph/migrate/dataset/<str:target_dataset_id>/document/<str:target_document_id>',

View File

@ -239,7 +239,7 @@ class Document(APIView):
class BatchRefresh(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary="批量刷新文档向量库",
operation_id="批量刷新文档向量库",
request_body=

View File

@ -0,0 +1,23 @@
# Generated by Django 4.2.15 on 2024-09-14 11:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('function_lib', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='functionlib',
name='is_active',
field=models.BooleanField(default=True),
),
migrations.AddField(
model_name='functionlib',
name='permission_type',
field=models.CharField(choices=[('PUBLIC', '公开'), ('PRIVATE', '私有')], default='PRIVATE', max_length=20, verbose_name='权限类型'),
),
]

View File

@ -15,6 +15,11 @@ from common.mixins.app_model_mixin import AppModelMixin
from users.models import User
class PermissionType(models.TextChoices):
PUBLIC = "PUBLIC", '公开'
PRIVATE = "PRIVATE", "私有"
class FunctionLib(AppModelMixin):
id = models.UUIDField(primary_key=True, max_length=128, default=uuid.uuid1, editable=False, verbose_name="主键id")
user = models.ForeignKey(User, on_delete=models.CASCADE, verbose_name="用户id")
@ -24,6 +29,9 @@ class FunctionLib(AppModelMixin):
input_field_list = ArrayField(verbose_name="输入字段列表",
base_field=models.JSONField(verbose_name="输入字段", default=dict)
, default=list)
is_active = models.BooleanField(default=True)
permission_type = models.CharField(max_length=20, verbose_name='权限类型', choices=PermissionType.choices,
default=PermissionType.PRIVATE)
class Meta:
db_table = "function_lib"

View File

@ -11,7 +11,7 @@ import re
import uuid
from django.core import validators
from django.db.models import QuerySet
from django.db.models import QuerySet, Q
from rest_framework import serializers
from common.db.search import page_search
@ -27,7 +27,7 @@ function_executor = FunctionExecutor(CONFIG.get('SANDBOX'))
class FunctionLibModelSerializer(serializers.ModelSerializer):
class Meta:
model = FunctionLib
fields = ['id', 'name', 'desc', 'code', 'input_field_list',
fields = ['id', 'name', 'desc', 'code', 'input_field_list', 'permission_type', 'is_active',
'create_time', 'update_time']
@ -68,6 +68,8 @@ class EditFunctionLib(serializers.Serializer):
input_field_list = FunctionLibInputField(required=False, many=True)
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char('是否可用'))
class CreateFunctionLib(serializers.Serializer):
name = serializers.CharField(required=True, error_messages=ErrMessage.char("函数名称"))
@ -79,6 +81,12 @@ class CreateFunctionLib(serializers.Serializer):
input_field_list = FunctionLibInputField(required=True, many=True)
permission_type = serializers.CharField(required=True, error_messages=ErrMessage.char("权限"), validators=[
validators.RegexValidator(regex=re.compile("^PUBLIC|PRIVATE$"),
message="权限只支持PUBLIC|PRIVATE", code=500)
])
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char('是否可用'))
class FunctionLibSerializer(serializers.Serializer):
class Query(serializers.Serializer):
@ -87,15 +95,19 @@ class FunctionLibSerializer(serializers.Serializer):
desc = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char("函数描述"))
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.char("是否可用"))
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
def get_query_set(self):
query_set = QuerySet(FunctionLib).filter(user_id=self.data.get('user_id'))
query_set = QuerySet(FunctionLib).filter(
(Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC')))
if self.data.get('name') is not None:
query_set = query_set.filter(name__contains=self.data.get('name'))
if self.data.get('desc') is not None:
query_set = query_set.filter(desc__contains=self.data.get('desc'))
if self.data.get('is_active') is not None:
query_set = query_set.filter(is_active=self.data.get('is_active'))
query_set = query_set.order_by("-create_time")
return query_set
@ -120,7 +132,9 @@ class FunctionLibSerializer(serializers.Serializer):
function_lib = FunctionLib(id=uuid.uuid1(), name=instance.get('name'), desc=instance.get('desc'),
code=instance.get('code'),
user_id=self.data.get('user_id'),
input_field_list=instance.get('input_field_list'))
input_field_list=instance.get('input_field_list'),
permission_type=instance.get('permission_type'),
is_active=instance.get('is_active', True))
function_lib.save()
return FunctionLibModelSerializer(function_lib).data
@ -193,7 +207,7 @@ class FunctionLibSerializer(serializers.Serializer):
if with_valid:
self.is_valid(raise_exception=True)
EditFunctionLib(data=instance).is_valid(raise_exception=True)
edit_field_list = ['name', 'desc', 'code', 'input_field_list']
edit_field_list = ['name', 'desc', 'code', 'input_field_list', 'permission_type', 'is_active']
edit_dict = {field: instance.get(field) for field in edit_field_list if (
field in instance and instance.get(field) is not None)}
QuerySet(FunctionLib).filter(id=self.data.get('id')).update(**edit_dict)
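Editorial sketch, not part of the diff, of the visibility filter get_query_set now builds: the user's own functions OR anything with permission_type PUBLIC, optionally narrowed by is_active. Django Q objects can be composed standalone:

from django.db.models import Q

user_id = '00000000-0000-0000-0000-000000000000'   # hypothetical id from the validated serializer
visibility = Q(user_id=user_id) | Q(permission_type='PUBLIC')
print(visibility)
# <Q: (OR: ('user_id', '00000000-0000-0000-0000-000000000000'), ('permission_type', 'PUBLIC'))>
# In the serializer this is passed to QuerySet(FunctionLib).filter(...), then filtered by is_active.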

View File

@ -103,6 +103,8 @@ class FunctionLibApi(ApiMixin):
'name': openapi.Schema(type=openapi.TYPE_STRING, title="函数名称", description="函数名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="函数描述", description="函数描述"),
'code': openapi.Schema(type=openapi.TYPE_STRING, title="函数内容", description="函数内容"),
'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title="权限", description="权限"),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"),
'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
description="输入变量列表",
items=openapi.Schema(type=openapi.TYPE_OBJECT,
@ -135,11 +137,13 @@ class FunctionLibApi(ApiMixin):
def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['name', 'code', 'input_field_list'],
required=['name', 'code', 'input_field_list', 'permission_type'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING, title="函数名称", description="函数名称"),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title="函数描述", description="函数描述"),
'code': openapi.Schema(type=openapi.TYPE_STRING, title="函数内容", description="函数内容"),
'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title="权限", description="权限"),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title="是否可用", description="是否可用"),
'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
description="输入变量列表",
items=openapi.Schema(type=openapi.TYPE_OBJECT,

View File

@ -19,7 +19,7 @@ class BedrockLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class AzureLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class DeepSeekLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class GeminiLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class KimiLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -23,7 +23,7 @@ class OllamaLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class OpenAILLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -40,8 +40,6 @@ class OpenAIChatModel(MaxKBBaseModel, ChatOpenAI):
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params,
streaming=True,
stream_usage=True,
custom_get_token_ids=custom_get_token_ids
)
return azure_chat_open_ai

View File

@ -25,7 +25,7 @@ class QwenModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=2048,

View File

@ -19,7 +19,7 @@ class VLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class VolcanicEngineLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -11,6 +11,7 @@ import base64
import gzip
import hmac
import json
import os
import uuid
import wave
from enum import Enum
@ -144,6 +145,7 @@ def parse_response(res):
result['code'] = code
payload_size = int.from_bytes(payload[4:8], "big", signed=False)
payload_msg = payload[8:]
print(f"Error code: {code}, message: {payload_msg}")
if payload_msg is None:
return result
if message_compression == GZIP:
@ -321,14 +323,9 @@ class VolcanicEngineSpeechToText(MaxKBBaseModel, BaseSpeechToText):
return result['payload_msg']['result'][0]['text']
def check_auth(self):
header = self.token_auth()
async def check():
async with websockets.connect(self.volcanic_api_url, extra_headers=header, max_size=1000000000,
ssl=ssl_context) as ws:
pass
asyncio.run(check())
cwd = os.path.dirname(os.path.abspath(__file__))
with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f:
self.speech_to_text(f)
def speech_to_text(self, file):
data = file.read()

View File

@ -69,14 +69,7 @@ class VolcanicEngineTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
)
def check_auth(self):
header = self.token_auth()
async def check():
async with websockets.connect(self.volcanic_api_url, extra_headers=header, ping_interval=None,
ssl=ssl_context) as ws:
pass
asyncio.run(check())
self.text_to_speech('你好')
def text_to_speech(self, text):
request_json = {
@ -159,7 +152,7 @@ class VolcanicEngineTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
if message_compression == 1:
error_msg = gzip.decompress(error_msg)
error_msg = str(error_msg, "utf-8")
break
raise Exception(f"Error code: {code}, message: {error_msg}")
elif message_type == 0xc:
msg_size = int.from_bytes(payload[:4], "big", signed=False)
payload = payload[4:]

View File

@ -25,7 +25,7 @@ class WenxinLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=2,
_max=2048,

View File

@ -25,7 +25,7 @@ class XunFeiLLMModelGeneralParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=4096,
_min=1,
_max=4096,
@ -42,7 +42,7 @@ class XunFeiLLMModelProParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=4096,
_min=1,
_max=8192,

View File

@ -8,6 +8,8 @@ import datetime
import hashlib
import hmac
import json
import logging
import os
from datetime import datetime
from typing import Dict
from urllib.parse import urlencode, urlparse
@ -25,6 +27,7 @@ ssl_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ssl_context.check_hostname = False
ssl_context.verify_mode = ssl.CERT_NONE
max_kb = logging.getLogger("max_kb")
class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText):
spark_app_id: str
@ -89,11 +92,9 @@ class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText):
return url
def check_auth(self):
async def check():
async with websockets.connect(self.create_url(), ssl=ssl_context) as ws:
pass
asyncio.run(check())
cwd = os.path.dirname(os.path.abspath(__file__))
with open(f'{cwd}/iat_mp3_16k.mp3', 'rb') as f:
self.speech_to_text(f)
def speech_to_text(self, file):
async def handle():
@ -112,8 +113,7 @@ class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText):
sid = message["sid"]
if code != 0:
errMsg = message["message"]
print("sid:%s call error:%s code is:%s" % (sid, errMsg, code))
return errMsg
raise Exception(f"sid: {sid} call error: {errMsg} code is: {code}")
else:
data = message["data"]["result"]["ws"]
result = ""

View File

@ -10,6 +10,7 @@ import datetime
import hashlib
import hmac
import json
import logging
import os
from datetime import datetime
from typing import Dict
@ -20,6 +21,8 @@ import websockets
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_tts import BaseTextToSpeech
max_kb = logging.getLogger("max_kb")
STATUS_FIRST_FRAME = 0  # marker for the first frame
STATUS_CONTINUE_FRAME = 1  # marker for intermediate frames
STATUS_LAST_FRAME = 2  # marker for the last frame
@ -92,11 +95,7 @@ class XFSparkTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
return url
def check_auth(self):
async def check():
async with websockets.connect(self.create_url(), max_size=1000000000, ssl=ssl_context) as ws:
pass
asyncio.run(check())
self.text_to_speech("你好")
def text_to_speech(self, text):
@ -119,13 +118,13 @@ class XFSparkTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
# print(message)
code = message["code"]
sid = message["sid"]
audio = message["data"]["audio"]
audio = base64.b64decode(audio)
if code != 0:
errMsg = message["message"]
print("sid:%s call error:%s code is:%s" % (sid, errMsg, code))
raise Exception(f"sid: {sid} call error: {errMsg} code is: {code}")
else:
audio = message["data"]["audio"]
audio = base64.b64decode(audio)
audio_bytes += audio
# Exit once the last frame (status == 2) has been received
if message["data"]["status"] == 2:

View File

@ -19,7 +19,7 @@ class XinferenceLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class ZhiPuLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -132,8 +132,8 @@ class RegisterSerializer(ApiMixin, serializers.Serializer):
max_length=20,
min_length=6,
validators=[
validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"),
message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等")
validators.RegexValidator(regex=re.compile("^.{6,20}$"),
message="用户名字符数为 6-20 个字符")
])
password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"),
validators=[validators.RegexValidator(regex=re.compile(
@ -590,8 +590,8 @@ class UserManageSerializer(serializers.Serializer):
max_length=20,
min_length=6,
validators=[
validators.RegexValidator(regex=re.compile("^[a-zA-Z][a-zA-Z0-9_]{5,20}$"),
message="用户名字符数为 6-20 个字符,必须以字母开头,可使用字母、数字、下划线等")
validators.RegexValidator(regex=re.compile("^.{6,20}$"),
message="用户名字符数为 6-20 个字符")
])
password = serializers.CharField(required=True, error_messages=ErrMessage.char("密码"),
validators=[validators.RegexValidator(regex=re.compile(

View File

@ -11,7 +11,7 @@ django = "4.2.15"
djangorestframework = "^3.15.2"
drf-yasg = "1.21.7"
django-filter = "23.2"
langchain = "0.2.3"
langchain = "0.2.16"
langchain_community = "0.2.4"
langchain-huggingface = "^0.0.3"
psycopg2-binary = "2.9.7"

View File

@ -3,6 +3,7 @@ import { get, post, del, put, exportExcel } from '@/request/index'
import type { Ref } from 'vue'
import type { KeyValue } from '@/api/type/common'
import type { pageRequest } from '@/api/type/common'
const prefix = '/dataset'
/**
@ -26,14 +27,14 @@ const listSplitPattern: (
/**
*
* @param dataset_id,
* @param dataset_id,
* page {
"current_page": "string",
"page_size": "string",
}
* param {
"name": "string",
}
"current_page": "string",
"page_size": "string",
}
* param {
"name": "string",
}
*/
const getDocument: (
@ -58,22 +59,22 @@ const getAllDocument: (dataset_id: string, loading?: Ref<boolean>) => Promise<Re
/**
*
* @param
* @param
* {
"name": "string",
"paragraphs": [
{
"content": "string",
"title": "string",
"problem_list": [
{
"id": "string",
"content": "string"
}
]
}
]
}
"name": "string",
"paragraphs": [
{
"content": "string",
"title": "string",
"problem_list": [
{
"id": "string",
"content": "string"
}
]
}
]
}
*/
const postDocument: (
dataset_id: string,
@ -85,13 +86,13 @@ const postDocument: (
/**
*
* @param
* dataset_id, document_id,
* @param
* dataset_id, document_id,
* {
"name": "string",
"is_active": true,
"meta": {}
}
"name": "string",
"is_active": true,
"meta": {}
}
*/
const putDocument: (
dataset_id: string,
@ -124,6 +125,19 @@ const delMulDocument: (
) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
return del(`${prefix}/${dataset_id}/document/_bach`, undefined, { id_list: data }, loading)
}
const batchRefresh: (
dataset_id: string,
data: any,
loading?: Ref<boolean>
) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
return put(
`${prefix}/${dataset_id}/document/batch_refresh`,
{ id_list: data },
undefined,
loading
)
}
/**
*
* @param dataset_id
@ -180,14 +194,14 @@ const delMulSyncDocument: (
/**
* Web site document
* @param
* @param
* {
"source_url_list": [
"string"
],
"selector": "string"
"source_url_list": [
"string"
],
"selector": "string"
}
}
}
*/
const postWebDocument: (
dataset_id: string,
@ -199,9 +213,9 @@ const postWebDocument: (
/**
* QA document
* @param
* @param
* file
}
}
*/
const postQADocument: (
dataset_id: string,
@ -323,5 +337,6 @@ export default {
exportTableTemplate,
postQADocument,
postTableDocument,
exportDocument
exportDocument,
batchRefresh
}

View File

@ -4,12 +4,13 @@ interface ApplicationFormType {
name?: string
desc?: string
model_id?: string
multiple_rounds_dialogue?: boolean
dialogue_number?: number
prologue?: string
dataset_id_list?: string[]
dataset_setting?: any
model_setting?: any
problem_optimization?: boolean
problem_optimization_prompt?: string
icon?: string | undefined
type?: string
work_flow?: any

View File

@ -1,9 +1,11 @@
interface functionLibData {
id?: String
name: String
desc: String
name?: String
desc?: String
code?: String
permission_type?: 'PRIVATE' | 'PUBLIC'
input_field_list?: Array<any>
is_active?: Boolean
}
export type { functionLibData }

View File

@ -170,7 +170,7 @@
<!-- Multi-route recall (reranker) -->
<template v-if="item.type == WorkflowType.RrerankerNode">
<div class="card-never border-r-4">
<h5 class="p-8-12">检索内容</h5>
<h5 class="p-8-12">重排内容</h5>
<div class="p-8-12 border-t-dashed lighter">
<template v-if="item.document_list?.length > 0">
<template
@ -194,7 +194,7 @@
</div>
</div>
<div class="card-never border-r-4 mt-8">
<h5 class="p-8-12">检索结果</h5>
<h5 class="p-8-12">重排结果</h5>
<div class="p-8-12 border-t-dashed lighter">
<template v-if="item.result_list?.length > 0">
<template
@ -203,7 +203,7 @@
>
<CardBox
shadow="never"
:title="''"
:title="`分段${paragraphIndex + 1}`"
class="paragraph-source-card cursor mb-8 paragraph-source-card-height"
:showIcon="false"
>

View File

@ -812,7 +812,7 @@ const startRecording = async () => {
mediaRecorder.value = new Recorder({
type: 'mp3',
bitRate: 128,
sampleRate: 44100
sampleRate: 16000
})
mediaRecorder.value.open(

View File

@ -104,8 +104,10 @@ export default {
}
},
prompt: {
defaultPrompt:
'已知信息:\n{data}\n回答要求\n- 请使用简洁且专业的语言来回答用户的问题。\n- 如果你不知道答案,请回答“没有在知识库中查找到相关信息,建议咨询相关技术支持或参考官方文档进行操作”。\n- 避免提及你是从已知信息中获得的知识。\n- 请保证答案与已知信息中描述的一致。\n- 请使用 Markdown 语法优化答案的格式。\n- 已知信息中的图片、链接地址和脚本语言请直接返回。\n- 请使用与问题相同的语言来回答。\n问题\n{question}',
defaultPrompt: `已知信息:{data}
{question}
- 使`,
defaultPrologue:
'您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?'
}

View File

@ -733,3 +733,13 @@ h5 {
display: none !important;
}
}
.edit-avatar {
position: relative;
.edit-mask {
position: absolute;
left: 0;
background: rgba(0, 0, 0, 0.4);
}
}

View File

@ -332,14 +332,5 @@ onMounted(() => {
right: 16px;
top: 21px;
}
.edit-avatar {
position: relative;
.edit-mask {
position: absolute;
left: 0;
background: rgba(0, 0, 0, 0.4);
}
}
}
</style>

View File

@ -61,10 +61,7 @@
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.aiModel.label')"
prop="model_id"
>
<el-form-item :label="$t('views.application.applicationForm.form.aiModel.label')">
<template #label>
<div class="flex-between">
<span>{{ $t('views.application.applicationForm.form.aiModel.label') }}</span>
@ -151,47 +148,51 @@
</template>
</el-select>
</el-form-item>
<el-form-item label="角色设定">
<el-input
v-model="applicationForm.model_setting.system"
:rows="6"
type="textarea"
maxlength="2048"
placeholder="你是 xxx 小助手"
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.prompt.label')"
prop="model_setting.prompt"
prop="model_setting.no_references_prompt"
:rules="{
required: applicationForm.model_id,
message: '请输入提示词',
trigger: 'blur'
}"
>
<template #label>
<div class="flex align-center">
<div class="flex-between mr-4">
<span
>{{ $t('views.application.applicationForm.form.prompt.label') }}
<span class="danger">*</span></span
>
(无引用知识库)
<span class="danger" v-if="applicationForm.model_id">*</span>
</span>
</div>
<el-tooltip effect="dark" placement="right">
<template #content
>{{
$t('views.application.applicationForm.form.prompt.tooltip', {
data: '{data}',
question: '{question}'
})
}}
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-input
v-model="applicationForm.model_setting.prompt"
v-model="applicationForm.model_setting.no_references_prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
placeholder="{question}"
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.multipleRoundsDialogue')"
@click.prevent
>
<el-switch
size="small"
v-model="applicationForm.multiple_rounds_dialogue"
></el-switch>
<el-form-item label="历史聊天记录" @click.prevent>
<el-input-number
v-model="applicationForm.dialogue_number"
:min="0"
:value-on-clear="0"
controls-position="right"
class="w-full"
/>
</el-form-item>
<el-form-item
label="$t('views.application.applicationForm.form.relatedKnowledgeBase')"
@ -260,6 +261,34 @@
</el-row>
</div>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.prompt.label')"
prop="model_setting.prompt"
:rules="{
required: applicationForm.model_id,
message: '请输入提示词',
trigger: 'blur'
}"
>
<template #label>
<div class="flex align-center">
<div class="flex-between mr-4">
<span>
{{ $t('views.application.applicationForm.form.prompt.label') }}
(引用知识库)
<span class="danger" v-if="applicationForm.model_id">*</span>
</span>
</div>
</div>
</template>
<el-input
v-model="applicationForm.model_setting.prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
/>
</el-form-item>
<el-form-item :label="$t('views.application.applicationForm.form.prologue')">
<MdEditor
class="prologue-md-editor"
@ -269,25 +298,7 @@
:footers="[]"
/>
</el-form-item>
<el-form-item @click.prevent>
<template #label>
<div class="flex align-center">
<span class="mr-4">{{
$t('views.application.applicationForm.form.problemOptimization.label')
}}</span>
<el-tooltip
effect="dark"
:content="
$t('views.application.applicationForm.form.problemOptimization.tooltip')
"
placement="right"
>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="applicationForm.problem_optimization"></el-switch>
</el-form-item>
<el-form-item>
<template #label>
<div class="flex-between">
@ -305,6 +316,7 @@
</div>
</template>
<el-select
v-if="applicationForm.stt_model_enable"
v-model="applicationForm.stt_model_id"
class="w-full"
popper-class="select-model"
@ -371,12 +383,15 @@
<el-switch size="small" v-model="applicationForm.tts_model_enable" />
</div>
</template>
<el-radio-group v-model="applicationForm.tts_type">
<el-radio label="BROWSER">浏览器播放(免费)</el-radio>
<el-radio label="TTS">TTS模型</el-radio>
<el-radio-group
v-model="applicationForm.tts_type"
v-if="applicationForm.tts_model_enable"
>
<el-radio value="BROWSER">浏览器播放(免费)</el-radio>
<el-radio value="TTS">TTS模型</el-radio>
</el-radio-group>
<el-select
v-if="applicationForm.tts_type === 'TTS'"
v-if="applicationForm.tts_type === 'TTS' && applicationForm.tts_model_enable"
v-model="applicationForm.tts_model_id"
class="w-full"
popper-class="select-model"
@ -446,7 +461,11 @@
</h4>
<div class="dialog-bg">
<div class="flex align-center p-24">
<div class="mr-12">
<div
class="edit-avatar mr-12"
@mouseenter="showEditIcon = true"
@mouseleave="showEditIcon = false"
>
<AppAvatar
v-if="isAppIcon(applicationForm?.icon)"
shape="square"
@ -462,8 +481,16 @@
shape="square"
:size="32"
/>
<AppAvatar
v-if="showEditIcon"
shape="square"
class="edit-mask"
:size="32"
@click="openEditAvatar"
>
<el-icon><EditPen /></el-icon>
</AppAvatar>
</div>
<h4>
{{
applicationForm?.name || $t('views.application.applicationForm.form.appName.label')
@ -494,6 +521,7 @@
@change="openCreateModel($event)"
></CreateModelDialog>
<SelectProviderDialog ref="selectProviderRef" @change="openCreateModel($event)" />
<EditAvatarDialog ref="EditAvatarDialogRef" @refresh="refreshIcon" />
</LayoutContainer>
</template>
<script setup lang="ts">
@ -505,6 +533,8 @@ import ParamSettingDialog from './component/ParamSettingDialog.vue'
import AddDatasetDialog from './component/AddDatasetDialog.vue'
import CreateModelDialog from '@/views/template/component/CreateModelDialog.vue'
import SelectProviderDialog from '@/views/template/component/SelectProviderDialog.vue'
import EditAvatarDialog from '@/views/application-overview/component/EditAvatarDialog.vue'
import applicationApi from '@/api/application'
import { isAppIcon } from '@/utils/application'
import type { FormInstance, FormRules } from 'element-plus'
@ -534,6 +564,7 @@ const selectProviderRef = ref<InstanceType<typeof SelectProviderDialog>>()
const applicationFormRef = ref<FormInstance>()
const AddDatasetDialogRef = ref()
const EditAvatarDialogRef = ref()
const loading = ref(false)
const datasetLoading = ref(false)
@ -541,7 +572,7 @@ const applicationForm = ref<ApplicationFormType>({
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 1,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@ -555,10 +586,14 @@ const applicationForm = ref<ApplicationFormType>({
}
},
model_setting: {
prompt: defaultPrompt
prompt: defaultPrompt,
system: '你是 xxx 小助手',
no_references_prompt: '{question}'
},
model_params_setting: {},
problem_optimization: false,
problem_optimization_prompt:
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
stt_model_id: '',
tts_model_id: '',
stt_model_enable: false,
@ -574,20 +609,6 @@ const rules = reactive<FormRules<ApplicationFormType>>({
message: t('views.application.applicationForm.form.appName.placeholder'),
trigger: 'blur'
}
],
model_id: [
{
required: false,
message: t('views.application.applicationForm.form.aiModel.placeholder'),
trigger: 'change'
}
],
'model_setting.prompt': [
{
required: true,
message: t('views.application.applicationForm.form.prompt.placeholder'),
trigger: 'blur'
}
]
})
const modelOptions = ref<any>(null)
@ -595,6 +616,7 @@ const providerOptions = ref<Array<Provider>>([])
const datasetList = ref([])
const sttModelOptions = ref<any>(null)
const ttsModelOptions = ref<any>(null)
const showEditIcon = ref(false)
const submit = async (formEl: FormInstance | undefined) => {
if (!formEl) return
@ -623,11 +645,11 @@ const openAIParamSettingDialog = () => {
}
const openParamSettingDialog = () => {
ParamSettingDialogRef.value?.open(applicationForm.value.dataset_setting)
ParamSettingDialogRef.value?.open(applicationForm.value)
}
function refreshParam(data: any) {
applicationForm.value.dataset_setting = data
applicationForm.value = { ...applicationForm.value, ...data }
}
function refreshForm(data: any) {
@ -666,6 +688,8 @@ function getDetail() {
applicationForm.value.stt_model_id = res.data.stt_model
applicationForm.value.tts_model_id = res.data.tts_model
applicationForm.value.tts_type = res.data.tts_type
applicationForm.value.model_setting.no_references_prompt =
res.data.model_setting.no_references_prompt || ''
})
}
@ -727,6 +751,13 @@ function getProvider() {
})
}
function openEditAvatar() {
EditAvatarDialogRef.value.open(applicationForm.value)
}
function refreshIcon() {
getDetail()
}
function refresh() {
getDataset()
}

View File

@ -162,14 +162,14 @@
:placeholder="defaultPrompt"
/>
</el-form-item>
<el-form-item
:label="$t('views.application.applicationForm.form.multipleRoundsDialogue')"
@click.prevent
>
<el-switch
size="small"
v-model="applicationForm.multiple_rounds_dialogue"
></el-switch>
<el-form-item label="历史聊天记录" @click.prevent>
<el-input-number
v-model="applicationForm.dialogue_number"
:min="0"
:value-on-clear="0"
controls-position="right"
class="w-full"
/>
</el-form-item>
<el-form-item
label="$t('views.application.applicationForm.form.relatedKnowledgeBase')"
@ -355,7 +355,7 @@ const applicationForm = ref<ApplicationFormType>({
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 0,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {

View File

@ -28,10 +28,14 @@
</el-input>
</el-form-item>
</template>
<div v-if="configType === 'wechat'" class="flex align-center" style="margin-bottom: 8px">
<span class="el-form-item__label">是否是订阅号</span>
<el-switch v-if="configType === 'wechat'" v-model="form[configType].is_personal" />
</div>
<h4 class="title-decoration-1 mb-16">回调地址</h4>
<el-form-item label="URL" prop="callback_url">
<el-input v-model="form[configType].callback_url" placeholder="请输入回调地址">
<el-input v-model="form[configType].callback_url" placeholder="请输入回调地址" readonly>
<template #append>
<el-button @click="copyClick(form[configType].callback_url)">
<AppIcon iconName="app-copy"></AppIcon>
@ -102,7 +106,14 @@ const {
} = route as any
const form = reactive<any>({
wechat: { app_id: '', app_secret: '', token: '', encoding_aes_key: '', callback_url: '' },
wechat: {
app_id: '',
app_secret: '',
token: '',
encoding_aes_key: '',
is_personal: false,
callback_url: ''
},
dingtalk: { client_id: '', client_secret: '', callback_url: '' },
wecom: {
app_id: '',

View File

@ -72,7 +72,7 @@ const applicationForm = ref<ApplicationFormType>({
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 0,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@ -108,7 +108,7 @@ watch(dialogVisible, (bool) => {
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 0,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {

View File

@ -67,7 +67,7 @@
<el-button @click.prevent="dialogVisible = false" :loading="loading">
{{ $t('views.application.applicationForm.buttons.cancel') }}
</el-button>
<el-button type="primary" @click="submitValid(applicationFormRef)" :loading="loading">
<el-button type="primary" @click="submitHandle(applicationFormRef)" :loading="loading">
{{ $t('views.application.applicationForm.buttons.create') }}
</el-button>
</span>
@ -104,7 +104,7 @@ const applicationForm = ref<ApplicationFormType>({
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 1,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@ -118,9 +118,19 @@ const applicationForm = ref<ApplicationFormType>({
}
},
model_setting: {
prompt: defaultPrompt
prompt: defaultPrompt,
system: '你是 xxx 小助手',
no_references_prompt: '{question}'
},
model_params_setting: {},
problem_optimization: false,
problem_optimization_prompt:
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
stt_model_id: '',
tts_model_id: '',
stt_model_enable: false,
tts_model_enable: false,
tts_type: 'BROWSER',
type: 'SIMPLE'
})
@ -147,7 +157,7 @@ watch(dialogVisible, (bool) => {
name: '',
desc: '',
model_id: '',
multiple_rounds_dialogue: false,
dialogue_number: 1,
prologue: t('views.application.prompt.defaultPrologue'),
dataset_id_list: [],
dataset_setting: {
@@ -161,9 +171,19 @@ watch(dialogVisible, (bool) => {
}
},
model_setting: {
prompt: defaultPrompt
prompt: defaultPrompt,
system: '你是 xxx 小助手',
no_references_prompt: '{question}'
},
model_params_setting: {},
problem_optimization: false,
problem_optimization_prompt:
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中',
stt_model_id: '',
tts_model_id: '',
stt_model_enable: false,
tts_model_enable: false,
tts_type: 'BROWSER',
type: 'SIMPLE'
}
applicationFormRef.value?.clearValidate()
@@ -174,21 +194,6 @@ const open = () => {
dialogVisible.value = true
}
const submitValid = (formEl: FormInstance | undefined) => {
if (user.isEnterprise()) {
submitHandle(formEl)
} else {
common
.asyncGetValid(ValidType.Application, ValidCount.Application, loading)
.then(async (res: any) => {
if (res?.data) {
submitHandle(formEl)
} else {
MsgAlert('提示', '社区版最多支持 5 个应用,如需拥有更多应用,请升级为专业版。')
}
})
}
}
const submitHandle = async (formEl: FormInstance | undefined) => {
if (!formEl) return
await formEl.validate((valid) => {

View File

@@ -14,7 +14,11 @@
<el-form-item
:label="$t('views.application.applicationForm.dialogues.selectSearchMode')"
>
<el-radio-group v-model="form.search_mode" class="card__radio" @change="changeHandle">
<el-radio-group
v-model="form.dataset_setting.search_mode"
class="card__radio"
@change="changeHandle"
>
<el-card
shadow="never"
class="mb-16"
@@ -32,7 +36,7 @@
<el-card
shadow="never"
class="mb-16"
:class="form.search_mode === 'keywords' ? 'active' : ''"
:class="form.dataset_setting.search_mode === 'keywords' ? 'active' : ''"
>
<el-radio value="keywords" size="large">
<p class="mb-4">
@@ -43,7 +47,10 @@
}}</el-text>
</el-radio>
</el-card>
<el-card shadow="never" :class="form.search_mode === 'blend' ? 'active' : ''">
<el-card
shadow="never"
:class="form.dataset_setting.search_mode === 'blend' ? 'active' : ''"
>
<el-radio value="blend" size="large">
<p class="mb-4">
{{ $t('views.application.applicationForm.dialogues.hybridSearch') }}
@@ -69,7 +76,7 @@
</div>
</template>
<el-input-number
v-model="form.similarity"
v-model="form.dataset_setting.similarity"
:min="0"
:max="form.search_mode === 'blend' ? 2 : 1"
:precision="3"
@@ -98,7 +105,7 @@
<el-form-item :label="$t('views.application.applicationForm.dialogues.maxCharacters')">
<el-slider
v-model="form.max_paragraph_char_number"
v-model="form.dataset_setting.max_paragraph_char_number"
show-input
:show-input-controls="false"
:min="500"
@@ -119,34 +126,23 @@
:hide-required-asterisk="true"
>
<el-radio-group
v-model="form.no_references_setting.status"
class="radio-block mb-16"
v-model="form.dataset_setting.no_references_setting.status"
class="radio-block"
>
<div>
<el-radio value="ai_questioning">
<p>
{{ $t('views.application.applicationForm.dialogues.continueQuestioning') }}
</p>
<el-form-item
v-if="form.no_references_setting.status === 'ai_questioning'"
:label="$t('views.application.applicationForm.form.prompt.label')"
prop="ai_questioning"
>
<el-input
v-model="noReferencesform.ai_questioning"
:rows="2"
type="textarea"
maxlength="2048"
:placeholder="defaultValue['ai_questioning']"
/>
</el-form-item>
</el-radio>
</div>
<div class="mt-8">
<div>
<el-radio value="designated_answer">
<p>{{ $t('views.application.applicationForm.dialogues.provideAnswer') }}</p>
<el-form-item
v-if="form.no_references_setting.status === 'designated_answer'"
v-if="
form.dataset_setting.no_references_setting.status === 'designated_answer'
"
prop="designated_answer"
>
<el-input
@@ -162,6 +158,29 @@
</el-radio-group>
</el-form>
</el-form-item>
<el-form-item @click.prevent v-if="!isWorkflowType">
<template #label>
<div class="flex align-center">
<span class="mr-4">{{
$t('views.application.applicationForm.form.problemOptimization.label')
}}</span>
</div>
</template>
<el-switch size="small" v-model="form.problem_optimization"></el-switch>
</el-form-item>
<el-form-item
v-if="form.problem_optimization"
:label="$t('views.application.applicationForm.form.prompt.label')"
>
<el-input
v-model="form.problem_optimization_prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
/>
</el-form-item>
</el-form>
</div>
</el-scrollbar>
@@ -195,15 +214,21 @@ const defaultValue = {
designated_answer: t('views.application.applicationForm.dialogues.designated_answer')
}
const defaultPrompt = `()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中`
const form = ref<any>({
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: '{question}'
}
dataset_setting: {
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: '{question}'
}
},
problem_optimization: false,
problem_optimization_prompt: defaultPrompt
})
const noReferencesform = ref<any>({
@@ -236,14 +261,18 @@ const isWorkflowType = ref(false)
watch(dialogVisible, (bool) => {
if (!bool) {
form.value = {
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: ''
}
dataset_setting: {
search_mode: 'embedding',
top_n: 3,
similarity: 0.6,
max_paragraph_char_number: 5000,
no_references_setting: {
status: 'ai_questioning',
value: '{question}'
}
},
problem_optimization: false,
problem_optimization_prompt: ''
}
noReferencesform.value = {
ai_questioning: defaultValue['ai_questioning'],
@@ -255,9 +284,16 @@ watch(dialogVisible, (bool) => {
const open = (data: any, type?: string) => {
isWorkflowType.value = isWorkFlow(type)
form.value = { ...form.value, ...cloneDeep(data) }
noReferencesform.value[form.value.no_references_setting.status] =
form.value.no_references_setting.value
form.value = {
dataset_setting: { ...data.dataset_setting },
problem_optimization: data.problem_optimization,
problem_optimization_prompt: data.problem_optimization_prompt
}
if (!isWorkflowType.value) {
noReferencesform.value[form.value.dataset_setting.no_references_setting.status] =
form.value.dataset_setting.no_references_setting.value
}
dialogVisible.value = true
}
@@ -270,8 +306,8 @@ const submit = async (formEl: FormInstance | undefined) => {
if (!formEl) return
await formEl.validate((valid, fields) => {
if (valid) {
form.value.no_references_setting.value =
noReferencesform.value[form.value.no_references_setting.status]
form.value.dataset_setting.no_references_setting.value =
noReferencesform.value[form.value.dataset_setting.no_references_setting.status]
emit('refresh', form.value)
dialogVisible.value = false
}
@@ -281,9 +317,9 @@ const submit = async (formEl: FormInstance | undefined) => {
function changeHandle(val: string) {
if (val === 'keywords') {
form.value.similarity = 0
form.value.dataset_setting.similarity = 0
} else {
form.value.similarity = 0.6
form.value.dataset_setting.similarity = 0.6
}
}
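
After this refactor the dialog owns a nested object rather than a flat dataset_setting, and open()/refresh round-trip the whole thing. A hedged sketch of that payload as the workflow nodes later in this diff appear to consume it; the interface name is illustrative and not part of the commit:

```ts
// Illustrative type only; property names follow the form above.
interface ParamSettingPayload {
  dataset_setting: {
    search_mode: 'embedding' | 'keywords' | 'blend'
    top_n: number
    similarity: number
    max_paragraph_char_number: number
    no_references_setting: {
      status: 'ai_questioning' | 'designated_answer'
      value: string
    }
  }
  problem_optimization: boolean
  problem_optimization_prompt: string
}

// Callers pass the whole object in and read the nested slice back out, e.g.
//   ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW')
//   refreshParam(data: ParamSettingPayload)  // stores data.dataset_setting on node_data
```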

View File

@@ -131,9 +131,11 @@ import { MsgSuccess, MsgConfirm } from '@/utils/message'
import { isAppIcon } from '@/utils/application'
import { useRouter } from 'vue-router'
import { isWorkFlow } from '@/utils/application'
import useStore from '@/stores'
import { ValidType, ValidCount } from '@/enums/common'
import { t } from '@/locales'
const { application, user } = useStore()
import useStore from '@/stores'
const { application, user, common } = useStore()
const router = useRouter()
const CopyApplicationDialogRef = ref()
@@ -168,7 +170,27 @@ function settingApplication(row: any) {
}
function openCreateDialog() {
CreateApplicationDialogRef.value.open()
if (user.isEnterprise()) {
CreateApplicationDialogRef.value.open()
} else {
MsgConfirm(`提示`, '社区版最多支持 5 个应用,如需拥有更多应用,请升级为专业版。', {
cancelButtonText: '确定',
confirmButtonText: '购买专业版',
confirmButtonClass: 'primary'
})
.then(() => {
window.open('https://maxkb.cn/pricing.html', '_blank')
})
.catch(() => {
common
.asyncGetValid(ValidType.Application, ValidCount.Application, loading)
.then(async (res: any) => {
if (res?.data) {
CreateApplicationDialogRef.value.open()
}
})
})
}
}
function searchHandle() {
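
The same community-edition gate (a confirm dialog that links to the pricing page, with a quota check as the fallback) reappears below for knowledge bases and again for users. A sketch of a shared helper the three call sites could delegate to; the helper itself is illustrative and not part of this commit:

```ts
// Illustrative only -- mirrors the repeated MsgConfirm / asyncGetValid pattern
// used for applications, datasets and users in this diff.
import { MsgConfirm } from '@/utils/message'

export function guardCommunityLimit(
  message: string,
  checkQuota: () => Promise<boolean>, // e.g. wraps common.asyncGetValid(...)
  onAllowed: () => void
) {
  MsgConfirm('提示', message, {
    cancelButtonText: '确定',
    confirmButtonText: '购买专业版',
    confirmButtonClass: 'primary'
  })
    .then(() => window.open('https://maxkb.cn/pricing.html', '_blank'))
    .catch(async () => {
      if (await checkQuota()) onAllowed()
    })
}
```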
@@ -222,7 +244,7 @@ onMounted(() => {
.status-tag {
position: absolute;
right: 16px;
top: 20px;
top: 13px;
}
}
.dropdown-custom-switch {

View File

@@ -73,7 +73,7 @@
<el-button @click.prevent="dialogVisible = false" :loading="loading">
{{ $t('views.application.applicationForm.buttons.cancel') }}
</el-button>
<el-button type="primary" @click="submitValid" :loading="loading">
<el-button type="primary" @click="submitHandle" :loading="loading">
{{ $t('views.application.applicationForm.buttons.create') }}
</el-button>
</span>
@@ -124,19 +124,6 @@ const open = () => {
dialogVisible.value = true
}
const submitValid = () => {
if (user.isEnterprise()) {
submitHandle()
} else {
common.asyncGetValid(ValidType.Dataset, ValidCount.Dataset, loading).then(async (res: any) => {
if (res?.data) {
submitHandle()
} else {
MsgAlert('提示', '社区版最多支持 50 个知识库,如需拥有更多知识库,请升级为专业版。')
}
})
}
}
const submitHandle = async () => {
if (await BaseFormRef.value?.validate()) {
await DatasetFormRef.value.validate((valid: any) => {

View File

@@ -107,7 +107,7 @@
</InfiniteScroll>
</div>
<SyncWebDialog ref="SyncWebDialogRef" @refresh="refresh" />
<CreateDatasetDialog ref="CreateDatasetDialogRef"/>
<CreateDatasetDialog ref="CreateDatasetDialogRef" />
</div>
</template>
<script setup lang="ts">
@@ -118,6 +118,10 @@ import datasetApi from '@/api/dataset'
import { MsgSuccess, MsgConfirm } from '@/utils/message'
import { useRouter } from 'vue-router'
import { numberFormat } from '@/utils/utils'
import { ValidType, ValidCount } from '@/enums/common'
import useStore from '@/stores'
const { user, common } = useStore()
const router = useRouter()
const CreateDatasetDialogRef = ref()
@@ -133,7 +137,27 @@ const paginationConfig = reactive({
const searchValue = ref('')
function openCreateDialog() {
CreateDatasetDialogRef.value.open()
if (user.isEnterprise()) {
CreateDatasetDialogRef.value.open()
} else {
MsgConfirm(`提示`, '社区版最多支持 50 个知识库,如需拥有更多知识库,请升级为专业版。', {
cancelButtonText: '确定',
confirmButtonText: '购买专业版',
confirmButtonClass: 'primary'
})
.then(() => {
window.open('https://maxkb.cn/pricing.html', '_blank')
})
.catch(() => {
common
.asyncGetValid(ValidType.Dataset, ValidCount.Dataset, loading)
.then(async (res: any) => {
if (res?.data) {
CreateDatasetDialogRef.value.open()
}
})
})
}
}
function refresh() {
@@ -198,7 +222,7 @@ onMounted(() => {
.delete-button {
position: absolute;
right: 12px;
top: 18px;
top: 13px;
height: auto;
}
.footer-content {

View File

@@ -23,6 +23,9 @@
<el-button @click="openDatasetDialog()" :disabled="multipleSelection.length === 0">
迁移
</el-button>
<el-button @click="batchRefresh" :disabled="multipleSelection.length === 0">
重新向量化
</el-button>
<el-button @click="openBatchEditDocument" :disabled="multipleSelection.length === 0">
设置
</el-button>
@@ -538,6 +541,19 @@ function deleteMulDocument() {
})
}
function batchRefresh() {
const arr: string[] = []
multipleSelection.value.map((v) => {
if (v) {
arr.push(v.id)
}
})
documentApi.batchRefresh(id, arr, loading).then(() => {
MsgSuccess('批量重新向量化成功')
multipleTableRef.value?.clearSelection()
})
}
function deleteDocument(row: any) {
MsgConfirm(
`是否删除文档:${row.name} ?`,
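
batchRefresh above depends on a documentApi.batchRefresh(datasetId, documentIds, loading) helper that is not included in this excerpt. A rough, assumed sketch of such a call, written against fetch so nothing about the project's request wrapper is presumed; the endpoint path is hypothetical:

```ts
// Assumed sketch only -- the real api/document.ts is not shown in this diff,
// and the endpoint path below is a guess for illustration.
async function batchRefresh(datasetId: string, documentIds: string[]): Promise<void> {
  const resp = await fetch(`/api/dataset/${datasetId}/document/batch_refresh`, {
    method: 'PUT',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(documentIds)
  })
  if (!resp.ok) throw new Error(`batch re-vectorize failed: ${resp.status}`)
}
```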

View File

@@ -19,7 +19,7 @@
placeholder="请输入函数名称"
maxlength="64"
show-word-limit
@blur="form.name = form.name.trim()"
@blur="form.name = form.name?.trim()"
/>
</el-form-item>
<el-form-item label="描述">
@@ -30,9 +30,35 @@
maxlength="128"
show-word-limit
:autosize="{ minRows: 3 }"
@blur="form.desc = form.desc.trim()"
@blur="form.desc = form.desc?.trim()"
/>
</el-form-item>
<el-form-item prop="permission_type">
<template #label>
<span>权限</span>
</template>
<el-radio-group v-model="form.permission_type" class="card__radio">
<el-row :gutter="16">
<template v-for="(value, key) of PermissionType" :key="key">
<el-col :span="12">
<el-card
shadow="never"
class="mb-16"
:class="form.permission_type === key ? 'active' : ''"
>
<el-radio :value="key" size="large">
<p class="mb-4">{{ value }}</p>
<el-text type="info">
{{ PermissionDesc[key] }}
</el-text>
</el-radio>
</el-card>
</el-col>
</template>
</el-row>
</el-radio-group>
</el-form-item>
</el-form>
<div class="flex-between">
<h4 class="title-decoration-1 mb-16">
@@ -137,7 +163,7 @@ import functionLibApi from '@/api/function-lib'
import type { FormInstance } from 'element-plus'
import { MsgSuccess, MsgConfirm } from '@/utils/message'
import { cloneDeep } from 'lodash'
import { PermissionType, PermissionDesc } from '@/enums/model'
const props = defineProps({
title: String
})
@@ -158,7 +184,8 @@ const form = ref<functionLibData>({
name: '',
desc: '',
code: '',
input_field_list: []
input_field_list: [],
permission_type: 'PRIVATE'
})
const dialogVisible = ref(false)
@@ -173,13 +200,15 @@ watch(visible, (bool) => {
name: '',
desc: '',
code: '',
input_field_list: []
input_field_list: [],
permission_type: 'PRIVATE'
}
}
})
const rules = reactive({
name: [{ required: true, message: '请输入函数名称', trigger: 'blur' }]
name: [{ required: true, message: '请输入函数名称', trigger: 'blur' }],
permission_type: [{ required: true, message: '请选择', trigger: 'change' }]
})
function openCodemirrorDialog() {
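
The permission radio group above iterates PermissionType and PermissionDesc from '@/enums/model', which is not part of this excerpt. Judging from the PRIVATE/PUBLIC tags used in the list view further down, the enums presumably look roughly like this sketch; the label and description strings are guesses, not the actual file:

```ts
// Hedged sketch of '@/enums/model' -- the keys come from the diff, the strings
// are assumptions for illustration only.
export enum PermissionType {
  PRIVATE = '私有',
  PUBLIC = '公用'
}

export const PermissionDesc: Record<keyof typeof PermissionType, string> = {
  PRIVATE: '仅当前用户可用',
  PUBLIC: '所有用户可用'
}
```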

View File

@@ -11,7 +11,11 @@
clearable
/>
</div>
<div v-loading.fullscreen.lock="paginationConfig.current_page === 1 && loading">
<div
v-loading.fullscreen.lock="
(paginationConfig.current_page === 1 && loading) || changeStateloading
"
>
<InfiniteScroll
:size="functionLibList.length"
:total="paginationConfig.total"
@@ -45,20 +49,34 @@
<img src="@/assets/icon_function_outlined.svg" style="width: 58%" alt="" />
</AppAvatar>
</template>
<div class="status-button">
<el-tag class="info-tag" v-if="item.permission_type === 'PUBLIC'">公用</el-tag>
<el-tag class="danger-tag" v-else-if="item.permission_type === 'PRIVATE'"
>私有</el-tag
>
</div>
<template #footer>
<div class="footer-content">
<el-tooltip effect="dark" content="复制" placement="top">
<el-button text @click.stop="copyFunctionLib(item)">
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</el-tooltip>
<el-divider direction="vertical" />
<el-tooltip effect="dark" content="删除" placement="top">
<el-button text @click.stop="deleteFunctionLib(item)">
<el-icon><Delete /></el-icon>
</el-button>
</el-tooltip>
<div class="footer-content flex-between">
<div>
<el-tooltip effect="dark" content="复制" placement="top">
<el-button text @click.stop="copyFunctionLib(item)">
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</el-tooltip>
<el-divider direction="vertical" />
<el-tooltip effect="dark" content="删除" placement="top">
<el-button text @click.stop="deleteFunctionLib(item)">
<el-icon><Delete /></el-icon>
</el-button>
</el-tooltip>
</div>
<div @click.stop>
<el-switch
v-model="item.is_active"
@change="changeState($event, item)"
size="small"
/>
</div>
</div>
</template>
</CardBox>
@@ -89,6 +107,7 @@ const paginationConfig = reactive({
const searchValue = ref('')
const title = ref('')
const changeStateloading = ref(false)
function openCreateDialog(data?: any) {
title.value = data ? '编辑函数' : '创建函数'
@@ -102,6 +121,33 @@ function searchHandle() {
getList()
}
function changeState(bool: Boolean, row: any) {
if (!bool) {
MsgConfirm(
`是否禁用函数:${row.name} ?`,
`禁用后,引用了该函数的应用提问时会报错 ,请谨慎操作。`,
{
confirmButtonText: '禁用',
confirmButtonClass: 'danger'
}
)
.then(() => {
const obj = {
is_active: bool
}
functionLibApi.putFunctionLib(row.id, obj, changeStateloading).then((res) => {})
})
.catch(() => {
row.is_active = true
})
} else {
const obj = {
is_active: bool
}
functionLibApi.putFunctionLib(row.id, obj, changeStateloading).then((res) => {})
}
}
function deleteFunctionLib(row: any) {
MsgConfirm(
`是否删除函数:${row.name} ?`,
@@ -155,4 +201,13 @@ onMounted(() => {
getList()
})
</script>
<style lang="scss" scoped></style>
<style lang="scss" scoped>
.function-lib-list-container {
.status-button {
position: absolute;
right: 12px;
top: 13px;
height: auto;
}
}
</style>

View File

@@ -140,14 +140,22 @@ function createUser() {
title.value = '创建用户'
UserDialogRef.value.open()
} else {
common.asyncGetValid(ValidType.User, ValidCount.User, loading).then((res: any) => {
if (res?.data) {
title.value = '创建用户'
UserDialogRef.value.open()
} else {
MsgAlert('提示', '社区版最多支持 2 个用户,如需拥有更多用户,请升级为专业版。')
}
MsgConfirm(`提示`, '社区版最多支持 2 个用户,如需拥有更多用户,请升级为专业版。', {
cancelButtonText: '确定',
confirmButtonText: '购买专业版',
confirmButtonClass: 'primary'
})
.then(() => {
window.open('https://maxkb.cn/pricing.html', '_blank')
})
.catch(() => {
common.asyncGetValid(ValidType.User, ValidCount.User, loading).then(async (res: any) => {
if (res?.data) {
title.value = '创建用户'
UserDialogRef.value.open()
}
})
})
}
}

View File

@@ -6,7 +6,7 @@
style="overflow: visible"
>
<div v-resize="resizeStepContainer">
<div class="flex-between mb-16">
<div class="flex-between">
<div
class="flex align-center"
:style="{ maxWidth: node_status == 200 ? 'calc(100% - 55px)' : 'calc(100% - 85px)' }"
@@ -33,6 +33,11 @@
@click.stop
v-if="showOperate(nodeModel.type)"
>
<el-button text @click="showNode = !showNode" class="mr-4">
<el-icon class="mr-8 arrow-icon" :class="showNode ? 'rotate-90' : ''"
><CaretRight
/></el-icon>
</el-button>
<el-dropdown :teleported="false" trigger="click">
<el-button text>
<el-icon class="color-secondary"><MoreFilled /></el-icon>
@@ -46,42 +51,44 @@
</el-dropdown>
</div>
</div>
<div @mousedown.stop @keydown.stop @click.stop>
<el-alert
v-if="node_status != 200"
class="mb-16"
title="该函数不可用"
type="error"
show-icon
:closable="false"
/>
<slot></slot>
<template v-if="nodeFields.length > 0">
<h5 class="title-decoration-1 mb-8 mt-8">参数输出</h5>
<template v-for="(item, index) in nodeFields" :key="index">
<div
class="flex-between border-r-4 p-8-12 mb-8 layout-bg lighter"
@mouseenter="showicon = index"
@mouseleave="showicon = null"
>
<span style="max-width: 92%">{{ item.label }} {{ '{' + item.value + '}' }}</span>
<el-tooltip
effect="dark"
content="复制参数"
placement="top"
v-if="showicon === index"
<el-collapse-transition>
<div @mousedown.stop @keydown.stop @click.stop v-if="showNode" class="mt-16">
<el-alert
v-if="node_status != 200"
class="mb-16"
title="该函数不可用"
type="error"
show-icon
:closable="false"
/>
<slot></slot>
<template v-if="nodeFields.length > 0">
<h5 class="title-decoration-1 mb-8 mt-8">参数输出</h5>
<template v-for="(item, index) in nodeFields" :key="index">
<div
class="flex-between border-r-4 p-8-12 mb-8 layout-bg lighter"
@mouseenter="showicon = index"
@mouseleave="showicon = null"
>
<el-button link @click="copyClick(item.globeLabel)" style="padding: 0">
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</el-tooltip>
</div>
<span style="max-width: 92%">{{ item.label }} {{ '{' + item.value + '}' }}</span>
<el-tooltip
effect="dark"
content="复制参数"
placement="top"
v-if="showicon === index"
>
<el-button link @click="copyClick(item.globeLabel)" style="padding: 0">
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</el-tooltip>
</div>
</template>
</template>
</template>
</div>
</div>
</el-collapse-transition>
</div>
</div>
<el-collapse-transition>
<DropdownMenu
v-if="showAnchor"
@@ -122,6 +129,7 @@ const height = ref<{
})
const showAnchor = ref<boolean>(false)
const anchorData = ref<any>()
const showNode = ref<boolean>(true)
const node_status = computed(() => {
if (props.nodeModel.properties.status) {
return props.nodeModel.properties.status
@@ -240,6 +248,9 @@ onMounted(() => {
border: 1px solid #f54a45 !important;
}
}
.arrow-icon {
transition: 0.2s;
}
}
:deep(.el-card) {
overflow: visible;

View File

@@ -118,6 +118,7 @@
</template>
<el-select
v-if="form_data.stt_model_enable"
v-model="form_data.stt_model_id"
class="w-full"
popper-class="select-model"
@@ -182,12 +183,12 @@
<el-switch size="small" v-model="form_data.tts_model_enable" />
</div>
</template>
<el-radio-group v-model="form_data.tts_type">
<el-radio-group v-model="form_data.tts_type" v-if="form_data.tts_model_enable">
<el-radio label="浏览器播放(免费)" value="BROWSER" />
<el-radio label="TTS模型" value="TTS" />
</el-radio-group>
<el-select
v-if="form_data.tts_type === 'TTS'"
v-if="form_data.tts_type === 'TTS' && form_data.tts_model_enable"
v-model="form_data.tts_model_id"
class="w-full"
popper-class="select-model"
@@ -412,6 +413,7 @@ function openAddDialog(data?: any, index?: any) {
function deleteField(index: any) {
inputFieldList.value.splice(index, 1)
props.nodeModel.graphModel.eventCenter.emit('refreshFieldList', inputFieldList.value)
}
function refreshFieldList(data: any) {
@ -428,6 +430,7 @@ function refreshFieldList(data: any) {
}
currentIndex.value = null
FieldFormDialogRef.value.close()
props.nodeModel.graphModel.eventCenter.emit('refreshFieldList', inputFieldList.value)
}
onMounted(() => {

View File

@@ -22,6 +22,7 @@
:gutter="8"
style="margin-bottom: 8px"
v-for="(reranker_reference, index) in form_data.reranker_reference_list"
:key="index"
>
<el-col :span="22">
<el-form-item
@@ -212,7 +213,7 @@ const form = {
const providerOptions = ref<Array<Provider>>([])
const modelOptions = ref<any>(null)
const openParamSettingDialog = () => {
ParamSettingDialogRef.value?.open(form_data.value.dataset_setting, 'WORK_FLOW')
ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW')
}
const deleteCondition = (index: number) => {
const list = cloneDeep(props.nodeModel.properties.node_data.reranker_reference_list)
@@ -242,7 +243,7 @@ const form_data = computed({
}
})
function refreshParam(data: any) {
set(props.nodeModel.properties.node_data, 'reranker_setting', data)
set(props.nodeModel.properties.node_data, 'reranker_setting', data.dataset_setting)
}
function getModel() {
if (id) {

View File

@@ -159,11 +159,11 @@ const datasetList = ref<any>([])
const datasetLoading = ref(false)
function refreshParam(data: any) {
set(props.nodeModel.properties.node_data, 'dataset_setting', data)
set(props.nodeModel.properties.node_data, 'dataset_setting', data.dataset_setting)
}
const openParamSettingDialog = () => {
ParamSettingDialogRef.value?.open(form_data.value.dataset_setting, 'WORK_FLOW')
ParamSettingDialogRef.value?.open(form_data.value, 'WORK_FLOW')
}
function removeDataset(id: any) {

View File

@@ -2,25 +2,18 @@
<NodeContainer :nodeModel="nodeModel">
<h5 class="title-decoration-1 mb-8">全局变量</h5>
<div
v-for="item in nodeModel.properties.config.globalFields"
class="flex-between border-r-4 p-8-12 mb-8 layout-bg lighter"
@mouseenter="showicon = true"
@mouseleave="showicon = false"
>
<span>当前时间 {time}</span>
<span>{{ item.label }} {{ '{' + item.value + '}' }}</span>
<el-tooltip effect="dark" content="复制参数" placement="top" v-if="showicon === true">
<el-button link @click="copyClick(globeLabel)" style="padding: 0">
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</el-tooltip>
</div>
<div v-for="(item, index) in inputFieldList" :key="index"
class="flex-between border-r-4 p-8-12 mb-8 layout-bg lighter"
@mouseenter="showicon = true"
@mouseleave="showicon = false"
>
<span>{{ item.name }} {{ '{' + item.variable + '}' }}</span>
<el-tooltip effect="dark" content="复制参数" placement="top" v-if="showicon === true">
<el-button link @click="copyClick('{{' + '全局变量.' + item.variable + '}}')" style="padding: 0">
<el-button
link
@click="copyClick('{{' + '全局变量.' + item.value + '}}')"
style="padding: 0"
>
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</el-tooltip>
@@ -28,34 +21,36 @@
</NodeContainer>
</template>
<script setup lang="ts">
import { set } from 'lodash'
import { cloneDeep, set } from 'lodash'
import NodeContainer from '@/workflow/common/NodeContainer.vue'
import { copyClick } from '@/utils/clipboard'
import { ref, computed, onMounted } from 'vue'
const props = defineProps<{ nodeModel: any }>()
const globeLabel = '{{全局变量.time}}'
const showicon = ref(false)
const globalFields = [
{ label: '当前时间', value: 'time' },
{ label: '历史聊天记录', value: 'history_context' },
{ label: '对话id', value: 'chat_id' }
]
const inputFieldList = ref<any[]>([])
onMounted(() => {
props.nodeModel.graphModel.nodes
const getRefreshFieldList = () => {
return props.nodeModel.graphModel.nodes
.filter((v: any) => v.id === 'base-node')
.map((v: any) => {
// eslint-disable-next-line vue/no-mutating-props
props.nodeModel.properties.config.globalFields = [
{
label: '当前时间',
value: 'time'
}, ...v.properties.input_field_list.map((i: any) => {
return { label: i.name, value: i.variable }
})
]
inputFieldList.value = v.properties.input_field_list
})
.map((v: any) => cloneDeep(v.properties.input_field_list))
.reduce((x: any, y: any) => [...x, ...y], [])
.map((i: any) => ({ label: i.name, value: i.variable }))
}
const refreshFieldList = () => {
const refreshFieldList = getRefreshFieldList()
set(props.nodeModel.properties.config, 'globalFields', [...globalFields, ...refreshFieldList])
}
props.nodeModel.graphModel.eventCenter.on('refreshFieldList', refreshFieldList)
onMounted(() => {
refreshFieldList()
})
</script>
<style lang="scss" scoped></style>
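
To make the refreshFieldList flow above concrete: getRefreshFieldList flattens the base node's input_field_list into label/value pairs, which are appended to the three built-in globalFields whenever a refreshFieldList event fires. A small worked sketch with one assumed user-defined field:

```ts
// Worked example with assumed data: one custom field configured on the base node.
const input_field_list = [{ name: '用户名', variable: 'username' }]

const globalFields = [
  { label: '当前时间', value: 'time' },
  { label: '历史聊天记录', value: 'history_context' },
  { label: '对话id', value: 'chat_id' }
]

const refreshed = [
  ...globalFields,
  ...input_field_list.map((i) => ({ label: i.name, value: i.variable }))
]
// refreshed ends with { label: '用户名', value: 'username' },
// whose copy target in the UI is '{{全局变量.username}}'
```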