Compare commits


24 Commits
main ... v1.4.1

Author SHA1 Message Date
shaohuzhang1 5971a4c183 fix: Fix wrong embedding model when creating a web knowledge base (#948)
(cherry picked from commit 3edb1f29b1)
2024-08-08 11:08:01 +08:00
shaohuzhang1 c5c816b981 fix: Fix data loss in edge cases during paragraph splitting #938 (#946)
(cherry picked from commit 0ad5a76598)
2024-08-07 19:54:50 +08:00
shaohuzhang1 349328f608 fix: Fix the multi-round dialogue setting not taking effect on the debug page (#945)
(cherry picked from commit e59e26208b)
2024-08-07 15:22:13 +08:00
shaohuzhang1 452213e6e1 fix: Raise the image-upload pixel validation limit to 20 billion 2024-08-07 14:30:42 +08:00
shaohuzhang1 8b0b2b4415 fix: Fix incorrect document status when syncing documents (#942)
(cherry picked from commit 59af953caa)
2024-08-07 14:30:37 +08:00
shaohuzhang1 de6b442f31 fix: Model management: raise the model-name length limit from 20 to 64 characters (#940)
(cherry picked from commit 0e186f3f5a)
2024-08-07 11:45:29 +08:00
wangdan-fit2cloud 7974aeaa86 perf: Cap avatar icons at 10 MB
(cherry picked from commit 889ebe4ed5)
2024-08-07 11:28:36 +08:00
wangdan-fit2cloud ebcef8f126 perf: Avatar upload now supports GIF; size limit raised to 10 MB
(cherry picked from commit 2605d50bd0)
2024-08-07 11:28:36 +08:00
shaohuzhang1 a9d767c67a fix: Fix input-box scrolling when workflow prompts use the Markdown-format input box (#939)
(cherry picked from commit 4b2213a171)
2024-08-07 11:28:30 +08:00
shaohuzhang1 77814972a4 fix: For direct node answers, separate different nodes' responses with line breaks (#935)
(cherry picked from commit 8453ea2b4b)
2024-08-06 16:58:36 +08:00
shaohuzhang1 1721344cc6 fix: Stop embedding documents after their knowledge base has been deleted (#934)
(cherry picked from commit 864bca6450)
2024-08-06 16:25:44 +08:00
shaohuzhang1 1117814a08 fix: Fix QA file parsing failure (#933)
(cherry picked from commit b3c7120372)
2024-08-06 16:25:38 +08:00
shaohuzhang1 cb9bbccd4a fix: Fix inability to upload images with very high pixel counts (#932)
(cherry picked from commit 6f2d437ac8)
2024-08-06 14:05:09 +08:00
shaohuzhang1 38b58d3e5e fix: Fix inability to add Ollama models with a secondary path #805 (#930)
(cherry picked from commit 62b12c3124)
2024-08-06 11:43:47 +08:00
shaohuzhang1 84039b1272 fix: Fix error when editing a question in knowledge base question management (#929)
(cherry picked from commit d33bf6a8e8)
2024-08-06 11:43:46 +08:00
shaohuzhang1 e8e6c489fd fix: Fix duplicated results in node output (#928)
(cherry picked from commit db167dd8b4)
2024-08-06 11:43:35 +08:00
wangdan-fit2cloud 73904d5407 fix: [Application] Fix advanced-orchestration auto-save for users with use-only permission
(cherry picked from commit b55524ad4e)
2024-08-06 11:43:29 +08:00
wangdan-fit2cloud 9927c67ba3 perf: [Q&A page] Dialog supports formatted text (#909)
(cherry picked from commit 097b5dd3ec)
2024-08-06 11:43:29 +08:00
shaohuzhang1 dc371e25b0 feat: Support direct output in node parameter settings 2024-08-06 11:42:57 +08:00
shaohuzhang1 f2b46225ac feat: Support direct output in node parameter settings 2024-08-06 11:42:56 +08:00
wangdan-fit2cloud 98cc8cd0e8 feat: Support direct output in node parameter settings 2024-08-06 11:42:56 +08:00
wangdan-fit2cloud 03ecebe506 feat: Support direct output in node parameter settings 2024-08-06 11:42:55 +08:00
shaohuzhang1 89b9f06f45 fix: Fix garbled answers when multiple users query the iFlytek Spark model concurrently #917 (#920)
(cherry picked from commit 76c1acbabb)
2024-08-06 11:42:55 +08:00
shaohuzhang1 209702cad2 feat: Add a return-result switch to the AI chat, question-optimization, and specified-reply nodes 2024-08-06 11:42:55 +08:00
41 changed files with 477 additions and 327 deletions

View File

@@ -10,6 +10,7 @@ import time
from abc import abstractmethod
from typing import Type, Dict, List
from django.core import cache
from django.db.models import QuerySet
from rest_framework import serializers
@@ -18,7 +19,6 @@ from application.models.api_key_model import ApplicationPublicAccessClient
from common.constants.authentication_type import AuthenticationType
from common.field.common import InstanceField
from common.util.field_message import ErrMessage
from django.core import cache
chat_cache = cache.caches['chat_cache']
@@ -27,9 +27,14 @@ def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if step_variable is not None:
for key in step_variable:
node.context[key] = step_variable[key]
if workflow.is_result() and 'answer' in step_variable:
answer = step_variable['answer'] + '\n'
yield answer
workflow.answer += answer
if global_variable is not None:
for key in global_variable:
workflow.context[key] = global_variable[key]
node.context['run_time'] = time.time() - node.context['start_time']
class WorkFlowPostHandler:
@@ -70,18 +75,14 @@
class NodeResult:
def __init__(self, node_variable: Dict, workflow_variable: Dict, _to_response=None, _write_context=write_context):
def __init__(self, node_variable: Dict, workflow_variable: Dict,
_write_context=write_context):
self._write_context = _write_context
self.node_variable = node_variable
self.workflow_variable = workflow_variable
self._to_response = _to_response
def write_context(self, node, workflow):
self._write_context(self.node_variable, self.workflow_variable, node, workflow)
def to_response(self, chat_id, chat_record_id, node, workflow, post_handler: WorkFlowPostHandler):
return self._to_response(chat_id, chat_record_id, self.node_variable, self.workflow_variable, node, workflow,
post_handler)
return self._write_context(self.node_variable, self.workflow_variable, node, workflow)
def is_assertion_result(self):
return 'branch_id' in self.node_variable
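The hunks above replace the per-node `_to_response` hook with a single `write_context` that may return a generator: a result node yields its answer (so the caller can stream it) and appends it to `workflow.answer` for the chat record, with a newline separating different nodes' output. A reduced sketch of that contract, using illustrative stand-ins for the real `INode` and `WorkflowManage` classes:

```python
from typing import Dict


class DummyNode:
    """Illustrative stand-in for INode: only carries a context dict."""

    def __init__(self):
        self.context: Dict = {}


class DummyWorkflow:
    """Illustrative stand-in for WorkflowManage."""

    def __init__(self):
        self.answer = ''

    def is_result(self) -> bool:
        return True


def write_context(step_variable, global_variable, node, workflow):
    if step_variable is not None:
        node.context.update(step_variable)
        if workflow.is_result() and 'answer' in step_variable:
            answer = step_variable['answer'] + '\n'
            yield answer               # streamed to the client as it is produced
            workflow.answer += answer  # accumulated for the chat record


node, workflow = DummyNode(), DummyWorkflow()
chunks = write_context({'answer': 'hello'}, None, node, workflow)
# Blocking mode drains the generator; streaming mode forwards each chunk.
assert list(chunks) == ['hello\n'] and workflow.answer == 'hello\n'
```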

View File

@@ -22,6 +22,8 @@ class ChatNodeSerializer(serializers.Serializer):
# Number of dialogue rounds
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
class IChatNode(INode):
type = 'ai-chat-node'

View File

@@ -13,12 +13,25 @@ from typing import List, Dict
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import BaseMessage
from application.flow import tools
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
from setting.models_provider.tools import get_model_instance_by_model_user_id
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
chat_model = node_variable.get('chat_model')
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
if workflow.is_result():
workflow.answer += answer
def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
Write context data (streaming)
@@ -31,15 +44,10 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
answer = ''
for chunk in response:
answer += chunk.content
chat_model = node_variable.get('chat_model')
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
yield chunk.content
answer += '\n'
yield '\n'
_write_context(node_variable, workflow_variable, node, workflow, answer)
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
@@ -51,71 +59,8 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
@param workflow: workflow manager
"""
response = node_variable.get('result')
chat_model = node_variable.get('chat_model')
answer = response.content
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
def get_to_response_write_context(node_variable: Dict, node: INode):
def _write_context(answer, status=200):
chat_model = node_variable.get('chat_model')
if status == 200:
answer_tokens = chat_model.get_num_tokens(answer)
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
else:
answer_tokens = 0
message_tokens = 0
node.err_message = answer
node.status = status
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['run_time'] = time.time() - node.context['start_time']
return _write_context
def to_stream_response(chat_id, chat_record_id, node_variable: Dict, workflow_variable: Dict, node, workflow,
post_handler):
"""
Convert streaming data into a streaming response
@param chat_id: chat id
@param chat_record_id: chat record id
@param node_variable: node data
@param workflow_variable: workflow data
@param node: node
@param workflow: workflow manager
@param post_handler: post-processor, runs after the result is emitted
@return: streaming response
"""
response = node_variable.get('result')
_write_context = get_to_response_write_context(node_variable, node)
return tools.to_stream_response(chat_id, chat_record_id, response, workflow, _write_context, post_handler)
def to_response(chat_id, chat_record_id, node_variable: Dict, workflow_variable: Dict, node, workflow,
post_handler):
"""
Convert the result into a response
@param chat_id: chat id
@param chat_record_id: chat record id
@param node_variable: node data
@param workflow_variable: workflow data
@param node: node
@param workflow: workflow manager
@param post_handler: post-processor
@return: response
"""
response = node_variable.get('result')
_write_context = get_to_response_write_context(node_variable, node)
return tools.to_response(chat_id, chat_record_id, response, workflow, _write_context, post_handler)
_write_context(node_variable, workflow_variable, node, workflow, answer)
class BaseChatNode(IChatNode):
@@ -132,13 +77,12 @@ class BaseChatNode(IChatNode):
r = chat_model.stream(message_list)
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context_stream,
_to_response=to_stream_response)
_write_context=write_context_stream)
else:
r = chat_model.invoke(message_list)
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context, _to_response=to_response)
_write_context=write_context)
@staticmethod
def get_history_message(history_chat_record, dialogue_number):

View File

@@ -20,6 +20,7 @@ class ReplyNodeParamsSerializer(serializers.Serializer):
fields = serializers.ListField(required=False, error_messages=ErrMessage.list("引用字段"))
content = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char("直接回答内容"))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)

View File

@@ -6,69 +6,19 @@
@date: 2024/6/11 17:25
@desc:
"""
from typing import List, Dict
from typing import List
from langchain_core.messages import AIMessage, AIMessageChunk
from application.flow import tools
from application.flow.i_step_node import NodeResult, INode
from application.flow.i_step_node import NodeResult
from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
def get_to_response_write_context(node_variable: Dict, node: INode):
def _write_context(answer, status=200):
node.context['answer'] = answer
return _write_context
def to_stream_response(chat_id, chat_record_id, node_variable: Dict, workflow_variable: Dict, node, workflow,
post_handler):
"""
Convert streaming data into a streaming response
@param chat_id: chat id
@param chat_record_id: chat record id
@param node_variable: node data
@param workflow_variable: workflow data
@param node: node
@param workflow: workflow manager
@param post_handler: post-processor, runs after the result is emitted
@return: streaming response
"""
response = node_variable.get('result')
_write_context = get_to_response_write_context(node_variable, node)
return tools.to_stream_response(chat_id, chat_record_id, response, workflow, _write_context, post_handler)
def to_response(chat_id, chat_record_id, node_variable: Dict, workflow_variable: Dict, node, workflow,
post_handler):
"""
Convert the result into a response
@param chat_id: chat id
@param chat_record_id: chat record id
@param node_variable: node data
@param workflow_variable: workflow data
@param node: node
@param workflow: workflow manager
@param post_handler: post-processor
@return: response
"""
response = node_variable.get('result')
_write_context = get_to_response_write_context(node_variable, node)
return tools.to_response(chat_id, chat_record_id, response, workflow, _write_context, post_handler)
class BaseReplyNode(IReplyNode):
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
if reply_type == 'referencing':
result = self.get_reference_content(fields)
else:
result = self.generate_reply_content(content)
if stream:
return NodeResult({'result': iter([AIMessageChunk(content=result)]), 'answer': result}, {},
_to_response=to_stream_response)
else:
return NodeResult({'result': AIMessage(content=result), 'answer': result}, {}, _to_response=to_response)
return NodeResult({'answer': result}, {})
def generate_reply_content(self, prompt):
return self.workflow_manage.generate_prompt(prompt)

View File

@@ -22,6 +22,8 @@ class QuestionNodeSerializer(serializers.Serializer):
# Number of dialogue rounds
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean('是否返回内容'))
class IQuestionNode(INode):
type = 'question-node'

View File

@@ -13,12 +13,25 @@ from typing import List, Dict
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import BaseMessage
from application.flow import tools
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.question_node.i_question_node import IQuestionNode
from setting.models_provider.tools import get_model_instance_by_model_user_id
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
chat_model = node_variable.get('chat_model')
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
if workflow.is_result():
workflow.answer += answer
def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
Write context data (streaming)
@@ -31,15 +44,10 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
answer = ''
for chunk in response:
answer += chunk.content
chat_model = node_variable.get('chat_model')
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
yield chunk.content
answer += '\n'
yield '\n'
_write_context(node_variable, workflow_variable, node, workflow, answer)
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
@@ -51,71 +59,8 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
@param workflow: workflow manager
"""
response = node_variable.get('result')
chat_model = node_variable.get('chat_model')
answer = response.content
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
def get_to_response_write_context(node_variable: Dict, node: INode):
def _write_context(answer, status=200):
chat_model = node_variable.get('chat_model')
if status == 200:
answer_tokens = chat_model.get_num_tokens(answer)
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
else:
answer_tokens = 0
message_tokens = 0
node.err_message = answer
node.status = status
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['run_time'] = time.time() - node.context['start_time']
return _write_context
def to_stream_response(chat_id, chat_record_id, node_variable: Dict, workflow_variable: Dict, node, workflow,
post_handler):
"""
Convert streaming data into a streaming response
@param chat_id: chat id
@param chat_record_id: chat record id
@param node_variable: node data
@param workflow_variable: workflow data
@param node: node
@param workflow: workflow manager
@param post_handler: post-processor, runs after the result is emitted
@return: streaming response
"""
response = node_variable.get('result')
_write_context = get_to_response_write_context(node_variable, node)
return tools.to_stream_response(chat_id, chat_record_id, response, workflow, _write_context, post_handler)
def to_response(chat_id, chat_record_id, node_variable: Dict, workflow_variable: Dict, node, workflow,
post_handler):
"""
Convert the result into a response
@param chat_id: chat id
@param chat_record_id: chat record id
@param node_variable: node data
@param workflow_variable: workflow data
@param node: node
@param workflow: workflow manager
@param post_handler: post-processor
@return: response
"""
response = node_variable.get('result')
_write_context = get_to_response_write_context(node_variable, node)
return tools.to_response(chat_id, chat_record_id, response, workflow, _write_context, post_handler)
_write_context(node_variable, workflow_variable, node, workflow, answer)
class BaseQuestionNode(IQuestionNode):
@@ -131,15 +76,13 @@ class BaseQuestionNode(IQuestionNode):
if stream:
r = chat_model.stream(message_list)
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
'get_to_response_write_context': get_to_response_write_context,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context_stream,
_to_response=to_stream_response)
_write_context=write_context_stream)
else:
r = chat_model.invoke(message_list)
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context, _to_response=to_response)
_write_context=write_context)
@staticmethod
def get_history_message(history_chat_record, dialogue_number):

View File

@@ -85,3 +85,21 @@ def to_response(chat_id, chat_record_id, response: BaseMessage, workflow, write_
post_handler.handler(chat_id, chat_record_id, answer, workflow)
return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
'content': answer, 'is_end': True})
def to_response_simple(chat_id, chat_record_id, response: BaseMessage, workflow,
post_handler: WorkFlowPostHandler):
answer = response.content
post_handler.handler(chat_id, chat_record_id, answer, workflow)
return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
'content': answer, 'is_end': True})
def to_stream_response_simple(stream_event):
r = StreamingHttpResponse(
streaming_content=stream_event,
content_type='text/event-stream;charset=utf-8',
charset='utf-8')
r['Cache-Control'] = 'no-cache'
return r
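`to_stream_response_simple` wraps any chunk generator in a server-sent-events response. A minimal usage sketch; the demo payload below is a simplified version of the frames that `get_chunk_content` in `WorkflowManage` produces:

```python
import json

from django.http import StreamingHttpResponse


def to_stream_response_simple(stream_event):
    r = StreamingHttpResponse(
        streaming_content=stream_event,
        content_type='text/event-stream;charset=utf-8',
        charset='utf-8')
    r['Cache-Control'] = 'no-cache'
    return r


def demo_events():
    # Each SSE frame is a "data:" line with a JSON payload, ended by a blank line.
    for chunk in ['Hel', 'lo']:
        yield 'data: ' + json.dumps({'content': chunk, 'is_end': False}) + '\n\n'
    yield 'data: ' + json.dumps({'content': '', 'is_end': True}) + '\n\n'


response = to_stream_response_simple(demo_events())  # returned from a Django view
```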

View File

@@ -6,10 +6,11 @@
@date: 2024/1/9 17:40
@desc:
"""
import json
from functools import reduce
from typing import List, Dict
from langchain_core.messages import AIMessageChunk, AIMessage
from langchain_core.messages import AIMessage
from langchain_core.prompts import PromptTemplate
from application.flow import tools
@@ -63,7 +64,6 @@ class Flow:
def get_search_node(self):
return [node for node in self.nodes if node.type == 'search-dataset-node']
def is_valid(self):
"""
Validate workflow data
@@ -140,33 +140,71 @@ class WorkflowManage:
self.work_flow_post_handler = work_flow_post_handler
self.current_node = None
self.current_result = None
self.answer = ""
def run(self):
"""
Run the workflow
"""
if self.params.get('stream'):
return self.run_stream()
return self.run_block()
def run_block(self):
try:
while self.has_next_node(self.current_result):
self.current_node = self.get_next_node()
self.node_context.append(self.current_node)
self.current_result = self.current_node.run()
if self.has_next_node(self.current_result):
self.current_result.write_context(self.current_node, self)
else:
r = self.current_result.to_response(self.params['chat_id'], self.params['chat_record_id'],
self.current_node, self,
self.work_flow_post_handler)
return r
result = self.current_result.write_context(self.current_node, self)
if result is not None:
list(result)
if not self.has_next_node(self.current_result):
return tools.to_response_simple(self.params['chat_id'], self.params['chat_record_id'],
AIMessage(self.answer), self,
self.work_flow_post_handler)
except Exception as e:
if self.params.get('stream'):
return tools.to_stream_response(self.params['chat_id'], self.params['chat_record_id'],
iter([AIMessageChunk(str(e))]), self,
self.current_node.get_write_error_context(e),
self.work_flow_post_handler)
else:
return tools.to_response(self.params['chat_id'], self.params['chat_record_id'],
AIMessage(str(e)), self, self.current_node.get_write_error_context(e),
self.work_flow_post_handler)
return tools.to_response(self.params['chat_id'], self.params['chat_record_id'],
AIMessage(str(e)), self, self.current_node.get_write_error_context(e),
self.work_flow_post_handler)
def run_stream(self):
return tools.to_stream_response_simple(self.stream_event())
def stream_event(self):
try:
while self.has_next_node(self.current_result):
self.current_node = self.get_next_node()
self.node_context.append(self.current_node)
self.current_result = self.current_node.run()
result = self.current_result.write_context(self.current_node, self)
if result is not None:
for r in result:
if self.is_result():
yield self.get_chunk_content(r)
if not self.has_next_node(self.current_result):
yield self.get_chunk_content('', True)
break
self.work_flow_post_handler.handler(self.params['chat_id'], self.params['chat_record_id'],
self.answer,
self)
except Exception as e:
self.current_node.get_write_error_context(e)
self.answer += str(e)
self.work_flow_post_handler.handler(self.params['chat_id'], self.params['chat_record_id'],
self.answer,
self)
yield self.get_chunk_content(str(e), True)
def is_result(self):
"""
Determine whether the current node should return output to the user
@return:
"""
return self.current_node.node_params.get('is_result', not self.has_next_node(
self.current_result)) if self.current_node.node_params is not None else False
def get_chunk_content(self, chunk, is_end=False):
return 'data: ' + json.dumps(
{'chat_id': self.params['chat_id'], 'id': self.params['chat_record_id'], 'operate': True,
'content': chunk, 'is_end': is_end}, ensure_ascii=False) + "\n\n"
def has_next_node(self, node_result: NodeResult | None):
"""

View File

@@ -28,7 +28,6 @@ from application.models.api_key_model import ApplicationAccessToken
from application.serializers.application_serializers import ModelDatasetAssociation, DatasetSettingSerializer, \
ModelSettingSerializer
from application.serializers.chat_message_serializers import ChatInfo
from common.config.embedding_config import ModelManage
from common.constants.permission_constants import RoleConstants
from common.db.search import native_search, native_page_search, page_search, get_dynamics_model
from common.event import ListenerManagement
@@ -40,8 +39,6 @@ from common.util.lock import try_lock, un_lock
from dataset.models import Document, Problem, Paragraph, ProblemParagraphMapping
from dataset.serializers.common_serializers import get_embedding_model_by_dataset_id
from dataset.serializers.paragraph_serializers import ParagraphSerializers
from setting.models import Model
from setting.models_provider import get_model
from smartdoc.conf import PROJECT_DIR
chat_cache = caches['chat_cache']
@@ -312,7 +309,8 @@ class ChatSerializers(serializers.Serializer):
chat_id = str(uuid.uuid1())
model_id = self.data.get('model_id')
dataset_id_list = self.data.get('dataset_id_list')
application = Application(id=None, dialogue_number=3, model_id=model_id,
dialogue_number = 3 if self.data.get('multiple_rounds_dialogue', False) else 0
application = Application(id=None, dialogue_number=dialogue_number, model_id=model_id,
dataset_setting=self.data.get('dataset_setting'),
model_setting=self.data.get('model_setting'),
problem_optimization=self.data.get('problem_optimization'),

View File

@@ -18,7 +18,7 @@ class ModelManage:
@staticmethod
def get_model(_id, get_model):
model_instance = ModelManage.cache.get(_id)
if model_instance is None:
if model_instance is None or not model_instance.is_cache_model():
model_instance = get_model(_id)
ModelManage.cache.set(_id, model_instance, timeout=60 * 30)
return model_instance
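With this one-line change a cached model instance is reused only if it declares itself cache-safe; models carrying per-conversation state, like `XFChatSparkLLM` further down, override `is_cache_model()` to return `False` and are rebuilt on every request, which is what fixes the cross-user answer mix-up (#917). A sketch of the effect, with a plain dict in place of the Django cache and a hypothetical stateful model:

```python
class ModelManageSketch:
    """Simplified stand-in for ModelManage (no 30-minute timeout here)."""

    cache: dict = {}

    @staticmethod
    def get_model(_id, get_model):
        model_instance = ModelManageSketch.cache.get(_id)
        # Rebuild when missing OR when the model opts out of caching.
        if model_instance is None or not model_instance.is_cache_model():
            model_instance = get_model(_id)
            ModelManageSketch.cache[_id] = model_instance
        return model_instance


class StatefulModel:
    """Hypothetical model that must not be shared across users."""

    @staticmethod
    def is_cache_model():
        return False


a = ModelManageSketch.get_model('m1', lambda _id: StatefulModel())
b = ModelManageSketch.get_model('m1', lambda _id: StatefulModel())
assert a is not b  # a fresh instance per request
```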

View File

@@ -110,11 +110,16 @@ class ListenerManagement:
@embedding_poxy
def embedding_by_paragraph_data_list(data_list, paragraph_id_list, embedding_model: Embeddings):
max_kb.info(f'开始--->向量化段落:{paragraph_id_list}')
status = Status.success
try:
# Delete paragraphs
VectorStore.get_embedding_vector().delete_by_paragraph_ids(paragraph_id_list)
def is_save_function():
return QuerySet(Paragraph).filter(id__in=paragraph_id_list).exists()
# Batch embedding
VectorStore.get_embedding_vector().batch_save(data_list, embedding_model)
VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_save_function)
except Exception as e:
max_kb_error.error(f'向量化段落:{paragraph_id_list}出现错误{str(e)}{traceback.format_exc()}')
status = Status.error
@@ -141,8 +146,12 @@
os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql')))
# Delete paragraphs
VectorStore.get_embedding_vector().delete_by_paragraph_id(paragraph_id)
def is_save_function():
return QuerySet(Paragraph).filter(id=paragraph_id).exists()
# Batch embedding
VectorStore.get_embedding_vector().batch_save(data_list, embedding_model)
VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_save_function)
except Exception as e:
max_kb_error.error(f'向量化段落:{paragraph_id}出现错误{str(e)}{traceback.format_exc()}')
status = Status.error
@@ -175,8 +184,12 @@
os.path.join(PROJECT_DIR, "apps", "common", 'sql', 'list_embedding_text.sql')))
# Delete the document's vector data
VectorStore.get_embedding_vector().delete_by_document_id(document_id)
def is_save_function():
return QuerySet(Document).filter(id=document_id).exists()
# Batch embedding
VectorStore.get_embedding_vector().batch_save(data_list, embedding_model)
VectorStore.get_embedding_vector().batch_save(data_list, embedding_model, is_save_function)
except Exception as e:
max_kb_error.error(f'向量化文档:{document_id}出现错误{str(e)}{traceback.format_exc()}')
status = Status.error
@@ -258,7 +271,7 @@
@staticmethod
def update_problem(args: UpdateProblemArgs):
problem_paragraph_mapping_list = QuerySet(ProblemParagraphMapping).filter(problem_id=args.problem_id)
embed_value = VectorStore.get_embedding_vector().embed_query(args.problem_content)
embed_value = args.embedding_model.embed_query(args.problem_content)
VectorStore.get_embedding_vector().update_by_source_ids([v.id for v in problem_paragraph_mapping_list],
{'embedding': embed_value})

View File

@@ -31,6 +31,7 @@ def handle_sheet(file_name, sheet):
problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0]
title = get_row_value(row, title_row_index_dict, 'title')
title = str(title) if title is not None else ''
content = str(content)
paragraph_list.append({'title': title[0:255],
'content': content[0:4096],
'problem_list': problem_list})

View File

@@ -33,7 +33,7 @@ def handle_sheet(file_name, sheet):
problem_list = [{'content': p[0:255]} for p in problem.split('\n') if len(p.strip()) > 0]
title = get_row_value(row, title_row_index_dict, 'title')
title = str(title.value) if title is not None and title.value is not None else ''
content = content.value
content = str(content.value)
paragraph_list.append({'title': title[0:255],
'content': content[0:4096],
'problem_list': problem_list})

View File

@@ -27,7 +27,7 @@ def get_level_block(text, level_content_list, level_content_index, cursor):
level_content_list) else None
start_index = text.index(start_content, cursor)
end_index = text.index(next_content, start_index + 1) if next_content is not None else len(text)
return text[start_index+len(start_content):end_index], end_index
return text[start_index + len(start_content):end_index], end_index
def to_tree_obj(content, state='title'):
@@ -303,17 +303,20 @@ class SplitModel:
level_content_list.insert(0, to_tree_obj(""))
cursor = 0
for i in range(len(level_content_list)):
block, cursor = get_level_block(text, level_content_list, i, cursor)
level_title_content_list = [item for item in level_content_list if item.get('state') == 'title']
for i in range(len(level_title_content_list)):
start_content: str = level_title_content_list[i].get('content')
if cursor < text.index(start_content, cursor):
level_content_list.insert(0, to_tree_obj(text[cursor: text.index(start_content, cursor)], 'block'))
block, cursor = get_level_block(text, level_title_content_list, i, cursor)
if len(block) == 0:
level_content_list[i]['children'] = [to_tree_obj("", "block")]
continue
children = self.parse_to_tree(text=block, index=index + 1)
level_content_list[i]['children'] = children
level_title_content_list[i]['children'] = children
first_child_idx_in_block = block.lstrip().index(children[0]["content"].lstrip())
if first_child_idx_in_block != 0:
inner_children = self.parse_to_tree(block[:first_child_idx_in_block], index + 1)
level_content_list[i]['children'].extend(inner_children)
level_title_content_list[i]['children'].extend(inner_children)
return level_content_list
def parse(self, text: str):

View File

@@ -463,6 +463,7 @@ class DataSetSerializers(serializers.ModelSerializer):
dataset = DataSet(
**{'id': dataset_id, 'name': instance.get("name"), 'desc': instance.get('desc'), 'user_id': user_id,
'type': Type.web,
'embedding_mode_id': instance.get('embedding_mode_id'),
'meta': {'source_url': instance.get('source_url'), 'selector': instance.get('selector'),
'embedding_mode_id': instance.get('embedding_mode_id')}})
dataset.save()

View File

@@ -365,7 +365,7 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
if document.type != Type.web:
return True
try:
document.status = Status.embedding
document.status = Status.queue_up
document.save()
source_url = document.meta.get('source_url')
selector_list = document.meta.get('selector').split(

View File

@@ -84,9 +84,9 @@ class BaseVectorStore(ABC):
chunk_list = chunk_data(data)
result = sub_array(chunk_list)
for child_array in result:
self._batch_save(child_array, embedding)
self._batch_save(child_array, embedding, lambda: True)
def batch_save(self, data_list: List[Dict], embedding: Embeddings):
def batch_save(self, data_list: List[Dict], embedding: Embeddings, is_save_function):
# Acquire the lock
lock.acquire()
try:
@@ -100,7 +100,10 @@
chunk_list = chunk_data_list(data_list)
result = sub_array(chunk_list)
for child_array in result:
self._batch_save(child_array, embedding)
if is_save_function():
self._batch_save(child_array, embedding, is_save_function)
else:
break
finally:
# Release the lock
lock.release()
@@ -113,7 +116,7 @@
pass
@abstractmethod
def _batch_save(self, text_list: List[Dict], embedding: Embeddings):
def _batch_save(self, text_list: List[Dict], embedding: Embeddings, is_save_function):
pass
def search(self, query_text, dataset_id_list: list[str], exclude_document_id_list: list[str],
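`batch_save` now threads an `is_save_function` callback down to every chunk write: before embedding the next chunk it re-checks that the source rows still exist, so a knowledge base or document deleted mid-run stops the loop instead of leaving orphaned vectors (#934). A condensed sketch of the guard, with locking omitted and an `embed_chunk` stand-in for the real `_batch_save`:

```python
from typing import Callable, Dict, List


def batch_save(data_list: List[Dict],
               embed_chunk: Callable[[List[Dict]], None],
               is_save_function: Callable[[], bool],
               chunk_size: int = 5) -> None:
    """Embed data chunk by chunk, aborting once the source rows are gone."""
    chunks = [data_list[i:i + chunk_size]
              for i in range(0, len(data_list), chunk_size)]
    for chunk in chunks:
        if not is_save_function():  # e.g. QuerySet(Paragraph).filter(...).exists()
            break                   # source deleted mid-run: stop embedding
        embed_chunk(chunk)
```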

View File

@@ -55,7 +55,7 @@ class PGVector(BaseVectorStore):
embedding.save()
return True
def _batch_save(self, text_list: List[Dict], embedding: Embeddings):
def _batch_save(self, text_list: List[Dict], embedding: Embeddings, is_save_function):
texts = [row.get('text') for row in text_list]
embeddings = embedding.embed_documents(texts)
embedding_list = [Embedding(id=uuid.uuid1(),
@@ -68,7 +68,8 @@
embedding=embeddings[index],
search_vector=to_ts_vector(text_list[index]['text'])) for index in
range(0, len(text_list))]
QuerySet(Embedding).bulk_create(embedding_list) if len(embedding_list) > 0 else None
if is_save_function():
QuerySet(Embedding).bulk_create(embedding_list) if len(embedding_list) > 0 else None
return True
def hit_test(self, query_text, dataset_id_list: list[str], exclude_document_id_list: list[str], top_number: int,

View File

@@ -89,6 +89,10 @@ class MaxKBBaseModel(ABC):
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
pass
@staticmethod
def is_cache_model():
return True
class BaseModelCredential(ABC):

View File

@@ -18,9 +18,10 @@ from setting.models_provider.base_model_provider import MaxKBBaseModel
def get_base_url(url: str):
parse = urlparse(url)
return ParseResult(scheme=parse.scheme, netloc=parse.netloc, path='', params='',
query='',
fragment='').geturl()
result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
query='',
fragment='').geturl()
return result_url[:-1] if result_url.endswith("/") else result_url
class OllamaChatModel(MaxKBBaseModel, ChatOpenAI):
@@ -28,7 +29,8 @@ class OllamaChatModel(MaxKBBaseModel, ChatOpenAI):
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
api_base = model_credential.get('api_base', '')
base_url = get_base_url(api_base)
return OllamaChatModel(model=model_name, openai_api_base=(base_url + '/v1'),
base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
return OllamaChatModel(model=model_name, openai_api_base=base_url,
openai_api_key=model_credential.get('api_key'))
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
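The rewritten `get_base_url` keeps the URL path, so a secondary path such as `/ollama` behind a reverse proxy survives (#805), and strips any trailing slash; the caller then appends `/v1` only when it is missing. A quick check of the behavior (host names are illustrative):

```python
from urllib.parse import ParseResult, urlparse


def get_base_url(url: str) -> str:
    parse = urlparse(url)
    result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc,
                             path=parse.path, params='', query='',
                             fragment='').geturl()
    return result_url[:-1] if result_url.endswith('/') else result_url


assert get_base_url('http://host:11434/') == 'http://host:11434'
assert get_base_url('http://host/ollama/') == 'http://host/ollama'  # path preserved
base = get_base_url('http://host/ollama/v1')
base = base if base.endswith('/v1') else base + '/v1'
assert base == 'http://host/ollama/v1'  # '/v1' not duplicated
```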

View File

@@ -113,9 +113,10 @@ model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_
def get_base_url(url: str):
parse = urlparse(url)
return ParseResult(scheme=parse.scheme, netloc=parse.netloc, path='', params='',
query='',
fragment='').geturl()
result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
query='',
fragment='').geturl()
return result_url[:-1] if result_url.endswith("/") else result_url
def convert_to_down_model_chunk(row_str: str, chunk_index: int):

View File

@@ -20,6 +20,9 @@ from setting.models_provider.base_model_provider import MaxKBBaseModel
class XFChatSparkLLM(MaxKBBaseModel, ChatSparkLLM):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):

View File

@@ -65,7 +65,7 @@ class ModelSerializer(serializers.Serializer):
class Query(serializers.Serializer):
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
name = serializers.CharField(required=False, max_length=20,
name = serializers.CharField(required=False, max_length=64,
error_messages=ErrMessage.char("模型名称"))
model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型"))
@@ -99,7 +99,7 @@ class ModelSerializer(serializers.Serializer):
class Edit(serializers.Serializer):
user_id = serializers.CharField(required=False, error_messages=ErrMessage.uuid("用户id"))
name = serializers.CharField(required=False, max_length=20,
name = serializers.CharField(required=False, max_length=64,
error_messages=ErrMessage.char("模型名称"))
model_type = serializers.CharField(required=False, error_messages=ErrMessage.char("模型类型"))
@@ -142,7 +142,7 @@ class ModelSerializer(serializers.Serializer):
class Create(serializers.Serializer):
user_id = serializers.CharField(required=True, error_messages=ErrMessage.uuid("用户id"))
name = serializers.CharField(required=True, max_length=20, error_messages=ErrMessage.char("模型名称"))
name = serializers.CharField(required=True, max_length=64, error_messages=ErrMessage.char("模型名称"))
provider = serializers.CharField(required=True, error_messages=ErrMessage.char("供应商"))

View File

@@ -3,13 +3,15 @@ import mimetypes
import os
from pathlib import Path
from PIL import Image
from ..const import CONFIG, PROJECT_DIR
mimetypes.add_type("text/css", ".css", True)
mimetypes.add_type("text/javascript", ".js", True)
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
Image.MAX_IMAGE_PIXELS = 20000000000
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/4.2/howto/deployment/checklist/
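`Image.MAX_IMAGE_PIXELS` is Pillow's decompression-bomb ceiling: by default (about 89 million pixels) larger images trigger a warning and, at roughly twice that, a `DecompressionBombError`, which is what rejected very high-resolution uploads (#932). Raising it to 20 billion pixels admits them; a short illustration of the setting (the file name is hypothetical):

```python
from PIL import Image

# Lift Pillow's decompression-bomb limit so high-pixel-count uploads decode
# instead of raising DecompressionBombError.
Image.MAX_IMAGE_PIXELS = 20000000000

# with Image.open('huge_upload.png') as img:  # hypothetical upload
#     img.thumbnail((512, 512))
```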

View File

@@ -104,7 +104,7 @@
</div>
<div class="card-never border-r-4 mt-8">
<h5 class="p-8-12">本次对话</h5>
<div class="p-8-12 border-t-dashed lighter pre-line">
<div class="p-8-12 border-t-dashed lighter pre-wrap">
{{ item.question || '-' }}
</div>
</div>

View File

@@ -42,7 +42,7 @@
</AppAvatar>
</div>
<div class="content">
<div class="text break-all">
<div class="text break-all pre-wrap">
{{ item.problem_text }}
</div>
</div>
@@ -607,7 +607,7 @@ watch(
onMounted(() => {
setTimeout(() => {
if (quickInputRef.value) {
if (quickInputRef.value && mode === 'embed') {
quickInputRef.value.textarea.style.height = '0'
}
}, 1800)

View File

@@ -48,10 +48,10 @@ export default {
upload: 'Upload',
default: 'Default Logo',
custom: 'Custom',
sizeTip: 'Suggested size 32*32, supports ico, png, size no more than 200KB',
sizeTip: 'Suggested size 32*32, supports jpg, png, gif, size no more than 10 MB',
cancel: 'Cancel',
save: 'Save',
fileSizeExceeded: 'File size exceeds 200KB',
fileSizeExceeded: 'File size exceeds 10 MB',
setSuccess: 'Setting Successful',
uploadImagePrompt: 'Please upload an image'
},

View File

@@ -42,15 +42,15 @@ export default {
disabledSuccess: '已禁用'
},
EditAvatarDialog: {
title: '编辑logo',
title: '应用头像',
customizeUpload: '自定义上传',
upload: '上传',
default: '默认logo',
custom: '自定义',
sizeTip: '建议尺寸 32*32,支持 ico、png,大小不超过200KB',
sizeTip: '建议尺寸 32*32,支持 JPG、PNG, GIF,大小不超过 10 MB',
cancel: '取消',
save: '保存',
fileSizeExceeded: '文件大小超过 200KB',
fileSizeExceeded: '文件大小超过 10 MB',
setSuccess: '设置成功',
uploadImagePrompt: '请上传一张图片'
},

View File

@@ -341,8 +341,8 @@ h5 {
word-break: break-all;
}
.pre-line {
white-space: pre-line;
.pre-wrap {
white-space: pre-wrap;
}
/*

View File

@@ -47,7 +47,7 @@
}}</el-button>
<template #tip>
<div class="el-upload__tip info" style="margin-top: 0">
建议尺寸 32*32,支持 JPG、PNG,大小不超过 200 KB
建议尺寸 32*32,支持 JPG、PNG, GIF,大小不超过 10 MB
</div>
</template>
</el-upload>
@@ -84,7 +84,7 @@
}}</el-button>
<template #tip>
<div class="el-upload__tip info" style="margin-top: 0">
建议尺寸 32*32,支持 JPG、PNG,大小不超过 200 KB
建议尺寸 32*32,支持 JPG、PNG, GIF,大小不超过 10 MB
</div>
</template>
</el-upload>
@@ -178,8 +178,8 @@ function resetForm() {
}
const onChange = (file: any, fileList: UploadFiles, attr: string) => {
// Limit: size must not exceed 200 KB
const isLimit = file?.size / 1024 < 200
// Limit: size must not exceed 10 MB
const isLimit = file?.size / 1024 / 1024 < 10
if (!isLimit) {
// @ts-ignore
MsgError(t('views.applicationOverview.appInfo.EditAvatarDialog.fileSizeExceeded'))

View File

@@ -1,9 +1,12 @@
<template>
<el-dialog :title="$t('views.applicationOverview.appInfo.EditAvatarDialog.title')" v-model="dialogVisible">
<el-dialog
:title="$t('views.applicationOverview.appInfo.EditAvatarDialog.title')"
v-model="dialogVisible"
>
<el-radio-group v-model="radioType" class="radio-block mb-16">
<div>
<el-radio value="default">
<p>{{$t('views.applicationOverview.appInfo.EditAvatarDialog.default')}}</p>
<p>{{ $t('views.applicationOverview.appInfo.EditAvatarDialog.default') }}</p>
<AppAvatar
v-if="detail?.name"
:name="detail?.name"
@@ -16,7 +19,7 @@
</div>
<div class="mt-8">
<el-radio value="custom">
<p>{{$t('views.applicationOverview.appInfo.EditAvatarDialog.customizeUpload')}}</p>
<p>{{ $t('views.applicationOverview.appInfo.EditAvatarDialog.customizeUpload') }}</p>
<div class="flex mt-8">
<AppAvatar
v-if="fileURL"
@@ -35,19 +38,25 @@
accept="image/*"
:on-change="onChange"
>
<el-button icon="Upload" :disabled="radioType !== 'custom'">{{$t('views.applicationOverview.appInfo.EditAvatarDialog.upload')}}</el-button>
<el-button icon="Upload" :disabled="radioType !== 'custom'">{{
$t('views.applicationOverview.appInfo.EditAvatarDialog.upload')
}}</el-button>
</el-upload>
</div>
<div class="el-upload__tip info mt-16">
{{$t('views.applicationOverview.appInfo.EditAvatarDialog.sizeTip')}}
{{ $t('views.applicationOverview.appInfo.EditAvatarDialog.sizeTip') }}
</div>
</el-radio>
</div>
</el-radio-group>
<template #footer>
<span class="dialog-footer">
<el-button @click.prevent="dialogVisible = false"> {{$t('views.applicationOverview.appInfo.EditAvatarDialog.cancel')}}</el-button>
<el-button type="primary" @click="submit" :loading="loading"> {{$t('views.applicationOverview.appInfo.EditAvatarDialog.save')}}</el-button>
<el-button @click.prevent="dialogVisible = false">
{{ $t('views.applicationOverview.appInfo.EditAvatarDialog.cancel') }}</el-button
>
<el-button type="primary" @click="submit" :loading="loading">
{{ $t('views.applicationOverview.appInfo.EditAvatarDialog.save') }}</el-button
>
</span>
</template>
</el-dialog>
@@ -94,17 +103,16 @@ const open = (data: any) => {
}
const onChange = (file: any) => {
// Limit: size must not exceed 200 KB
const isLimit = file?.size / 1024 < 200
// Limit: size must not exceed 10 MB
const isLimit = file?.size / 1024 / 1024 < 10
if (!isLimit) {
// @ts-ignore
MsgError(t('views.applicationOverview.appInfo.EditAvatarDialog.fileSizeExceeded'))
return false
} else {
} else {
iconFile.value = file
fileURL.value = URL.createObjectURL(file.raw)
}
}
function submit() {

View File

@@ -42,7 +42,7 @@
<AppIcon iconName="app-copy"></AppIcon>
</el-button>
</div>
<div class="mt-8 white-space">
<div class="mt-8 pre-wrap">
{{ source2 }}
</div>
</div>
@@ -109,9 +109,6 @@ defineExpose({ open })
font-size: 13px;
white-space: pre;
height: 180px;
.white-space {
white-space: pre-wrap;
}
}
}
</style>

View File

@@ -36,7 +36,7 @@
@mousedown="onmousedown(item)"
>
<component :is="iconComponent(`${item.type}-icon`)" class="mr-8 mt-4" :size="32" />
<div class="pre-line">
<div class="pre-wrap">
<div class="lighter">{{ item.label }}</div>
<el-text type="info" size="small">{{ item.text }}</el-text>
</div>
@@ -114,6 +114,7 @@ import { MsgSuccess, MsgConfirm, MsgError } from '@/utils/message'
import { datetimeFormat } from '@/utils/time'
import useStore from '@/stores'
import { WorkFlowInstance } from '@/workflow/common/validate'
import { hasPermission } from '@/utils/permission'
const { user, application } = useStore()
const router = useRouter()
@@ -250,7 +251,9 @@ const closeInterval = () => {
onMounted(() => {
getDetail()
//
initInterval()
if (hasPermission(`APPLICATION:MANAGE:${id}`, 'AND')) {
initInterval()
}
})
onBeforeUnmount(() => {

View File

@@ -38,7 +38,7 @@
</el-input>
</el-form-item>
</el-form>
<span v-else class="pre-line">{{ form?.content }}</span>
<span v-else class="pre-wrap">{{ form?.content }}</span>
</div>
</el-scrollbar>

View File

@@ -20,10 +20,10 @@ const props = defineProps({
})
function zoomIn() {
props.lf?.zoom(true)
props.lf?.zoom(true, [0, 0])
}
function zoomOut() {
props.lf?.zoom(false)
props.lf?.zoom(false, [0, 0])
}
function fitView() {
props.lf?.resetZoom()

View File

@@ -170,3 +170,13 @@ export const nodeDict: any = {
export function isWorkFlow(type: string | undefined) {
return type === 'WORK_FLOW'
}
export function isLastNode(nodeModel: any) {
const incoming = nodeModel.graphModel.getNodeIncomingNode(nodeModel.id)
const outcomming = nodeModel.graphModel.getNodeOutgoingNode(nodeModel.id)
if (incoming.length > 0 && outcomming.length === 0) {
return true
} else {
return false
}
}

View File

@@ -115,13 +115,23 @@
</el-tooltip>
</div>
</template>
<el-input
<MdEditor
@wheel="wheel"
@keydown="isKeyDown = true"
@keyup="isKeyDown = false"
class="reply-node-editor"
style="height: 150px"
v-model="chat_data.prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
/>
:preview="false"
:toolbars="[]"
:footers="footers"
>
<template #defFooters>
<el-button text type="info" @click="openDialog">
<AppIcon iconName="app-magnify" style="font-size: 16px"></AppIcon>
</el-button>
</template>
</MdEditor>
</el-form-item>
<el-form-item label="历史聊天记录">
<el-input-number
@@ -132,9 +142,34 @@
class="w-full"
/>
</el-form-item>
<el-form-item label="返回内容" @click.prevent>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>返回内容<span class="danger">*</span></span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content>
关闭后,该节点的内容则不输出给用户。
如果你想让用户看到该节点的输出内容,请打开开关。
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="chat_data.is_result" />
</el-form-item>
</el-form>
</el-card>
<!-- Reply content dialog -->
<el-dialog v-model="dialogVisible" title="提示词" append-to-body>
<MdEditor v-model="cloneContent" :preview="false" :toolbars="[]" :footers="[]"> </MdEditor>
<template #footer>
<div class="dialog-footer mt-24">
<el-button type="primary" @click="submitDialog"> 确认 </el-button>
</div>
</template>
</el-dialog>
<!-- Add template -->
<CreateModelDialog
ref="createModelRef"
@@ -156,6 +191,7 @@ import applicationApi from '@/api/application'
import useStore from '@/stores'
import { relatedObject } from '@/utils/utils'
import type { Provider } from '@/api/type/model'
import { isLastNode } from '@/workflow/common/data'
const { model } = useStore()
const isKeyDown = ref(false)
@@ -167,6 +203,17 @@ const wheel = (e: any) => {
return true
}
}
const dialogVisible = ref(false)
const cloneContent = ref('')
const footers: any = [null, '=', 0]
function openDialog() {
cloneContent.value = chat_data.value.prompt
dialogVisible.value = true
}
function submitDialog() {
set(props.nodeModel.properties.node_data, 'prompt', cloneContent.value)
dialogVisible.value = false
}
const {
params: { id }
} = app.config.globalProperties.$route as any
@@ -180,7 +227,8 @@ const form = {
model_id: '',
system: '',
prompt: defaultPrompt,
dialogue_number: 1
dialogue_number: 1,
is_result: false
}
const chat_data = computed({
@@ -240,7 +288,17 @@ const openCreateModel = (provider?: Provider) => {
onMounted(() => {
getProvider()
getModel()
if (isLastNode(props.nodeModel)) {
set(props.nodeModel.properties.node_data, 'is_result', true)
}
set(props.nodeModel, 'validate', validate)
})
</script>
<style lang="scss" scoped></style>
<style lang="scss" scoped>
.reply-node-editor {
:deep(.md-editor-footer) {
border: none !important;
}
}
</style>

View File

@@ -40,14 +40,32 @@
</el-form-item>
<el-form-item label="开场白">
<MdEditor
@wheel="wheel"
@keydown="isKeyDown = true"
@keyup="isKeyDown = false"
style="height: 150px"
v-model="form_data.prologue"
:preview="false"
:toolbars="[]"
:footers="[]"
/>
class="reply-node-editor"
:footers="footers"
>
<template #defFooters>
<el-button text type="info" @click="openDialog">
<AppIcon iconName="app-magnify" style="font-size: 16px"></AppIcon>
</el-button> </template
></MdEditor>
</el-form-item>
</el-form>
<!-- Reply content dialog -->
<el-dialog v-model="dialogVisible" title="开场白" append-to-body>
<MdEditor v-model="cloneContent" :preview="false" :toolbars="[]" :footers="[]"> </MdEditor>
<template #footer>
<div class="dialog-footer mt-24">
<el-button type="primary" @click="submitDialog"> 确认 </el-button>
</div>
</template>
</el-dialog>
</NodeContainer>
</template>
<script setup lang="ts">
@@ -63,6 +81,26 @@ const form = {
prologue:
'您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?'
}
const isKeyDown = ref(false)
const wheel = (e: any) => {
if (isKeyDown.value) {
e.preventDefault()
} else {
e.stopPropagation()
return true
}
}
const dialogVisible = ref(false)
const cloneContent = ref('')
const footers: any = [null, '=', 0]
function openDialog() {
cloneContent.value = form_data.value.prologue
dialogVisible.value = true
}
function submitDialog() {
set(props.nodeModel.properties.node_data, 'prologue', cloneContent.value)
dialogVisible.value = false
}
const form_data = computed({
get: () => {
if (props.nodeModel.properties.node_data) {
@@ -89,4 +127,10 @@ onMounted(() => {
set(props.nodeModel, 'validate', validate)
})
</script>
<style lang="scss" scoped></style>
<style lang="scss" scoped>
.reply-node-editor {
:deep(.md-editor-footer) {
border: none !important;
}
}
</style>

View File

@@ -116,13 +116,23 @@
</el-tooltip>
</div>
</template>
<el-input
<MdEditor
@wheel="wheel"
@keydown="isKeyDown = true"
@keyup="isKeyDown = false"
class="reply-node-editor"
style="height: 150px"
v-model="form_data.prompt"
:rows="6"
type="textarea"
maxlength="2048"
:placeholder="defaultPrompt"
/>
:preview="false"
:toolbars="[]"
:footers="footers"
>
<template #defFooters>
<el-button text type="info" @click="openDialog">
<AppIcon iconName="app-magnify" style="font-size: 16px"></AppIcon>
</el-button>
</template>
</MdEditor>
</el-form-item>
<el-form-item label="历史聊天记录">
<el-input-number
@@ -133,8 +143,34 @@
class="w-full"
/>
</el-form-item>
<el-form-item label="返回内容" @click.prevent>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>返回内容<span class="danger">*</span></span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content>
关闭后,该节点的内容则不输出给用户。
如果你想让用户看到该节点的输出内容,请打开开关。
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="form_data.is_result" />
</el-form-item>
</el-form>
</el-card>
<!-- Reply content dialog -->
<el-dialog v-model="dialogVisible" title="提示词" append-to-body>
<MdEditor v-model="cloneContent" :preview="false" :toolbars="[]" :footers="[]"> </MdEditor>
<template #footer>
<div class="dialog-footer mt-24">
<el-button type="primary" @click="submitDialog"> 确认 </el-button>
</div>
</template>
</el-dialog>
<!-- Add template -->
<CreateModelDialog
ref="createModelRef"
@@ -156,6 +192,8 @@ import applicationApi from '@/api/application'
import useStore from '@/stores'
import { relatedObject } from '@/utils/utils'
import type { Provider } from '@/api/type/model'
import { isLastNode } from '@/workflow/common/data'
const { model } = useStore()
const isKeyDown = ref(false)
const wheel = (e: any) => {
@@ -166,6 +204,17 @@ const wheel = (e: any) => {
return true
}
}
const dialogVisible = ref(false)
const cloneContent = ref('')
const footers: any = [null, '=', 0]
function openDialog() {
cloneContent.value = form_data.value.prompt
dialogVisible.value = true
}
function submitDialog() {
set(props.nodeModel.properties.node_data, 'prompt', cloneContent.value)
dialogVisible.value = false
}
const {
params: { id }
} = app.config.globalProperties.$route as any
@@ -177,7 +226,8 @@ const form = {
model_id: '',
system: '你是一个问题优化大师',
prompt: defaultPrompt,
dialogue_number: 1
dialogue_number: 1,
is_result: false
}
const form_data = computed({
@@ -237,7 +287,16 @@ const openCreateModel = (provider?: Provider) => {
onMounted(() => {
getProvider()
getModel()
if (isLastNode(props.nodeModel)) {
set(props.nodeModel.properties.node_data, 'is_result', true)
}
set(props.nodeModel, 'validate', validate)
})
</script>
<style lang="scss" scoped></style>
<style lang="scss" scoped>
.reply-node-editor {
:deep(.md-editor-footer) {
border: none !important;
}
}
</style>

View File

@@ -16,7 +16,12 @@
<template #label>
<div class="flex-between">
<span>回复内容</span>
<el-select v-model="form_data.reply_type" size="small" style="width: 85px">
<el-select
:teleported="false"
v-model="form_data.reply_type"
size="small"
style="width: 85px"
>
<el-option label="引用变量" value="referencing" />
<el-option label="自定义" value="content" />
</el-select>
@@ -24,6 +29,9 @@
</template>
<MdEditor
v-if="form_data.reply_type === 'content'"
@wheel="wheel"
@keydown="isKeyDown = true"
@keyup="isKeyDown = false"
class="reply-node-editor"
style="height: 150px"
v-model="form_data.content"
@@ -46,6 +54,23 @@
v-model="form_data.fields"
/>
</el-form-item>
<el-form-item label="返回内容" @click.prevent>
<template #label>
<div class="flex align-center">
<div class="mr-4">
<span>返回内容<span class="danger">*</span></span>
</div>
<el-tooltip effect="dark" placement="right" popper-class="max-w-200">
<template #content>
关闭后,该节点的内容则不输出给用户。
如果你想让用户看到该节点的输出内容,请打开开关。
</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
</el-tooltip>
</div>
</template>
<el-switch size="small" v-model="form_data.is_result" />
</el-form-item>
</el-form>
</el-card>
<!-- Reply content dialog -->
@@ -64,12 +89,23 @@ import { set } from 'lodash'
import NodeContainer from '@/workflow/common/NodeContainer.vue'
import NodeCascader from '@/workflow/common/NodeCascader.vue'
import { ref, computed, onMounted } from 'vue'
import { isLastNode } from '@/workflow/common/data'
const props = defineProps<{ nodeModel: any }>()
const isKeyDown = ref(false)
const wheel = (e: any) => {
if (isKeyDown.value) {
e.preventDefault()
} else {
e.stopPropagation()
return true
}
}
const form = {
reply_type: 'content',
content: '',
fields: []
fields: [],
is_result: false
}
const footers: any = [null, '=', 0]
@@ -111,6 +147,10 @@ const validate = () => {
}
onMounted(() => {
if (isLastNode(props.nodeModel)) {
set(props.nodeModel.properties.node_data, 'is_result', true)
}
set(props.nodeModel, 'validate', validate)
})
</script>