Compare commits

...

28 Commits
main ... v1.8.1

Author SHA1 Message Date
shaohuzhang1 16ecccbd31 fix: The document list status filtering is incorrect. (#1822)
Some checks failed
sync2gitee / repo-sync (push) Has been cancelled
(cherry picked from commit 36f1a3ba64)
2024-12-11 20:11:23 +08:00
shaohuzhang1 5c6e1ada42 fix: In the dialogue, the form nodes of the sub-application are not displayed as separate cards. (#1821)
(cherry picked from commit 37c963b4ad)
2024-12-11 20:11:17 +08:00
wangdan-fit2cloud 6583d8d595 fix: 基础组件增加过滤提示
(cherry picked from commit eaf31fd3e7)
2024-12-11 20:11:07 +08:00
wangdan-fit2cloud 3a7a14f590 fix: 修复嵌入式样式冲突问题
(cherry picked from commit 7a76f73682)
2024-12-11 20:10:59 +08:00
shaohuzhang1 b40336f9d1 fix: 修复函数库响应数据在日志中不显示问题 (#1814)
Some checks are pending
sync2gitee / repo-sync (push) Waiting to run
(cherry picked from commit f9437ca9b8)
2024-12-11 14:15:07 +08:00
wangdan-fit2cloud ce78192016 fix: 应用分页从20改为30 2024-12-11 11:28:42 +08:00
CaptainB 76a65d6ef1 fix: 处理回复中的html标签和内容
(cherry picked from commit daf27a76ab)
2024-12-11 11:25:08 +08:00
wxg0103 d6f36b3263 fix: 修复批量导入网页地址swagger上没有填写请求体的地方的缺陷
--bug=1050206 --user=王孝刚 【github#1809】【知识库】web知识库,批量导入网页地址时,抓到的接口有请求体,但是swagger上没有填写请求体的地方。 https://www.tapd.cn/57709429/s/1628286

(cherry picked from commit 2a64f63073)
2024-12-11 11:25:00 +08:00
CaptainB c745e29bfb fix: 处理回复中的html标签和内容
Some checks are pending
sync2gitee / repo-sync (push) Waiting to run
(cherry picked from commit 06a5f6a0de)
2024-12-10 18:53:47 +08:00
CaptainB 486261b9fc fix: 处理回复中的html标签和内容
(cherry picked from commit 2178a81c65)
2024-12-10 18:53:45 +08:00
shaohuzhang1 8568511b21 fix: 文档导出权限错误 (#1807)
Co-authored-by: wangdan-fit2cloud <dan.wang@fit2cloud.com>
(cherry picked from commit ec27e57f2c)
2024-12-10 18:53:45 +08:00
shaohuzhang1 f20ce4c1d2 fix: 修复工作流对话bug (#1806)
(cherry picked from commit 5e42dcad09)
2024-12-10 17:05:13 +08:00
CaptainB 7fa38677f7 fix: 语音播放去掉form_rander相关文本
(cherry picked from commit fa3817574b)
2024-12-10 17:05:04 +08:00
wangdan-fit2cloud 0a70ce398f fix: 修复非管理权限下的导出和状态改变
(cherry picked from commit 8d9a14a15f)
2024-12-10 17:05:03 +08:00
wxg0103 347689efef fix: 修复历史记录不显示图片的缺陷
--bug=1050151 --user=王孝刚 【应用】演示界面、全屏嵌入、浮窗嵌入历史记录和对话日志中不显示上传的图片 https://www.tapd.cn/57709429/s/1627628

(cherry picked from commit adad526a4f)
2024-12-10 17:05:01 +08:00
shaohuzhang1 d5c701277b fix: 修复嵌入图片过大指引位置错误 (#1802)
(cherry picked from commit 672cb7dd34)
2024-12-10 17:05:01 +08:00
shaohuzhang1 a7f7189df8 fix: 修复文档状态无法过滤 (#1800)
(cherry picked from commit aa94f66492)
2024-12-10 17:04:50 +08:00
shaohuzhang1 87f3c88fd2 fix: 修复前端路由警告 (#1797)
(cherry picked from commit 04f34f796e)
2024-12-10 17:04:49 +08:00
shaohuzhang1 a3a3dc95ad fix: 修复前端路由警告 (#1795)
(cherry picked from commit dc400b506d)
2024-12-10 17:04:48 +08:00
shaohuzhang1 882d577450 feat: 支持工作流ai对话节点添加节点上下文 (#1791)
Some checks are pending
sync2gitee / repo-sync (push) Waiting to run
(cherry picked from commit f65546a619)
2024-12-09 14:10:43 +08:00
wangdan-fit2cloud 960132af46 fix: 优化下拉框字重
(cherry picked from commit 28a2c9897d)
2024-12-09 14:10:32 +08:00
CaptainB 49070a5a75 feat: 支持智谱图片理解模型
(cherry picked from commit acebb7b366)
2024-12-09 14:10:30 +08:00
wxg0103 488f42a9e4 fix: 企业微信扫码信息配置错误
--bug=1050048 --user=王孝刚 【企业微信-扫码登录】配置信息错误 https://www.tapd.cn/57709429/s/1626175

(cherry picked from commit 88b565fe6d)
2024-12-09 14:10:29 +08:00
wxg0103 6f0877acf1 fix: 企业微信扫码信息配置错误
--bug=1050048 --user=王孝刚 【企业微信-扫码登录】配置信息错误 https://www.tapd.cn/57709429/s/1626175

(cherry picked from commit b87c214cb5)
2024-12-09 14:10:28 +08:00
shaohuzhang1 24e9184c61 build: 修复环境变量 (#1780)
(cherry picked from commit 40a1070b81)
2024-12-09 14:10:27 +08:00
CaptainB 9b1a497925 fix: 处理某些pdf中不包括目录和内部链接不能完整导入的问题
(cherry picked from commit fb8b96779c)
2024-12-09 14:10:26 +08:00
shaohuzhang1 1a5bb20871 fix: 修复阿里百炼项目模型v3调用方式 (#1773)
(cherry picked from commit 7c529c281c)
2024-12-09 14:10:25 +08:00
CaptainB a12e3f7db7 fix: 语音播放去掉formrander相关文本
(cherry picked from commit 7afbaba4ce)
2024-12-09 14:10:25 +08:00
41 changed files with 548 additions and 173 deletions

View File

@ -0,0 +1,21 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: common.py
@date: 2024/12/11 17:57
@desc: Shared value objects for the workflow answer pipeline.
"""


class Answer:
    """One renderable answer segment produced by a workflow node.

    Attributes:
        content: Answer text (may embed ``<form_rander>`` markup).
        view_type: How the front end groups the segment
            (e.g. 'single_view' / 'many_view' — TODO confirm full value set).
        runtime_node_id: Id of the node instance that produced the segment.
        chat_record_id: Chat record the segment belongs to.
        child_node: Sub-application node info dict ({} or None when absent).
    """

    def __init__(self, content, view_type, runtime_node_id, chat_record_id, child_node):
        self.view_type = view_type
        self.content = content
        self.runtime_node_id = runtime_node_id
        self.chat_record_id = chat_record_id
        self.child_node = child_node

    def to_dict(self):
        """Serialize to the plain dict shape the chat API responds with."""
        return {'view_type': self.view_type, 'content': self.content,
                'runtime_node_id': self.runtime_node_id,
                'chat_record_id': self.chat_record_id, 'child_node': self.child_node}

View File

@ -17,6 +17,7 @@ from django.db.models import QuerySet
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, ErrorDetail
from application.flow.common import Answer
from application.models import ChatRecord
from application.models.api_key_model import ApplicationPublicAccessClient
from common.constants.authentication_type import AuthenticationType
@ -151,11 +152,11 @@ class INode:
def save_context(self, details, workflow_manage):
pass
def get_answer_text(self):
def get_answer_list(self) -> List[Answer] | None:
if self.answer_text is None:
return None
return {'content': self.answer_text, 'runtime_node_id': self.runtime_node_id,
'chat_record_id': self.workflow_params['chat_record_id']}
return [
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {})]
def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None,
get_node_params=lambda node: node.properties.get('node_data')):

View File

@ -26,6 +26,8 @@ class ChatNodeSerializer(serializers.Serializer):
model_params_setting = serializers.DictField(required=False, error_messages=ErrMessage.integer("模型参数相关设置"))
dialogue_type = serializers.CharField(required=True, error_messages=ErrMessage.char("上下文类型"))
class IChatNode(INode):
type = 'ai-chat-node'
@ -39,5 +41,6 @@ class IChatNode(INode):
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id,
chat_record_id,
model_params_setting=None,
dialogue_type=None,
**kwargs) -> NodeResult:
pass

View File

@ -12,7 +12,7 @@ from typing import List, Dict
from django.db.models import QuerySet
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import BaseMessage
from langchain_core.messages import BaseMessage, AIMessage
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
@ -72,6 +72,22 @@ def get_default_model_params_setting(model_id):
return model_params_setting
def get_node_message(chat_record, runtime_node_id):
    """History messages recorded for one node run, or [] when it never ran."""
    details = chat_record.get_node_details_runtime_node_id(runtime_node_id)
    if details is None:
        return []
    return [HumanMessage(details.get('question')), AIMessage(details.get('answer'))]


def get_workflow_message(chat_record):
    """Whole-conversation history pair: (human question, ai answer)."""
    return [chat_record.get_human_message(), chat_record.get_ai_message()]


def get_message(chat_record, dialogue_type, runtime_node_id):
    """Pick node-scoped or workflow-scoped history for one chat record."""
    if dialogue_type == 'NODE':
        return get_node_message(chat_record, runtime_node_id)
    return get_workflow_message(chat_record)
class BaseChatNode(IChatNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
@ -80,12 +96,17 @@ class BaseChatNode(IChatNode):
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
model_params_setting=None,
dialogue_type=None,
**kwargs) -> NodeResult:
if dialogue_type is None:
dialogue_type = 'WORKFLOW'
if model_params_setting is None:
model_params_setting = get_default_model_params_setting(model_id)
chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number)
history_message = self.get_history_message(history_chat_record, dialogue_number, dialogue_type,
self.runtime_node_id)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
self.context['question'] = question.content
@ -103,10 +124,10 @@ class BaseChatNode(IChatNode):
_write_context=write_context)
@staticmethod
def get_history_message(history_chat_record, dialogue_number):
def get_history_message(history_chat_record, dialogue_number, dialogue_type, runtime_node_id):
start_index = len(history_chat_record) - dialogue_number
history_message = reduce(lambda x, y: [*x, *y], [
[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
get_message(history_chat_record[index], dialogue_type, runtime_node_id)
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
return history_message

View File

@ -1,9 +1,11 @@
# coding=utf-8
import json
import re
import time
import uuid
from typing import Dict
from typing import Dict, List
from application.flow.common import Answer
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.application_node.i_application_node import IApplicationNode
from application.models import Chat
@ -19,7 +21,8 @@ def _is_interrupt_exec(node, node_variable: Dict, workflow_variable: Dict):
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
result = node_variable.get('result')
node.context['child_node'] = node_variable.get('child_node')
node.context['application_node_dict'] = node_variable.get('application_node_dict')
node.context['node_dict'] = node_variable.get('node_dict', {})
node.context['is_interrupt_exec'] = node_variable.get('is_interrupt_exec')
node.context['message_tokens'] = result.get('usage', {}).get('prompt_tokens', 0)
node.context['answer_tokens'] = result.get('usage', {}).get('completion_tokens', 0)
@ -43,6 +46,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
answer = ''
usage = {}
node_child_node = {}
application_node_dict = node.context.get('application_node_dict', {})
is_interrupt_exec = False
for chunk in response:
# 先把流转成字符串
@ -52,6 +56,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
runtime_node_id = response_content.get('runtime_node_id', '')
chat_record_id = response_content.get('chat_record_id', '')
child_node = response_content.get('child_node')
view_type = response_content.get('view_type')
node_type = response_content.get('node_type')
real_node_id = response_content.get('real_node_id')
node_is_end = response_content.get('node_is_end', False)
@ -60,16 +65,32 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
answer += content
node_child_node = {'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
'child_node': child_node}
if real_node_id is not None:
application_node = application_node_dict.get(real_node_id, None)
if application_node is None:
application_node_dict[real_node_id] = {'content': content,
'runtime_node_id': runtime_node_id,
'chat_record_id': chat_record_id,
'child_node': child_node,
'index': len(application_node_dict),
'view_type': view_type}
else:
application_node['content'] += content
yield {'content': content,
'node_type': node_type,
'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
'child_node': child_node,
'real_node_id': real_node_id,
'node_is_end': node_is_end}
'node_is_end': node_is_end,
'view_type': view_type}
usage = response_content.get('usage', {})
node_variable['result'] = {'usage': usage}
node_variable['is_interrupt_exec'] = is_interrupt_exec
node_variable['child_node'] = node_child_node
node_variable['application_node_dict'] = application_node_dict
_write_context(node_variable, workflow_variable, node, workflow, answer)
@ -88,12 +109,43 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
_write_context(node_variable, workflow_variable, node, workflow, answer)
def reset_application_node_dict(application_node_dict, runtime_node_id, node_data):
    """Mark the matching node's embedded form as submitted.

    Scans every cached sub-application answer segment; for the segment(s)
    whose ``runtime_node_id`` matches, rewrites the ``<form_rander>`` JSON
    payload in ``content`` with ``is_submit=True`` and the submitted
    ``node_data``. Mutates ``application_node_dict`` in place.

    Best-effort: any failure (malformed JSON, unexpected shapes) is swallowed
    so that a broken form payload can never break the chat flow.
    """
    form_pattern = '<form_rander>.*?</form_rander>'
    try:
        if application_node_dict is None:
            return
        for application_node in application_node_dict.values():
            if application_node.get('runtime_node_id') == runtime_node_id:
                content: str = application_node.get('content')
                match = re.search(form_pattern, content)
                if match:
                    form_setting_str = match.group().replace('<form_rander>', '').replace('</form_rander>', '')
                    form_setting = json.loads(form_setting_str)
                    form_setting['is_submit'] = True
                    form_setting['form_data'] = node_data
                    value = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
                    # Callable replacement: the inserted text is taken literally
                    # (no backslash-escape processing) and cannot collide with a
                    # '${value}' placeholder already present in the content.
                    application_node['content'] = re.sub(form_pattern, lambda _m: value, content)
    except Exception:
        # deliberate best-effort: never propagate from form post-processing
        pass
class BaseApplicationNode(IApplicationNode):
def get_answer_text(self):
def get_answer_list(self) -> List[Answer] | None:
if self.answer_text is None:
return None
return {'content': self.answer_text, 'runtime_node_id': self.runtime_node_id,
'chat_record_id': self.workflow_params['chat_record_id'], 'child_node': self.context.get('child_node')}
application_node_dict = self.context.get('application_node_dict')
if application_node_dict is None:
return [
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'],
self.context.get('child_node'))]
else:
return [Answer(n.get('content'), n.get('view_type'), self.runtime_node_id,
self.workflow_params['chat_record_id'], {'runtime_node_id': n.get('runtime_node_id'),
'chat_record_id': n.get('chat_record_id')
, 'child_node': n.get('child_node')}) for n in
sorted(application_node_dict.values(), key=lambda item: item.get('index'))]
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
@ -122,6 +174,8 @@ class BaseApplicationNode(IApplicationNode):
runtime_node_id = child_node.get('runtime_node_id')
record_id = child_node.get('chat_record_id')
child_node_value = child_node.get('child_node')
application_node_dict = self.context.get('application_node_dict')
reset_application_node_dict(application_node_dict, runtime_node_id, node_data)
response = ChatMessageSerializer(
data={'chat_id': current_chat_id, 'message': message,
@ -179,5 +233,6 @@ class BaseApplicationNode(IApplicationNode):
'err_message': self.err_message,
'global_fields': global_fields,
'document_list': self.workflow_manage.document_list,
'image_list': self.workflow_manage.image_list
'image_list': self.workflow_manage.image_list,
'application_node_dict': self.context.get('application_node_dict')
}

View File

@ -8,10 +8,11 @@
"""
import json
import time
from typing import Dict
from typing import Dict, List
from langchain_core.prompts import PromptTemplate
from application.flow.common import Answer
from application.flow.i_step_node import NodeResult
from application.flow.step_node.form_node.i_form_node import IFormNode
@ -60,7 +61,7 @@ class BaseFormNode(IFormNode):
{'result': value, 'form_field_list': form_field_list, 'form_content_format': form_content_format}, {},
_write_context=write_context)
def get_answer_text(self):
def get_answer_list(self) -> List[Answer] | None:
form_content_format = self.context.get('form_content_format')
form_field_list = self.context.get('form_field_list')
form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id,
@ -70,8 +71,7 @@ class BaseFormNode(IFormNode):
form = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
value = prompt_template.format(form=form)
return {'content': value, 'runtime_node_id': self.runtime_node_id,
'chat_record_id': self.workflow_params['chat_record_id']}
return [Answer(value, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], None)]
def get_details(self, index: int, **kwargs):
form_content_format = self.context.get('form_content_format')

View File

@ -29,7 +29,7 @@ def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
result = str(step_variable['result']) + '\n'
yield result
workflow.answer += result
node.answer_text = result
node.context['run_time'] = time.time() - node.context['start_time']
@ -94,6 +94,7 @@ class BaseFunctionLibNodeNode(IFunctionLibNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
self.answer_text = details.get('result')
def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
if not function_lib.is_active:

View File

@ -27,7 +27,7 @@ def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
result = str(step_variable['result']) + '\n'
yield result
workflow.answer += result
node.answer_text = result
node.context['run_time'] = time.time() - node.context['start_time']

View File

@ -19,6 +19,7 @@ from rest_framework import status
from rest_framework.exceptions import ErrorDetail, ValidationError
from application.flow import tools
from application.flow.common import Answer
from application.flow.i_step_node import INode, WorkFlowPostHandler, NodeResult
from application.flow.step_node import get_node
from common.exception.app_exception import AppApiException
@ -302,6 +303,9 @@ class WorkflowManage:
get_node_params=get_node_params)
self.start_node.valid_args(
{**self.start_node.node_params, 'form_data': start_node_data}, self.start_node.workflow_params)
if self.start_node.type == 'application-node':
application_node_dict = node_details.get('application_node_dict', {})
self.start_node.context['application_node_dict'] = application_node_dict
self.node_context.append(self.start_node)
continue
@ -454,6 +458,7 @@ class WorkflowManage:
content = r
child_node = {}
node_is_end = False
view_type = current_node.view_type
if isinstance(r, dict):
content = r.get('content')
child_node = {'runtime_node_id': r.get('runtime_node_id'),
@ -461,6 +466,7 @@ class WorkflowManage:
, 'child_node': r.get('child_node')}
real_node_id = r.get('real_node_id')
node_is_end = r.get('node_is_end')
view_type = r.get('view_type')
chunk = self.base_to_response.to_stream_chunk_response(self.params['chat_id'],
self.params['chat_record_id'],
current_node.id,
@ -468,7 +474,7 @@ class WorkflowManage:
content, False, 0, 0,
{'node_type': current_node.type,
'runtime_node_id': current_node.runtime_node_id,
'view_type': current_node.view_type,
'view_type': view_type,
'child_node': child_node,
'node_is_end': node_is_end,
'real_node_id': real_node_id})
@ -480,7 +486,7 @@ class WorkflowManage:
'', False, 0, 0, {'node_is_end': True,
'runtime_node_id': current_node.runtime_node_id,
'node_type': current_node.type,
'view_type': current_node.view_type,
'view_type': view_type,
'child_node': child_node,
'real_node_id': real_node_id})
node_chunk.end(chunk)
@ -575,35 +581,29 @@ class WorkflowManage:
def get_answer_text_list(self):
result = []
next_node_id_list = []
if self.start_node is not None:
next_node_id_list = [edge.targetNodeId for edge in self.flow.edges if
edge.sourceNodeId == self.start_node.id]
for index in range(len(self.node_context)):
node = self.node_context[index]
up_node = None
if index > 0:
up_node = self.node_context[index - 1]
answer_text = node.get_answer_text()
if answer_text is not None:
if up_node is None or node.view_type == 'single_view' or (
node.view_type == 'many_view' and up_node.view_type == 'single_view'):
result.append(node.get_answer_text())
elif self.chat_record is not None and next_node_id_list.__contains__(
node.id) and up_node is not None and not next_node_id_list.__contains__(
up_node.id):
result.append(node.get_answer_text())
answer_list = reduce(lambda x, y: [*x, *y],
[n.get_answer_list() for n in self.node_context if n.get_answer_list() is not None],
[])
up_node = None
for index in range(len(answer_list)):
current_answer = answer_list[index]
if len(current_answer.content) > 0:
if up_node is None or current_answer.view_type == 'single_view' or (
current_answer.view_type == 'many_view' and up_node.view_type == 'single_view'):
result.append(current_answer)
else:
if len(result) > 0:
exec_index = len(result) - 1
content = result[exec_index]['content']
result[exec_index]['content'] += answer_text['content'] if len(
content) == 0 else ('\n\n' + answer_text['content'])
content = result[exec_index].content
result[exec_index].content += current_answer.content if len(
content) == 0 else ('\n\n' + current_answer.content)
else:
answer_text = node.get_answer_text()
result.insert(0, answer_text)
return result
result.insert(0, current_answer)
up_node = current_answer
if len(result) == 0:
# 如果没有响应 就响应一个空数据
return [Answer('', '', '', '', {}).to_dict()]
return [r.to_dict() for r in result]
def get_next_node(self):
"""

View File

@ -167,5 +167,8 @@ class ChatRecord(AppModelMixin):
def get_ai_message(self):
    # Wrap this record's stored answer text as a LangChain AIMessage for
    # use as chat history context.
    return AIMessage(content=self.answer_text)
def get_node_details_runtime_node_id(self, runtime_node_id):
    # Look up one node's execution details by its runtime node id
    # (self.details is keyed by runtime node id — returns None when the
    # node did not run in this chat record).
    return self.details.get(runtime_node_id, None)
class Meta:
db_table = "application_chat_record"

View File

@ -341,10 +341,12 @@ class ChatMessageSerializer(serializers.Serializer):
user_id = chat_info.application.user_id
chat_record_id = self.data.get('chat_record_id')
chat_record = None
history_chat_record = chat_info.chat_record_list
if chat_record_id is not None:
chat_record = self.get_chat_record(chat_info, chat_record_id)
history_chat_record = [r for r in chat_info.chat_record_list if str(r.id) != chat_record_id]
work_flow_manage = WorkflowManage(Flow.new_instance(chat_info.work_flow_version.work_flow),
{'history_chat_record': chat_info.chat_record_list, 'question': message,
{'history_chat_record': history_chat_record, 'question': message,
'chat_id': chat_info.chat_id, 'chat_record_id': str(
uuid.uuid1()) if chat_record is None else chat_record.id,
'stream': stream,

View File

@ -67,6 +67,20 @@ const initChat=(root)=>{
const chat_button_img=root.querySelector('.maxkb-chat-button > img')
// 对话框元素
const chat_container=root.querySelector('#maxkb-chat-container')
// 引导层
const mask_content = root.querySelector('.maxkb-mask > .maxkb-content')
const mask_tips = root.querySelector('.maxkb-tips')
chat_button_img.onload=(event)=>{
if(mask_content){
mask_content.style.width = chat_button_img.width + 'px'
mask_content.style.height = chat_button_img.height + 'px'
if('{{x_type}}'=='left'){
mask_tips.style.marginLeft = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px'
}else{
mask_tips.style.marginRight = (chat_button_img.naturalWidth>500?500:chat_button_img.naturalWidth)-64 + 'px'
}
}
}
const viewport=root.querySelector('.maxkb-openviewport')
const closeviewport=root.querySelector('.maxkb-closeviewport')
@ -90,17 +104,16 @@ const initChat=(root)=>{
}
const drag=(e)=>{
if (['touchmove','touchstart'].includes(e.type)) {
chat_button.style.top=(e.touches[0].clientY-25)+'px'
chat_button.style.left=(e.touches[0].clientX-25)+'px'
chat_button.style.top=(e.touches[0].clientY-chat_button_img.naturalHeight/2)+'px'
chat_button.style.left=(e.touches[0].clientX-chat_button_img.naturalWidth/2)+'px'
} else {
chat_button.style.top=(e.y-25)+'px'
chat_button.style.left=(e.x-25)+'px'
chat_button.style.top=(e.y-chat_button_img.naturalHeight/2)+'px'
chat_button.style.left=(e.x-chat_button_img.naturalWidth/2)+'px'
}
chat_button.style.width =chat_button_img.naturalWidth+'px'
chat_button.style.height =chat_button_img.naturalHeight+'px'
}
if({{is_draggable}}){
console.dir(chat_button_img)
chat_button.addEventListener("drag",drag)
chat_button.addEventListener("dragover",(e)=>{
e.preventDefault()
@ -155,7 +168,7 @@ function initMaxkbStyle(root){
#maxkb .maxkb-mask {
position: fixed;
z-index: 999;
z-index: 10001;
background-color: transparent;
height: 100%;
width: 100%;
@ -242,8 +255,6 @@ function initMaxkbStyle(root){
{{x_type}}: {{x_value}}px;
{{y_type}}: {{y_value}}px;
cursor: pointer;
max-height:500px;
max-width:500px;
z-index:10000;
}
#maxkb #maxkb-chat-container{
@ -261,6 +272,7 @@ function initMaxkbStyle(root){
position: absolute;
display: flex;
align-items: center;
line-height: 18px;
}
#maxkb #maxkb-chat-container .maxkb-operate .maxkb-chat-close{
margin-left:15px;
@ -306,4 +318,4 @@ function embedChatbot() {
initMaxkb()
} else console.error('invalid parameter')
}
window.onload = embedChatbot
window.addEventListener('load',embedChatbot)

View File

@ -31,6 +31,16 @@ default_pattern_list = [re.compile('(?<=^)# .*|(?<=\\n)# .*'),
max_kb = logging.getLogger("max_kb")
def check_links_in_pdf(doc):
    """Return True if any page of *doc* contains an internal (goto) link.

    PyMuPDF reports link kind 1 (LINK_GOTO) for jumps to another location in
    the same document; external URLs and other kinds are ignored. Short-
    circuits on the first internal link found.
    """
    for page_number in range(len(doc)):
        links = doc[page_number].get_links()
        if any(link['kind'] == 1 for link in links):
            return True
    return False
class PdfSplitHandle(BaseSplitHandle):
def handle(self, file, pattern_list: List, with_filter: bool, limit: int, get_buffer, save_image):
with tempfile.NamedTemporaryFile(delete=False) as temp_file:
@ -175,6 +185,9 @@ class PdfSplitHandle(BaseSplitHandle):
@staticmethod
def handle_links(doc, pattern_list, with_filter, limit):
# 检查文档是否包含内部链接
if not check_links_in_pdf(doc):
return
# 创建存储章节内容的数组
chapters = []
toc_start_page = -1

View File

@ -56,7 +56,6 @@ from embedding.task.embedding import embedding_by_document, delete_embedding_by_
delete_embedding_by_document, update_embedding_dataset_id, delete_embedding_by_paragraph_ids, \
embedding_by_document_list
from smartdoc.conf import PROJECT_DIR
from django.db import models
parse_qa_handle_list = [XlsParseQAHandle(), CsvParseQAHandle(), XlsxParseQAHandle()]
parse_table_handle_list = [CsvSplitHandle(), XlsSplitHandle(), XlsxSplitHandle()]
@ -145,6 +144,19 @@ class DocumentWebInstanceSerializer(ApiMixin, serializers.Serializer):
description='知识库id'),
]
@staticmethod
def get_request_body_api():
    """OpenAPI request-body schema for batch web-document import.

    Requires ``source_url_list`` (array of URLs); ``selector`` is optional.
    """
    url_list_schema = openapi.Schema(type=openapi.TYPE_ARRAY, title="文档地址列表",
                                     description="文档地址列表",
                                     items=openapi.Schema(type=openapi.TYPE_STRING))
    selector_schema = openapi.Schema(type=openapi.TYPE_STRING, title="选择器",
                                     description="选择器")
    return openapi.Schema(
        type=openapi.TYPE_OBJECT,
        required=['source_url_list'],
        properties={'source_url_list': url_list_schema,
                    'selector': selector_schema}
    )
class DocumentInstanceSerializer(ApiMixin, serializers.Serializer):
name = serializers.CharField(required=True,
@ -364,6 +376,7 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
"文档名称"))
hit_handling_method = serializers.CharField(required=False, error_messages=ErrMessage.char("命中处理方式"))
is_active = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean("文档是否可用"))
task_type = serializers.IntegerField(required=False, error_messages=ErrMessage.integer("任务类型"))
status = serializers.CharField(required=False, error_messages=ErrMessage.char("文档状态"))
def get_query_set(self):
@ -375,8 +388,22 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
query_set = query_set.filter(**{'hit_handling_method': self.data.get('hit_handling_method')})
if 'is_active' in self.data and self.data.get('is_active') is not None:
query_set = query_set.filter(**{'is_active': self.data.get('is_active')})
if 'status' in self.data and self.data.get('status') is not None:
query_set = query_set.filter(**{'status': self.data.get('status')})
if 'status' in self.data and self.data.get(
'status') is not None:
task_type = self.data.get('task_type')
status = self.data.get(
'status')
if task_type is not None:
query_set = query_set.annotate(
reversed_status=Reverse('status'),
task_type_status=Substr('reversed_status', TaskType(task_type).value,
1),
).filter(task_type_status=State(status).value).values('id')
else:
if status != State.SUCCESS.value:
query_set = query_set.filter(status__icontains=status)
else:
query_set = query_set.filter(status__iregex='^[2n]*$')
query_set = query_set.order_by('-create_time', 'id')
return query_set
@ -648,7 +675,7 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
ListenerManagement.update_status(QuerySet(Paragraph).annotate(
reversed_status=Reverse('status'),
task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
TaskType(instance.get('type')).value),
1),
).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
document_id=document_id).values('id'),
TaskType(instance.get('type')),
@ -656,7 +683,7 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
ListenerManagement.update_status(QuerySet(Document).annotate(
reversed_status=Reverse('status'),
task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
TaskType(instance.get('type')).value),
1),
).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
id=document_id).values('id'),
TaskType(instance.get('type')),

View File

@ -236,7 +236,7 @@ class Document(APIView):
return result.success(
DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).cancel(
request.data
))
))
class Refresh(APIView):
authentication_classes = [TokenAuth]
@ -309,7 +309,7 @@ class Document(APIView):
manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
tags=["知识库/文档"])
@has_permissions(
lambda r, k: Permission(group=Group.DATASET, operate=Operate.USE,
lambda r, k: Permission(group=Group.DATASET, operate=Operate.MANAGE,
dynamic_tag=k.get('dataset_id')))
def get(self, request: Request, dataset_id: str, document_id: str):
return DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).export()

View File

@ -6,6 +6,7 @@
@date2024/10/16 16:34
@desc:
"""
from functools import reduce
from typing import Dict, List
from langchain_community.embeddings import DashScopeEmbeddings
@ -14,6 +15,12 @@ from langchain_community.embeddings.dashscope import embed_with_retry
from setting.models_provider.base_model_provider import MaxKBBaseModel
def proxy_embed_documents(texts: List[str], step_size, embed_documents):
    """Embed *texts* in batches of *step_size* and concatenate the results.

    Works around backend batch-size limits by slicing the input and calling
    *embed_documents* once per slice, preserving input order.
    """
    embeddings = []
    for start in range(0, len(texts), step_size):
        embeddings.extend(embed_documents(texts[start:start + step_size]))
    return embeddings
class AliyunBaiLianEmbedding(MaxKBBaseModel, DashScopeEmbeddings):
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@ -23,6 +30,11 @@ class AliyunBaiLianEmbedding(MaxKBBaseModel, DashScopeEmbeddings):
)
def embed_documents(self, texts: List[str]) -> List[List[float]]:
    # text-embedding-v3 is routed through the batching proxy in chunks of 6
    # (presumably the v3 endpoint's batch-size limit — TODO confirm against
    # DashScope docs); other models embed the whole list in one call.
    if self.model == 'text-embedding-v3':
        return proxy_embed_documents(texts, 6, self._embed_documents)
    return self._embed_documents(texts)
def _embed_documents(self, texts: List[str]) -> List[List[float]]:
"""Call out to DashScope's embedding endpoint for embedding search docs.
Args:

View File

@ -0,0 +1,47 @@
# coding=utf-8
import base64
import os
from typing import Dict
from langchain_core.messages import HumanMessage
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
class ZhiPuImageModelCredential(BaseForm, BaseModelCredential):
    """Credential form for ZhiPu (GLM) image-understanding models.

    Declares the field a user must supply (``api_key``) and validates it by
    issuing a minimal streaming request against the configured model.
    """

    api_key = forms.PasswordInputField('API Key', required=True)

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], provider,
                 raise_exception=False):
        """Validate the credential.

        Checks that *model_type* is supported by *provider* and that all
        required keys are present, then performs a real streaming call so
        authentication/configuration errors surface here.

        Returns True on success; returns False or raises AppApiException
        (depending on *raise_exception*) on failure.
        """
        model_type_list = provider.get_model_type_list()
        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
            raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
        for key in ['api_key']:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
                else:
                    return False
        try:
            model = provider.get_model(model_type, model_name, model_credential)
            res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])])
            # Drain the stream so request errors are raised here; the chunk
            # contents are irrelevant for validation (debug print removed).
            for _chunk in res:
                pass
        except Exception as e:
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
            else:
                return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        """Return *model* with the api_key masked for safe display/storage."""
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    def get_model_params_setting_form(self, model_name):
        # Image models expose no tunable runtime parameters.
        pass

View File

@ -0,0 +1,26 @@
from typing import Dict
from langchain_openai.chat_models import ChatOpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
def custom_get_token_ids(text: str):
    """Encode *text* into token ids using the shared project tokenizer."""
    return TokenizerManage.get_tokenizer().encode(text)
class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
    """ZhiPu vision chat model served through BigModel's OpenAI-compatible API."""

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        """Build a streaming ZhiPuImage instance from stored credentials.

        Optional model parameters are filtered through the MaxKB base-model
        helper before being forwarded to the constructor.
        """
        base_kwargs = {
            'model_name': model_name,
            'openai_api_key': model_credential.get('api_key'),
            # BigModel's OpenAI-compatible endpoint.
            'openai_api_base': 'https://open.bigmodel.cn/api/paas/v4',
            'streaming': True,
        }
        return ZhiPuImage(**base_kwargs, **MaxKBBaseModel.filter_optional_params(model_kwargs))

View File

@ -11,18 +11,40 @@ import os
from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
ModelInfoManage
from setting.models_provider.impl.zhipu_model_provider.credential.image import ZhiPuImageModelCredential
from setting.models_provider.impl.zhipu_model_provider.credential.llm import ZhiPuLLMModelCredential
from setting.models_provider.impl.zhipu_model_provider.model.image import ZhiPuImage
from setting.models_provider.impl.zhipu_model_provider.model.llm import ZhipuChatModel
from smartdoc.conf import PROJECT_DIR
qwen_model_credential = ZhiPuLLMModelCredential()
zhipu_image_model_credential = ZhiPuImageModelCredential()
model_info_list = [
ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel),
ModelInfo('glm-4v', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel),
ModelInfo('glm-3-turbo', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel)
]
model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info(
ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel)).build()
model_info_image_list = [
ModelInfo('glm-4v-plus', '具有强大的多模态理解能力。能够同时理解多达五张图像,并支持视频内容理解',
ModelTypeConst.IMAGE, zhipu_image_model_credential,
ZhiPuImage),
ModelInfo('glm-4v', '专注于单图理解。适用于需要高效图像解析的场景',
ModelTypeConst.IMAGE, zhipu_image_model_credential,
ZhiPuImage),
ModelInfo('glm-4v-flash', '专注于单图理解。适用于需要高效图像解析的场景(免费)',
ModelTypeConst.IMAGE, zhipu_image_model_credential,
ZhiPuImage),
]
model_info_manage = (
ModelInfoManage.builder()
.append_model_info_list(model_info_list)
.append_default_model_info(ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel))
.append_model_info_list(model_info_image_list)
.build()
)
class ZhiPuModelProvider(IModelProvider):

View File

@ -42,7 +42,7 @@ ENV MAXKB_VERSION="${DOCKER_IMAGE_TAG} (build at ${BUILD_AT}, commit: ${GITHUB_C
MAXKB_DB_PASSWORD=Password123@postgres \
MAXKB_EMBEDDING_MODEL_NAME=/opt/maxkb/model/embedding/shibing624_text2vec-base-chinese \
MAXKB_EMBEDDING_MODEL_PATH=/opt/maxkb/model/embedding \
MAXKB_SANDBOX=true \
MAXKB_SANDBOX=1 \
LANG=en_US.UTF-8 \
PATH=/opt/py3/bin:$PATH \
POSTGRES_USER=root \

View File

@ -120,7 +120,15 @@ export class ChatRecordManage {
this.chat.answer_text = this.chat.answer_text + chunk_answer
}
get_current_up_node() {
  // Scan backwards, starting just before the last node, and return the
  // first node that has already produced content; undefined when none has.
  let i = this.node_list.length - 2
  while (i >= 0) {
    const candidate = this.node_list[i]
    if (candidate.content.length > 0) {
      return candidate
    }
    i--
  }
  return undefined
}
get_run_node() {
if (
this.write_node_info &&
@ -135,7 +143,7 @@ export class ChatRecordManage {
const index = this.node_list.indexOf(run_node)
let current_up_node = undefined
if (index > 0) {
current_up_node = this.node_list[index - 1]
current_up_node = this.get_current_up_node()
}
let answer_text_list_index = 0
@ -251,9 +259,9 @@ export class ChatRecordManage {
(node_info.divider_content ? node_info.divider_content.splice(0).join('') : '') +
node_info.current_node.buffer.splice(0).join(''),
node_info.answer_text_list_index,
current_node.chat_record_id,
current_node.runtime_node_id,
current_node.child_node
node_info.current_node.chat_record_id,
node_info.current_node.runtime_node_id,
node_info.current_node.child_node
)
if (node_info.current_node.buffer.length == 0) {
node_info.current_node.is_end = true
@ -293,9 +301,11 @@ export class ChatRecordManage {
let n = this.node_list.find((item) => item.real_node_id == chunk.real_node_id)
if (n) {
n.buffer.push(...chunk.content)
n.content += chunk.content
} else {
n = {
buffer: [...chunk.content],
content: chunk.content,
real_node_id: chunk.real_node_id,
node_id: chunk.node_id,
chat_record_id: chunk.chat_record_id,

View File

@ -152,12 +152,21 @@ function markdownToPlainText(md: string) {
)
}
function removeFormRander(text: string) {
  // Drop embedded <form_rander>...</form_rander> blocks (form cards are not
  // speakable content) and trim the surrounding whitespace before TTS.
  const stripped = text.replace(/<form_rander>[\s\S]*?<\/form_rander>/g, '')
  return stripped.trim()
}
const playAnswerText = (text: string) => {
if (!text) {
text = '抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。'
}
//
text = removeFormRander(text)
// text
text = markdownToPlainText(text)
// console.log(text)
audioPlayerStatus.value = true
if (props.tts_type === 'BROWSER') {
if (text !== utterance.value?.text) {

View File

@ -131,12 +131,22 @@ function markdownToPlainText(md: string) {
)
}
function removeFormRander(text: string) {
  // Remove every <form_rander>...</form_rander> segment — form cards must not
  // be read aloud — then trim leading/trailing whitespace.
  const withoutForms = text.replace(/<form_rander>[\s\S]*?<\/form_rander>/g, '')
  return withoutForms.trim()
}
const playAnswerText = (text: string) => {
if (!text) {
text = '抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。'
}
//
text = removeFormRander(text)
// text
text = markdownToPlainText(text)
// console.log(text)
audioPlayerStatus.value = true
if (props.tts_type === 'BROWSER') {
if (text !== utterance.value?.text) {

View File

@ -20,7 +20,10 @@
<template v-for="(item, index) in document_list" :key="index">
<el-card shadow="never" style="--el-card-padding: 8px" class="download-file cursor">
<div class="download-button flex align-center" @click="downloadFile(item)">
<el-icon class="mr-4"><Download /></el-icon>
<el-icon class="mr-4">
<Download />
</el-icon>
点击下载文件
</div>
<div class="show flex align-center">
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
@ -61,6 +64,7 @@
import { type chatType } from '@/api/type/application'
import { getImgUrl, getAttrsArray, downloadByURL } from '@/utils/utils'
import { onMounted, computed } from 'vue'
const props = defineProps<{
application: any
chatRecord: chatType
@ -68,20 +72,20 @@ const props = defineProps<{
const document_list = computed(() => {
if (props.chatRecord?.upload_meta) {
return props.chatRecord.upload_meta?.document_list || []
} else if (props.chatRecord.execution_details?.length > 0) {
return props.chatRecord.execution_details[0]?.document_list || []
} else {
return []
}
const startNode = props.chatRecord.execution_details?.find(
(detail) => detail.type === 'start-node'
)
return startNode?.document_list || []
})
const image_list = computed(() => {
if (props.chatRecord?.upload_meta) {
return props.chatRecord.upload_meta?.image_list || []
} else if (props.chatRecord.execution_details?.length > 0) {
return props.chatRecord.execution_details[0]?.image_list || []
} else {
return []
}
const startNode = props.chatRecord.execution_details?.find(
(detail) => detail.type === 'start-node'
)
return startNode?.image_list || []
})
function downloadFile(item: any) {
@ -94,21 +98,26 @@ onMounted(() => {})
.download-file {
width: 200px;
height: 43px;
&:hover {
color: var(--el-color-primary);
border: 1px solid var(--el-color-primary);
.download-button {
display: block;
text-align: center;
line-height: 26px;
}
.show {
display: none;
}
}
.show {
display: block;
}
.download-button {
display: none;
}

View File

@ -73,6 +73,8 @@ defineExpose({
li {
padding: 10px 16px;
font-weight: 400;
color: var(--el-text-color-regular);
font-size: 14px;
&.active {
background: var(--el-color-primary-light-9);
border-radius: 4px;

View File

@ -225,8 +225,9 @@ export const exportExcel: (
params: any,
loading?: NProgress | Ref<boolean>
) => {
return promise(request({ url: url, method: 'get', params, responseType: 'blob' }), loading)
.then((res: any) => {
return promise(request({ url: url, method: 'get', params, responseType: 'blob' }), loading).then(
(res: any) => {
console.log(res)
if (res) {
const blob = new Blob([res], {
type: 'application/vnd.ms-excel'
@ -239,8 +240,8 @@ export const exportExcel: (
window.URL.revokeObjectURL(link.href)
}
return true
})
.catch((e) => {})
}
)
}
export const exportExcelPost: (
@ -265,22 +266,20 @@ export const exportExcelPost: (
responseType: 'blob'
}),
loading
)
.then((res: any) => {
if (res) {
const blob = new Blob([res], {
type: 'application/vnd.ms-excel'
})
const link = document.createElement('a')
link.href = window.URL.createObjectURL(blob)
link.download = fileName
link.click()
// 释放内存
window.URL.revokeObjectURL(link.href)
}
return true
})
.catch((e) => {})
).then((res: any) => {
if (res) {
const blob = new Blob([res], {
type: 'application/vnd.ms-excel'
})
const link = document.createElement('a')
link.href = window.URL.createObjectURL(blob)
link.download = fileName
link.click()
// 释放内存
window.URL.revokeObjectURL(link.href)
}
return true
})
}
export const download: (

View File

@ -9,7 +9,8 @@ const applicationRouter = {
children: [
{
path: '/application',
name: 'application',
name: 'application-index',
meta: { title: '应用主页', activeMenu: '/application' },
component: () => import('@/views/application/index.vue')
},
{

View File

@ -8,7 +8,8 @@ const datasetRouter = {
children: [
{
path: '/dataset',
name: 'dataset',
name: 'dataset-index',
meta: { title: '知识库主页', activeMenu: '/dataset' },
component: () => import('@/views/dataset/index.vue')
},
{

View File

@ -8,7 +8,8 @@ const functionLibRouter = {
children: [
{
path: '/function-lib',
name: 'function-lib',
name: 'function-lib-index',
meta: { title: '函数库主页', activeMenu: '/function-lib' },
component: () => import('@/views/function-lib/index.vue')
}
]

View File

@ -133,6 +133,7 @@
.el-card {
--el-card-padding: calc(var(--app-base-px) * 2);
color: var(--el-text-color-regular);
}
.el-dropdown {
color: var(--app-text-color);
@ -267,6 +268,9 @@
.el-select-group .el-select-dropdown__item {
padding-left: 11px;
}
.el-select-dropdown__item {
font-weight: 400;
}
.el-select__caret {
color: var(--app-text-color-secondary);

View File

@ -11,19 +11,24 @@
<el-tab-pane label="基础组件" name="base">
<el-scrollbar height="400">
<template v-for="(item, index) in filter_menu_nodes" :key="index">
<div
class="workflow-dropdown-item cursor flex p-8-12"
@click.stop="clickNodes(item)"
@mousedown.stop="onmousedown(item)"
>
<component :is="iconComponent(`${item.type}-icon`)" class="mr-8 mt-4" :size="32" />
<div class="pre-wrap">
<div class="lighter">{{ item.label }}</div>
<el-text type="info" size="small">{{ item.text }}</el-text>
<div v-if="filter_menu_nodes.length > 0">
<template v-for="(item, index) in filter_menu_nodes" :key="index">
<div
class="workflow-dropdown-item cursor flex p-8-12"
@click.stop="clickNodes(item)"
@mousedown.stop="onmousedown(item)"
>
<component :is="iconComponent(`${item.type}-icon`)" class="mr-8 mt-4" :size="32" />
<div class="pre-wrap">
<div class="lighter">{{ item.label }}</div>
<el-text type="info" size="small">{{ item.text }}</el-text>
</div>
</div>
</div>
</template>
</template>
</div>
<div v-else class="ml-16 mt-8">
<el-text type="info">没有找到相关结果</el-text>
</div>
</el-scrollbar>
</el-tab-pane>
<el-tab-pane label="函数库" name="function">

View File

@ -171,7 +171,7 @@ const applicationList = ref<any[]>([])
const paginationConfig = reactive({
current_page: 1,
page_size: 20,
page_size: 30,
total: 0
})
interface UserOption {
@ -248,18 +248,18 @@ function mapToUrlParams(map: any[]) {
}
function getAccessToken(id: string) {
applicationList.value.filter((app)=>app.id === id)[0]?.work_flow?.nodes
?.filter((v: any) => v.id === 'base-node')
.map((v: any) => {
apiInputParams.value = v.properties.api_input_field_list
? v.properties.api_input_field_list
.map((v: any) => {
return {
name: v.variable,
value: v.default_value
}
})
: v.properties.input_field_list
applicationList.value
.filter((app) => app.id === id)[0]
?.work_flow?.nodes?.filter((v: any) => v.id === 'base-node')
.map((v: any) => {
apiInputParams.value = v.properties.api_input_field_list
? v.properties.api_input_field_list.map((v: any) => {
return {
name: v.variable,
value: v.default_value
}
})
: v.properties.input_field_list
? v.properties.input_field_list
.filter((v: any) => v.assignment_method === 'api_input')
.map((v: any) => {
@ -269,9 +269,11 @@ function getAccessToken(id: string) {
}
})
: []
})
})
const apiParams = mapToUrlParams(apiInputParams.value) ? '?' + mapToUrlParams(apiInputParams.value) : ''
const apiParams = mapToUrlParams(apiInputParams.value)
? '?' + mapToUrlParams(apiInputParams.value)
: ''
application.asyncGetAccessToken(id, loading).then((res: any) => {
window.open(application.location + res?.data?.access_token + apiParams)
})

View File

@ -120,8 +120,14 @@ const open = async (platform: Platform) => {
let defaultCallbackUrl = window.location.origin
switch (platform.key) {
case 'wecom':
if (currentPlatform.config.app_key) {
currentPlatform.config.agent_id = currentPlatform.config.app_key
delete currentPlatform.config.app_key
}
currentPlatform.config.callback_url = `${defaultCallbackUrl}/api/wecom`
break
case 'dingtalk':
if (currentPlatform.config.agent_id && currentPlatform.key === 'dingtalk') {
if (currentPlatform.config.agent_id) {
currentPlatform.config.corp_id = currentPlatform.config.agent_id
delete currentPlatform.config.agent_id
}

View File

@ -48,7 +48,7 @@ import { cloneDeep } from 'lodash'
import { ref, computed } from 'vue'
import EditParagraphDialog from './EditParagraphDialog.vue'
import { MsgConfirm } from '@/utils/message'
const page_size = ref<number>(20)
const page_size = ref<number>(30)
const current_page = ref<number>(1)
const currentCIndex = ref<number>(0)
const EditParagraphDialogRef = ref()

View File

@ -160,7 +160,7 @@ const loading = ref(false)
const datasetList = ref<any[]>([])
const paginationConfig = reactive({
current_page: 1,
page_size: 20,
page_size: 30,
total: 0
})

View File

@ -99,34 +99,44 @@
>全部</el-dropdown-item
>
<el-dropdown-item
:class="filterMethod['status'] === '1' ? 'is-active' : ''"
:class="filterMethod['status'] === State.SUCCESS ? 'is-active' : ''"
class="justify-center"
:command="beforeCommand('status', '1')"
:command="beforeCommand('status', State.SUCCESS)"
>成功</el-dropdown-item
>
<el-dropdown-item
:class="filterMethod['status'] === '2' ? 'is-active' : ''"
:class="filterMethod['status'] === State.FAILURE ? 'is-active' : ''"
class="justify-center"
:command="beforeCommand('status', '2')"
:command="beforeCommand('status', State.FAILURE)"
>失败</el-dropdown-item
>
<el-dropdown-item
:class="filterMethod['status'] === '0' ? 'is-active' : ''"
:class="
filterMethod['status'] === State.STARTED &&
filterMethod['task_type'] == TaskType.EMBEDDING
? 'is-active'
: ''
"
class="justify-center"
:command="beforeCommand('status', '0')"
:command="beforeCommand('status', State.STARTED, TaskType.EMBEDDING)"
>索引中</el-dropdown-item
>
<el-dropdown-item
:class="filterMethod['status'] === '3' ? 'is-active' : ''"
:class="filterMethod['status'] === State.PENDING ? 'is-active' : ''"
class="justify-center"
:command="beforeCommand('status', '3')"
:command="beforeCommand('status', State.PENDING)"
>排队中</el-dropdown-item
>
<el-dropdown-item
:class="filterMethod['status'] === '4' ? 'is-active' : ''"
:class="
filterMethod['status'] === State.STARTED &&
filterMethod['task_type'] === TaskType.GENERATE_PROBLEM
? 'is-active'
: ''
"
class="justify-center"
:command="beforeCommand('status', '4')"
>生成问题中</el-dropdown-item
:command="beforeCommand('status', State.STARTED, TaskType.GENERATE_PROBLEM)"
>生成</el-dropdown-item
>
</el-dropdown-menu>
</template>
@ -177,9 +187,10 @@
<template #default="{ row }">
<div @click.stop>
<el-switch
:loading="loading"
size="small"
v-model="row.is_active"
@change="changeState($event, row)"
:before-change="() => changeState(row)"
/>
</div>
</template>
@ -481,13 +492,18 @@ function openDatasetDialog(row?: any) {
function dropdownHandle(obj: any) {
filterMethod.value[obj.attr] = obj.command
if (obj.attr == 'status') {
filterMethod.value['task_type'] = obj.task_type
}
getList()
}
function beforeCommand(attr: string, val: any) {
function beforeCommand(attr: string, val: any, task_type?: number) {
return {
attr: attr,
command: val
command: val,
task_type
}
}
const cancelTask = (row: any, task_type: number) => {
@ -663,18 +679,24 @@ function deleteDocument(row: any) {
更新名称或状态
*/
function updateData(documentId: string, data: any, msg: string) {
documentApi.putDocument(id, documentId, data, loading).then((res) => {
const index = documentData.value.findIndex((v) => v.id === documentId)
documentData.value.splice(index, 1, res.data)
MsgSuccess(msg)
})
documentApi
.putDocument(id, documentId, data, loading)
.then((res) => {
const index = documentData.value.findIndex((v) => v.id === documentId)
documentData.value.splice(index, 1, res.data)
MsgSuccess(msg)
return true
})
.catch(() => {
return false
})
}
function changeState(bool: Boolean, row: any) {
function changeState(row: any) {
const obj = {
is_active: bool
is_active: !row.is_active
}
const str = bool ? '启用成功' : '禁用成功'
const str = !row.is_active ? '启用成功' : '禁用成功'
currentMouseId.value && updateData(row.id, obj, str)
}

View File

@ -142,7 +142,7 @@ const functionLibList = ref<any[]>([])
const paginationConfig = reactive({
current_page: 1,
page_size: 20,
page_size: 30,
total: 0
})

View File

@ -104,8 +104,9 @@
>
<div class="active-button" @click.stop>
<el-switch
:loading="loading"
v-model="item.is_active"
@change="changeState($event, item)"
:before-change="() => changeState(item)"
size="small"
/>
</div>
@ -162,7 +163,6 @@
<ParagraphDialog ref="ParagraphDialogRef" :title="title" @refresh="refresh" />
<SelectDocumentDialog ref="SelectDocumentDialogRef" @refresh="refreshMigrateParagraph" />
<GenerateRelatedDialog ref="GenerateRelatedDialogRef" @refresh="refresh" />
</LayoutContainer>
</template>
<script setup lang="ts">
@ -198,7 +198,7 @@ const multipleSelection = ref<any[]>([])
const paginationConfig = reactive({
current_page: 1,
page_size: 20,
page_size: 30,
total: 0
})
@ -258,11 +258,20 @@ function searchHandle() {
getParagraphList()
}
function changeState(bool: Boolean, row: any) {
function changeState(row: any) {
const obj = {
is_active: bool
is_active: !row.is_active
}
paragraph.asyncPutParagraph(id, documentId, row.id, obj, changeStateloading).then((res) => {})
paragraph
.asyncPutParagraph(id, documentId, row.id, obj, changeStateloading)
.then((res) => {
const index = paragraphDetail.value.findIndex((v) => v.id === row.id)
paragraphDetail.value[index].is_active = !paragraphDetail.value[index].is_active
return true
})
.catch(() => {
return false
})
}
function deleteParagraph(row: any) {
@ -328,7 +337,6 @@ function refresh(data: any) {
}
}
const GenerateRelatedDialogRef = ref()
function openGenerateDialog(row?: any) {
const arr: string[] = []

View File

@ -2,7 +2,7 @@
<LayoutContainer header="模型设置">
<div class="template-manage flex main-calc-height">
<div class="template-manage__left p-8 border-r">
<h4 style="padding-bottom: 8px">供应商</h4>
<h4 class="p-16">供应商</h4>
<div class="model-list-height-left">
<div
class="all-mode flex cursor"
@ -33,7 +33,7 @@
ref="commonList1"
>
<template #default="{ row }">
<div class="flex">
<div class="flex align-center">
<span
:innerHTML="row.icon"
alt=""
@ -59,7 +59,7 @@
ref="commonList2"
>
<template #default="{ row }">
<div class="flex">
<div class="flex align-center">
<span
:innerHTML="row.icon"
alt=""
@ -301,11 +301,11 @@ onMounted(() => {
}
.model-list-height {
height: calc(var(--create-dataset-height) - 70px);
height: calc(var(--create-dataset-height) - 80px);
}
.model-list-height-left {
height: calc(var(--create-dataset-height));
height: calc(var(--create-dataset-height) - 40px);
}
.all-mode {
padding: 10px 16px;
@ -338,6 +338,9 @@ onMounted(() => {
:deep(.el-collapse-item__wrap) {
border-bottom: none !important;
}
:deep(.el-collapse-item__content) {
padding-bottom: 0 !important;;
}
}
}
</style>

View File

@ -93,9 +93,8 @@
v-if="showAnchor"
@mousemove.stop
@mousedown.stop
@keydown.stop
@click.stop
@wheel.stop
@wheel="handleWheel"
:show="showAnchor"
:id="id"
style="left: 100%; top: 50%; transform: translate(0, -50%)"
@ -142,6 +141,12 @@ const showNode = computed({
return true
}
})
const handleWheel = (event: any) => {
const isCombinationKeyPressed = event.ctrlKey || event.metaKey
if (!isCombinationKeyPressed) {
event.stopPropagation()
}
}
const node_status = computed(() => {
if (props.nodeModel.properties.status) {
return props.nodeModel.properties.status

View File

@ -148,6 +148,15 @@
/>
</el-form-item>
<el-form-item label="历史聊天记录">
<template #label>
<div class="flex-between">
<div>历史聊天记录</div>
<el-select v-model="chat_data.dialogue_type" type="small" style="width: 100px">
<el-option label="节点" value="NODE" />
<el-option label="工作流" value="WORKFLOW" />
</el-select>
</div>
</template>
<el-input-number
v-model="chat_data.dialogue_number"
:min="0"
@ -246,7 +255,8 @@ const form = {
dialogue_number: 1,
is_result: false,
temperature: null,
max_tokens: null
max_tokens: null,
dialogue_type: 'WORKFLOW'
}
const chat_data = computed({
@ -321,8 +331,10 @@ onMounted(() => {
set(props.nodeModel.properties.node_data, 'is_result', true)
}
}
set(props.nodeModel, 'validate', validate)
if (!chat_data.value.dialogue_type) {
chat_data.value.dialogue_type = 'WORKFLOW'
}
})
</script>
<style lang="scss" scoped></style>