Compare commits

..

2 Commits

Author SHA1 Message Date
wxg0103 847755b1c2 feat: add response body schemas for various API endpoints 2025-08-19 15:15:46 +08:00
wxg0103 b57455d0ee refactor: expand permissions for application access token
--bug=1060032 --user=王孝刚 【桂物智慧】 api 调用客户端修改对话摘要接口,报错403,没有访问权限 https://www.tapd.cn/57709429/s/1757188
2025-08-19 11:06:36 +08:00
84 changed files with 528 additions and 734 deletions

View File

@ -33,13 +33,13 @@ jobs:
- name: Checkout
uses: actions/checkout@v4
with:
ref: v1
ref: main
- name: Prepare
id: prepare
run: |
DOCKER_IMAGE=ghcr.io/1panel-dev/maxkb-python-pg
DOCKER_PLATFORMS=${{ github.event.inputs.architecture }}
TAG_NAME=python3.11-pg15.14
TAG_NAME=python3.11-pg15.8
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
echo ::set-output name=docker_image::${DOCKER_IMAGE}
echo ::set-output name=version::${TAG_NAME}

View File

@ -24,7 +24,7 @@ MaxKB = Max Knowledge Brain, it is an open-source platform for building enterpri
Execute the script below to start a MaxKB container using Docker:
```bash
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages 1panel/maxkb:v1
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages 1panel/maxkb
```
Access MaxKB web interface at `http://your_server_ip:8080` with default admin credentials:
@ -32,7 +32,7 @@ Access MaxKB web interface at `http://your_server_ip:8080` with default admin cr
- username: admin
- password: MaxKB@123..
中国用户如遇到 Docker 镜像 Pull 失败问题,请参照该 [离线安装文档](https://maxkb.cn/docs/v1/installation/offline_installtion/) 进行安装。
中国用户如遇到 Docker 镜像 Pull 失败问题,请参照该 [离线安装文档](https://maxkb.cn/docs/installation/offline_installtion/) 进行安装。
## Screenshots

View File

@ -14,12 +14,12 @@
</p>
<hr/>
MaxKB = Max Knowledge Brain是一个强大易用的企业级智能体平台致力于解决企业 AI 落地面临的技术门槛高、部署成本高、迭代周期长等问题,助力企业在人工智能时代赢得先机。秉承“开箱即用,伴随成长”的设计理念,MaxKB 支持企业快速接入主流大模型高效构建专属知识库并提供从基础问答RAG、复杂流程自动化工作流到智能体Agent的渐进式升级路径全面赋能智能客服、智能办公助手等多种应用场景。
MaxKB = Max Knowledge Brain是一款强大易用的企业级智能体平台,支持 RAG 检索增强生成、工作流编排、MCP 工具调用能力。MaxKB 支持对接各种主流大语言模型,广泛应用于智能客服、企业内部知识库问答、员工助手、学术研究与教育等场景。
- **RAG 检索增强生成**:高效搭建本地 AI 知识库,支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化,有效减少大模型幻觉,提升问答效果;
- **灵活编排**:内置强大的工作流引擎、函数库和 MCP 工具调用能力,支持编排 AI 工作过程,满足复杂业务场景下的需求;
- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度;
- **模型中立**支持对接各种大模型包括本地私有大模型DeepSeek R1 / Qwen 3 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等和国外公共大模型OpenAI / Claude / Gemini 等)。
- **模型中立**支持对接各种大模型包括本地私有大模型DeepSeek R1 / Llama 3 / Qwen 2 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等和国外公共大模型OpenAI / Claude / Gemini 等)。
MaxKB 三分钟视频介绍https://www.bilibili.com/video/BV18JypYeEkj/
@ -27,10 +27,10 @@ MaxKB 三分钟视频介绍https://www.bilibili.com/video/BV18JypYeEkj/
```
# Linux 机器
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb:v1
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb
# Windows 机器
docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/postgresql/data -v C:/python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb:v1
docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/postgresql/data -v C:/python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb
# 用户名: admin
# 密码: MaxKB@123..
@ -38,8 +38,8 @@ docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/po
- 你也可以通过 [1Panel 应用商店](https://apps.fit2cloud.com/1panel) 快速部署 MaxKB
- 如果是内网环境,推荐使用 [离线安装包](https://community.fit2cloud.com/#/products/maxkb/downloads) 进行安装部署;
- MaxKB 不同产品版本的对比请参见:[MaxKB 产品版本对比](https://maxkb.cn/price)
- 如果您需要向团队介绍 MaxKB可以使用这个 [官方 PPT 材料](https://fit2cloud.com/maxkb/download/introduce-maxkb_202507.pdf)。
- MaxKB 产品版本分为社区版和专业版,详情请参见:[MaxKB 产品版本对比](https://maxkb.cn/pricing.html)
- 如果您需要向团队介绍 MaxKB可以使用这个 [官方 PPT 材料](https://maxkb.cn/download/introduce-maxkb_202503.pdf)。
如你有更多问题,可以查看使用手册,或者通过论坛与我们交流。

View File

@ -11,6 +11,7 @@ import json
import re
import time
from functools import reduce
from types import AsyncGeneratorType
from typing import List, Dict
from django.db.models import QuerySet
@ -32,26 +33,14 @@ tool_message_template = """
<strong>Called MCP Tool: <em>%s</em></strong>
</summary>
%s
</details>
"""
tool_message_json_template = """
```json
%s
```
</details>
"""
def generate_tool_message_template(name, context):
    """Build the rendered MCP tool message for ``name``.

    If ``context`` already contains a fenced code block it is embedded as-is;
    otherwise it is first wrapped in the JSON code-fence template so raw tool
    output renders as a JSON snippet.
    """
    body = context if '```' in context else tool_message_json_template % (context)
    return tool_message_template % (name, body)
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
reasoning_content: str):
chat_model = node_variable.get('chat_model')
@ -120,7 +109,7 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers):
response = agent.astream({"messages": message_list}, stream_mode='messages')
async for chunk in response:
if isinstance(chunk[0], ToolMessage):
content = generate_tool_message_template(chunk[0].name, chunk[0].content)
content = tool_message_template % (chunk[0].name, chunk[0].content)
chunk[0].content = content
yield chunk[0]
if isinstance(chunk[0], AIMessageChunk):
@ -199,7 +188,6 @@ class BaseChatNode(IChatNode):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
self.context['reasoning_content'] = details.get('reasoning_content')
self.context['model_setting'] = details.get('model_setting')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
@ -286,7 +274,6 @@ class BaseChatNode(IChatNode):
"index": index,
'run_time': self.context.get('run_time'),
'system': self.context.get('system'),
'model_setting': self.context.get('model_setting'),
'history_message': [{'content': message.content, 'role': message.type} for message in
(self.context.get('history_message') if self.context.get(
'history_message') is not None else [])],

View File

@ -66,7 +66,7 @@ class BaseDocumentExtractNode(IDocumentExtractNode):
for doc in document:
file = QuerySet(File).filter(id=doc['file_id']).first()
buffer = io.BytesIO(file.get_byte())
buffer = io.BytesIO(file.get_byte().tobytes())
buffer.name = doc['name'] # this is the important line
for split_handle in (parse_table_handle_list + split_handles):

View File

@ -45,8 +45,6 @@ def get_field_value(debug_field_list, name, is_required):
def valid_reference_value(_type, value, name):
if value is None:
return
if _type == 'int':
instance_type = int | float
elif _type == 'float':
@ -72,17 +70,10 @@ def convert_value(name: str, value, _type, is_required, source, node):
if not is_required and source == 'reference' and (value is None or len(value) == 0):
return None
if source == 'reference':
if value and isinstance(value, list) and len(value) == 0:
if not is_required:
return None
else:
raise Exception(f"字段:{name}类型:{_type}值:{value}必填参数")
value = node.workflow_manage.get_reference_field(
value[0],
value[1:])
valid_reference_value(_type, value, name)
if value is None:
return None
if _type == 'int':
return int(value)
if _type == 'float':

View File

@ -32,8 +32,6 @@ def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
def valid_reference_value(_type, value, name):
if value is None:
return
if _type == 'int':
instance_type = int | float
elif _type == 'float':
@ -54,17 +52,10 @@ def convert_value(name: str, value, _type, is_required, source, node):
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
return None
if source == 'reference':
if value and isinstance(value, list) and len(value) == 0:
if not is_required:
return None
else:
raise Exception(f"字段:{name}类型:{_type}值:{value}必填参数")
value = node.workflow_manage.get_reference_field(
value[0],
value[1:])
valid_reference_value(_type, value, name)
if value is None:
return None
if _type == 'int':
return int(value)
if _type == 'float':

View File

@ -62,7 +62,7 @@ def file_id_to_base64(file_id: str):
file = QuerySet(File).filter(id=file_id).first()
file_bytes = file.get_byte()
base64_image = base64.b64encode(file_bytes).decode("utf-8")
return [base64_image, what(None, file_bytes)]
return [base64_image, what(None, file_bytes.tobytes())]
class BaseImageUnderstandNode(IImageUnderstandNode):
@ -172,7 +172,7 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
file = QuerySet(File).filter(id=file_id).first()
image_bytes = file.get_byte()
base64_image = base64.b64encode(image_bytes).decode("utf-8")
image_format = what(None, image_bytes)
image_format = what(None, image_bytes.tobytes())
images.append({'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
messages = [HumanMessage(
content=[

View File

@ -14,6 +14,8 @@ class BaseMcpNode(IMcpNode):
self.context['result'] = details.get('result')
self.context['tool_params'] = details.get('tool_params')
self.context['mcp_tool'] = details.get('mcp_tool')
if self.node_params.get('is_result', False):
self.answer_text = details.get('result')
def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
servers = json.loads(mcp_servers)

View File

@ -18,7 +18,6 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['result'] = details.get('answer')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
@ -32,7 +31,7 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
# 根据 file_name 把文件转成 mp3 格式
file_format = file.file_name.split('.')[-1]
with tempfile.NamedTemporaryFile(delete=False, suffix=f'.{file_format}') as temp_file:
temp_file.write(file.get_byte())
temp_file.write(file.get_byte().tobytes())
temp_file_path = temp_file.name
with tempfile.NamedTemporaryFile(delete=False, suffix='.mp3') as temp_amr_file:
temp_mp3_path = temp_amr_file.name

View File

@ -37,7 +37,6 @@ def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
class BaseTextToSpeechNode(ITextToSpeechNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['result'] = details.get('result')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
@ -74,5 +73,4 @@ class BaseTextToSpeechNode(ITextToSpeechNode):
'content': self.context.get('content'),
'err_message': self.err_message,
'answer': self.context.get('answer'),
'result': self.context.get('result')
}

View File

@ -298,8 +298,8 @@ class WorkflowManage:
if global_fields is not None:
for global_field in global_fields:
global_field_list.append({**global_field, 'node_id': node_id, 'node_name': node_name})
field_list.sort(key=lambda f: len(f.get('node_name') + f.get('value')), reverse=True)
global_field_list.sort(key=lambda f: len(f.get('node_name') + f.get('value')), reverse=True)
field_list.sort(key=lambda f: len(f.get('node_name')), reverse=True)
global_field_list.sort(key=lambda f: len(f.get('node_name')), reverse=True)
self.field_list = field_list
self.global_field_list = global_field_list
@ -755,10 +755,7 @@ class WorkflowManage:
if node_id == 'global':
return INode.get_field(self.context, fields)
else:
node = self.get_node_by_id(node_id)
if node:
return node.get_reference_field(fields)
return None
return self.get_node_by_id(node_id).get_reference_field(fields)
def get_workflow_content(self):
context = {

View File

@ -1,8 +1,9 @@
# Generated by Django 4.2.15 on 2024-09-18 16:14
import logging
import psycopg
import psycopg2
from django.db import migrations
from psycopg2 import extensions
from smartdoc.const import CONFIG
@ -16,7 +17,7 @@ def get_connect(db_name):
"port": CONFIG.get('DB_PORT')
}
# 建立连接
connect = psycopg.connect(**conn_params)
connect = psycopg2.connect(**conn_params)
return connect
@ -27,7 +28,7 @@ def sql_execute(conn, reindex_sql: str, alter_database_sql: str):
@param conn:
@param alter_database_sql:
"""
conn.autocommit = True
conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)
with conn.cursor() as cursor:
cursor.execute(reindex_sql, [])
cursor.execute(alter_database_sql, [])

View File

@ -16,7 +16,6 @@ import re
import uuid
from functools import reduce
from typing import Dict, List
from django.contrib.postgres.fields import ArrayField
from django.core import cache, validators
from django.core import signing
@ -25,8 +24,8 @@ from django.db.models import QuerySet
from django.db.models.expressions import RawSQL
from django.http import HttpResponse
from django.template import Template, Context
from django.utils.translation import gettext_lazy as _, get_language, to_locale
from langchain_mcp_adapters.client import MultiServerMCPClient
from mcp.client.sse import sse_client
from rest_framework import serializers, status
from rest_framework.utils.formatting import lazy_format
@ -39,7 +38,7 @@ from common.config.embedding_config import VectorStore
from common.constants.authentication_type import AuthenticationType
from common.db.search import get_dynamics_model, native_search, native_page_search
from common.db.sql_execute import select_list
from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed
from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed, ChatException
from common.field.common import UploadedImageField, UploadedFileField
from common.models.db_model_manage import DBModelManage
from common.response import result
@ -58,6 +57,7 @@ from setting.models_provider.tools import get_model_instance_by_model_user_id
from setting.serializers.provider_serializers import ModelSerializer
from smartdoc.conf import PROJECT_DIR
from users.models import User
from django.utils.translation import gettext_lazy as _, get_language, to_locale
chat_cache = cache.caches['chat_cache']
@ -1328,9 +1328,6 @@ class ApplicationSerializer(serializers.Serializer):
if '"stdio"' in self.data.get('mcp_servers'):
raise AppApiException(500, _('stdio is not supported'))
servers = json.loads(self.data.get('mcp_servers'))
for server, config in servers.items():
if config.get('transport') not in ['sse', 'streamable_http']:
raise AppApiException(500, _('Only support transport=sse or transport=streamable_http'))
async def get_mcp_tools(servers):
async with MultiServerMCPClient(servers) as client:

View File

@ -395,14 +395,13 @@ class ChatMessageSerializer(serializers.Serializer):
work_flow_manage = WorkflowManage(Flow.new_instance(chat_info.work_flow_version.work_flow),
{'history_chat_record': history_chat_record, 'question': message,
'chat_id': chat_info.chat_id, 'chat_record_id': str(
uuid.uuid1()) if chat_record is None else str(chat_record.id),
uuid.uuid1()) if chat_record is None else chat_record.id,
'stream': stream,
're_chat': re_chat,
'client_id': client_id,
'client_type': client_type,
'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type),
base_to_response, form_data, image_list, document_list, audio_list,
other_list,
base_to_response, form_data, image_list, document_list, audio_list, other_list,
self.data.get('runtime_node_id'),
self.data.get('node_data'), chat_record, self.data.get('child_node'))
r = work_flow_manage.run()

View File

@ -222,8 +222,7 @@ class ChatSerializers(serializers.Serializer):
reference_paragraph,
"\n".join([
f"{improve_paragraph_list[index].get('title')}\n{improve_paragraph_list[index].get('content')}"
for index in range(len(improve_paragraph_list))
]) if improve_paragraph_list is not None else "",
for index in range(len(improve_paragraph_list))]),
row.get('asker').get('user_name'),
row.get('message_tokens') + row.get('answer_tokens'), row.get('run_time'),
str(row.get('create_time').astimezone(pytz.timezone(TIME_ZONE)).strftime('%Y-%m-%d %H:%M:%S')

View File

@ -38,6 +38,15 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_STRING,
title=_("Application authentication token"),
description=_("Application authentication token"),
default="token"
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
@ -133,6 +142,27 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
description=_("Primary key id")),
'secret_key': openapi.Schema(type=openapi.TYPE_STRING, title=_("Secret key"),
description=_("Secret key")),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
description=_("Is activation")),
'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"),
description=_("Application ID")),
'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN,
title=_("Is cross-domain allowed"),
description=_("Is cross-domain allowed")),
'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'),
items=openapi.Schema(type=openapi.TYPE_STRING))
}
)
class AccessToken(ApiMixin):
@staticmethod
def get_request_params_api():
@ -171,6 +201,37 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=[],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
description=_("Primary key id")),
'access_token': openapi.Schema(type=openapi.TYPE_STRING, title=_("Access Token"),
description=_("Access Token")),
'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"),
description=_("Reset Token")),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
description=_("Is activation")),
'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"),
description=_("Number of visits")),
'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"),
description=_("Whether to enable whitelist")),
'white_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"),
description=_("Whitelist")),
'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN,
title=_("Whether to display knowledge sources"),
description=_("Whether to display knowledge sources")),
'language': openapi.Schema(type=openapi.TYPE_STRING,
title=_("language"),
description=_("language"))
}
)
class Edit(ApiMixin):
@staticmethod
def get_request_body_api():
@ -302,19 +363,7 @@ class ApplicationApi(ApiMixin):
'no_references_prompt': openapi.Schema(type=openapi.TYPE_STRING,
title=_("No citation segmentation prompt"),
default="{question}",
description=_("No citation segmentation prompt")),
'reasoning_content_enable': openapi.Schema(type=openapi.TYPE_BOOLEAN,
title=_("Reasoning enable"),
default=False,
description=_("Reasoning enable")),
'reasoning_content_end': openapi.Schema(type=openapi.TYPE_STRING,
title=_("Reasoning end tag"),
default="</think>",
description=_("Reasoning end tag")),
"reasoning_content_start": openapi.Schema(type=openapi.TYPE_STRING,
title=_("Reasoning start tag"),
default="<think>",
description=_("Reasoning start tag"))
description=_("No citation segmentation prompt"))
}
)
@ -379,6 +428,56 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type',
'work_flow'],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
description=_("Primary key id")),
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
description=_("Application Name")),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
description=_("Application Description")),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
description=_("Model id")),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
title=_("Number of multi-round conversations"),
description=_("Number of multi-round conversations")),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
description=_("Opening remarks")),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
title=_("List of associated knowledge base IDs"),
description=_("List of associated knowledge base IDs")),
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
description=_("Problem Optimization"), default=True),
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
description=_("Application Type SIMPLE | WORK_FLOW")),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
title=_('Question optimization tips'),
description=_("Question optimization tips"),
default=_(
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"),
description=_("Text-to-speech model ID")),
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"),
description=_("Speech-to-text model id")),
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"),
description=_("Is speech-to-text enabled")),
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"),
description=_("Is text-to-speech enabled")),
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"),
description=_("Text-to-speech type")),
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
}
)
class Query(ApiMixin):
@staticmethod
def get_request_params_api():

View File

@ -319,6 +319,15 @@ class ChatApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_STRING,
title=_("Conversation ID"),
description=_("Conversation ID"),
default="chat_id"
)
@staticmethod
def get_request_params_api():
return [openapi.Parameter(name='application_id',
@ -326,6 +335,11 @@ class ChatApi(ApiMixin):
type=openapi.TYPE_STRING,
required=True,
description=_('Application ID')),
openapi.Parameter(name='history_day',
in_=openapi.IN_QUERY,
type=openapi.TYPE_NUMBER,
required=True,
description=_('Historical days')),
openapi.Parameter(name='abstract', in_=openapi.IN_QUERY, type=openapi.TYPE_STRING, required=False,
description=_("abstract")),
openapi.Parameter(name='min_star', in_=openapi.IN_QUERY, type=openapi.TYPE_INTEGER, required=False,

View File

@ -7,6 +7,16 @@
@desc:
"""
from django.core import cache
from django.http import HttpResponse
from django.utils.translation import gettext_lazy as _, gettext
from drf_yasg.utils import swagger_auto_schema
from langchain_core.prompts import PromptTemplate
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser
from rest_framework.request import Request
from rest_framework.views import APIView
from application.serializers.application_serializers import ApplicationSerializer
from application.serializers.application_statistics_serializers import ApplicationStatisticsSerializer
from application.swagger_api.application_api import ApplicationApi
@ -21,14 +31,6 @@ from common.response import result
from common.swagger_api.common_api import CommonApi
from common.util.common import query_params_to_single_dict
from dataset.serializers.dataset_serializers import DataSetSerializers
from django.core import cache
from django.http import HttpResponse
from django.utils.translation import gettext_lazy as _
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser
from rest_framework.request import Request
from rest_framework.views import APIView
chat_cache = cache.caches['chat_cache']
@ -371,7 +373,8 @@ class Application(APIView):
operation_id=_("Modify application API_KEY"),
tags=[_('Application/API_KEY')],
manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api(),
request_body=ApplicationApi.ApiKey.Operate.get_request_body_api())
request_body=ApplicationApi.ApiKey.Operate.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.ApiKey.Operate.get_response_body_api()))
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@ -413,7 +416,8 @@ class Application(APIView):
operation_id=_("Modify Application AccessToken"),
tags=[_('Application/Public Access')],
manual_parameters=ApplicationApi.AccessToken.get_request_params_api(),
request_body=ApplicationApi.AccessToken.get_request_body_api())
request_body=ApplicationApi.AccessToken.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.AccessToken.get_response_body_api()))
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@ -453,6 +457,7 @@ class Application(APIView):
@swagger_auto_schema(operation_summary=_("Application Certification"),
operation_id=_("Application Certification"),
request_body=ApplicationApi.Authentication.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.Authentication.get_response_body_api()),
tags=[_("Application/Certification")],
security=[])
def post(self, request: Request):
@ -470,6 +475,7 @@ class Application(APIView):
@swagger_auto_schema(operation_summary=_("Create an application"),
operation_id=_("Create an application"),
request_body=ApplicationApi.Create.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.Create.get_response_body_api()),
tags=[_('Application')])
@has_permissions(PermissionConstants.APPLICATION_CREATE, compare=CompareConstants.AND)
@log(menu='Application', operate="Create an application",
@ -492,7 +498,7 @@ class Application(APIView):
class HitTest(APIView):
authentication_classes = [TokenAuth]
@action(methods="PUT", detail=False)
@action(methods="GET", detail=False)
@swagger_auto_schema(operation_summary=_("Hit Test List"), operation_id=_("Hit Test List"),
manual_parameters=CommonApi.HitTestApi.get_request_params_api(),
responses=result.get_api_array_response(CommonApi.HitTestApi.get_response_body_api()),
@ -503,15 +509,15 @@ class Application(APIView):
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND))
def put(self, request: Request, application_id: str):
return result.success(ApplicationSerializer.HitTest(data={
'id': application_id,
'user_id': request.user.id,
"query_text": request.data.get("query_text"),
"top_number": request.data.get("top_number"),
'similarity': request.data.get('similarity'),
'search_mode': request.data.get('search_mode')}
).hit_test())
def get(self, request: Request, application_id: str):
return result.success(
ApplicationSerializer.HitTest(data={'id': application_id, 'user_id': request.user.id,
"query_text": request.query_params.get("query_text"),
"top_number": request.query_params.get("top_number"),
'similarity': request.query_params.get('similarity'),
'search_mode': request.query_params.get(
'search_mode')}).hit_test(
))
class Publish(APIView):
authentication_classes = [TokenAuth]

View File

@ -94,6 +94,7 @@ class ChatView(APIView):
@swagger_auto_schema(operation_summary=_("Get the workflow temporary session id"),
operation_id=_("Get the workflow temporary session id"),
request_body=ChatApi.OpenWorkFlowTemp.get_request_body_api(),
responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
tags=[_("Application/Chat")])
def post(self, request: Request):
return result.success(ChatSerializers.OpenWorkFlowChat(
@ -106,6 +107,7 @@ class ChatView(APIView):
@swagger_auto_schema(operation_summary=_("Get a temporary session id"),
operation_id=_("Get a temporary session id"),
request_body=ChatApi.OpenTempChat.get_request_body_api(),
responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
tags=[_("Application/Chat")])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):
@ -239,6 +241,7 @@ class ChatView(APIView):
@swagger_auto_schema(operation_summary=_("Client modifies dialogue summary"),
operation_id=_("Client modifies dialogue summary"),
request_body=ChatClientHistoryApi.Operate.ReAbstract.get_request_body_api(),
responses=result.get_default_response(),
tags=[_("Application/Conversation Log")])
@has_permissions(ViewPermission(
[RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.ADMIN, RoleConstants.USER],
@ -418,6 +421,7 @@ class ChatView(APIView):
operation_id=_("Add to Knowledge Base"),
manual_parameters=ImproveApi.get_request_params_api_post(),
request_body=ImproveApi.get_request_body_api_post(),
responses=result.get_default_response(),
tags=[_("Application/Conversation Log/Add to Knowledge Base")]
)
@has_permissions(

View File

@ -6,18 +6,18 @@
@date2024/3/14 03:02
@desc: 用户认证
"""
from django.core import cache
from django.db.models import QuerySet
from django.utils.translation import gettext_lazy as _
from common.auth.handle.auth_base_handle import AuthBaseHandle
from common.constants.authentication_type import AuthenticationType
from common.constants.permission_constants import RoleConstants, get_permission_list_by_role, Auth
from common.exception.app_exception import AppAuthenticationFailed
from smartdoc.const import CONFIG
from smartdoc.settings import JWT_AUTH
from users.models import User
from users.models.user import get_user_dynamics_permission
from django.core import cache
from users.models.user import get_user_dynamics_permission
from django.utils.translation import gettext_lazy as _
token_cache = cache.caches['token_cache']
@ -35,7 +35,7 @@ class UserToken(AuthBaseHandle):
auth_details = get_token_details()
user = QuerySet(User).get(id=auth_details['id'])
# 续期
token_cache.touch(token, timeout=CONFIG.get_session_timeout())
token_cache.touch(token, timeout=JWT_AUTH['JWT_EXPIRATION_DELTA'].total_seconds())
rule = RoleConstants[user.role]
permission_list = get_permission_list_by_role(RoleConstants[user.role])
# 获取用户的应用和知识库的权限

View File

@ -24,7 +24,6 @@ from common.util.file_util import get_file_content
from common.util.lock import try_lock, un_lock
from common.util.page_utils import page_desc
from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping, TaskType, State
from dataset.serializers.common_serializers import create_dataset_index
from embedding.models import SourceType, SearchMode
from smartdoc.conf import PROJECT_DIR
from django.utils.translation import gettext_lazy as _
@ -282,8 +281,6 @@ class ListenerManagement:
ListenerManagement.get_aggregation_document_status(
document_id)),
is_the_task_interrupted)
# 检查是否存在索引
create_dataset_index(document_id=document_id)
except Exception as e:
max_kb_error.error(_('Vectorized document: {document_id} error {error} {traceback}').format(
document_id=document_id, error=str(e), traceback=traceback.format_exc()))

View File

@ -15,21 +15,33 @@ from django.utils.translation import gettext_lazy as _
class CommonApi:
class HitTestApi(ApiMixin):
@staticmethod
def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['query_text', 'top_number', 'similarity', 'search_mode'],
properties={
'query_text': openapi.Schema(type=openapi.TYPE_STRING, title=_('query text'),
description=_('query text')),
'top_number': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('top number'),
description=_('top number')),
'similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('similarity'),
description=_('similarity')),
'search_mode': openapi.Schema(type=openapi.TYPE_STRING, title=_('search mode'),
description=_('search mode'))
}
)
def get_request_params_api():
return [
openapi.Parameter(name='query_text',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
required=True,
description=_('query text')),
openapi.Parameter(name='top_number',
in_=openapi.IN_QUERY,
type=openapi.TYPE_NUMBER,
default=10,
required=True,
description='topN'),
openapi.Parameter(name='similarity',
in_=openapi.IN_QUERY,
type=openapi.TYPE_NUMBER,
default=0.6,
required=True,
description=_('similarity')),
openapi.Parameter(name='search_mode',
in_=openapi.IN_QUERY,
type=openapi.TYPE_STRING,
default="embedding",
required=True,
description=_('Retrieval pattern embedding|keywords|blend')
)
]
@staticmethod
def get_response_body_api():

View File

@ -3,7 +3,6 @@ import logging
import re
import traceback
from functools import reduce
from pathlib import Path
from typing import List, Set
from urllib.parse import urljoin, urlparse, ParseResult, urlsplit, urlunparse
@ -53,28 +52,6 @@ def remove_fragment(url: str) -> str:
return urlunparse(modified_url)
def remove_last_path_robust(url):
"""健壮地删除URL的最后一个路径部分"""
parsed = urlparse(url)
# 分割路径并过滤空字符串
paths = [p for p in parsed.path.split('/') if p]
if paths:
paths.pop() # 移除最后一个路径
# 重建路径
new_path = '/' + '/'.join(paths) if paths else '/'
# 重建URL
return urlunparse((
parsed.scheme,
parsed.netloc,
new_path,
parsed.params,
parsed.query,
parsed.fragment
))
class Fork:
class Response:
def __init__(self, content: str, child_link_list: List[ChildLink], status, message: str):
@ -93,8 +70,6 @@ class Fork:
def __init__(self, base_fork_url: str, selector_list: List[str]):
base_fork_url = remove_fragment(base_fork_url)
if any([True for end_str in ['index.html', '.htm', '.html'] if base_fork_url.endswith(end_str)]):
base_fork_url =remove_last_path_robust(base_fork_url)
self.base_fork_url = urljoin(base_fork_url if base_fork_url.endswith("/") else base_fork_url + '/', '.')
parsed = urlsplit(base_fork_url)
query = parsed.query
@ -162,30 +137,18 @@ class Fork:
html_content = response.content.decode(encoding)
beautiful_soup = BeautifulSoup(html_content, "html.parser")
meta_list = beautiful_soup.find_all('meta')
charset_list = Fork.get_charset_list(meta_list)
charset_list = [meta.attrs.get('charset') for meta in meta_list if
meta.attrs is not None and 'charset' in meta.attrs]
if len(charset_list) > 0:
charset = charset_list[0]
if charset != encoding:
try:
html_content = response.content.decode(charset, errors='replace')
html_content = response.content.decode(charset)
except Exception as e:
logging.getLogger("max_kb").error(f'{e}: {traceback.format_exc()}')
logging.getLogger("max_kb").error(f'{e}')
return BeautifulSoup(html_content, "html.parser")
return beautiful_soup
@staticmethod
def get_charset_list(meta_list):
charset_list = []
for meta in meta_list:
if meta.attrs is not None:
if 'charset' in meta.attrs:
charset_list.append(meta.attrs.get('charset'))
elif meta.attrs.get('http-equiv', '').lower() == 'content-type' and 'content' in meta.attrs:
match = re.search(r'charset=([^\s;]+)', meta.attrs['content'], re.I)
if match:
charset_list.append(match.group(1))
return charset_list
def fork(self):
try:
@ -212,4 +175,4 @@ class Fork:
def handler(base_url, response: Fork.Response):
print(base_url.url, base_url.tag.text if base_url.tag else None, response.content)
# ForkManage('https://hzqcgc.htc.edu.cn/jxky.htm', ['.md-content']).fork(3, set(), handler)
# ForkManage('https://bbs.fit2cloud.com/c/de/6', ['.md-content']).fork(3, set(), handler)

View File

@ -18,13 +18,13 @@ from rest_framework import serializers
from common.config.embedding_config import ModelManage
from common.db.search import native_search
from common.db.sql_execute import update_execute, sql_execute
from common.db.sql_execute import update_execute
from common.exception.app_exception import AppApiException
from common.mixins.api_mixin import ApiMixin
from common.util.field_message import ErrMessage
from common.util.file_util import get_file_content
from common.util.fork import Fork
from dataset.models import Paragraph, Problem, ProblemParagraphMapping, DataSet, File, Image, Document
from dataset.models import Paragraph, Problem, ProblemParagraphMapping, DataSet, File, Image
from setting.models_provider import get_model
from smartdoc.conf import PROJECT_DIR
from django.utils.translation import gettext_lazy as _
@ -224,46 +224,6 @@ def get_embedding_model_id_by_dataset_id_list(dataset_id_list: List):
return str(dataset_list[0].embedding_mode_id)
def create_dataset_index(dataset_id=None, document_id=None):
if dataset_id is None and document_id is None:
raise AppApiException(500, _('Dataset ID or Document ID must be provided'))
if dataset_id is not None:
k_id = dataset_id
else:
document = QuerySet(Document).filter(id=document_id).first()
k_id = document.dataset_id
sql = f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'embedding' AND indexname = 'embedding_hnsw_idx_{k_id}'"
index = sql_execute(sql, [])
if not index:
sql = f"SELECT vector_dims(embedding) AS dims FROM embedding WHERE dataset_id = '{k_id}' LIMIT 1"
result = sql_execute(sql, [])
if len(result) == 0:
return
dims = result[0]['dims']
sql = f"""CREATE INDEX "embedding_hnsw_idx_{k_id}" ON embedding USING hnsw ((embedding::vector({dims})) vector_cosine_ops) WHERE dataset_id = '{k_id}'"""
update_execute(sql, [])
def drop_dataset_index(dataset_id=None, document_id=None):
if dataset_id is None and document_id is None:
raise AppApiException(500, _('Dataset ID or Document ID must be provided'))
if dataset_id is not None:
k_id = dataset_id
else:
document = QuerySet(Document).filter(id=document_id).first()
k_id = document.dataset_id
sql = f"SELECT indexname, indexdef FROM pg_indexes WHERE tablename = 'embedding' AND indexname = 'embedding_hnsw_idx_{k_id}'"
index = sql_execute(sql, [])
if index:
sql = f'DROP INDEX "embedding_hnsw_idx_{k_id}"'
update_execute(sql, [])
class GenerateRelatedSerializer(ApiMixin, serializers.Serializer):
model_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('Model id')))
prompt = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_('Prompt word')))

View File

@ -44,7 +44,7 @@ from dataset.models.data_set import DataSet, Document, Paragraph, Problem, Type,
State, File, Image
from dataset.serializers.common_serializers import list_paragraph, MetaSerializer, ProblemParagraphManage, \
get_embedding_model_by_dataset_id, get_embedding_model_id_by_dataset_id, write_image, zip_dir, \
GenerateRelatedSerializer, drop_dataset_index
GenerateRelatedSerializer
from dataset.serializers.document_serializers import DocumentSerializers, DocumentInstanceSerializer
from dataset.task import sync_web_dataset, sync_replace_web_dataset, generate_related_by_dataset_id
from embedding.models import SearchMode
@ -526,7 +526,7 @@ class DataSetSerializers(serializers.ModelSerializer):
def get_request_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['name', 'desc', 'embedding_mode_id'],
required=['name', 'desc'],
properties={
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset name'),
description=_('dataset name')),
@ -788,7 +788,6 @@ class DataSetSerializers(serializers.ModelSerializer):
QuerySet(ProblemParagraphMapping).filter(dataset=dataset).delete()
QuerySet(Paragraph).filter(dataset=dataset).delete()
QuerySet(Problem).filter(dataset=dataset).delete()
drop_dataset_index(dataset_id=dataset.id)
dataset.delete()
delete_embedding_by_dataset(self.data.get('id'))
return True

View File

@ -141,8 +141,7 @@ class DocumentEditInstanceSerializer(ApiMixin, serializers.Serializer):
if 'meta' in self.data and self.data.get('meta') is not None:
dataset_meta_valid_map = self.get_meta_valid_map()
valid_class = dataset_meta_valid_map.get(document.type)
if valid_class is not None:
valid_class(data=self.data.get('meta')).is_valid(raise_exception=True)
valid_class(data=self.data.get('meta')).is_valid(raise_exception=True)
class DocumentWebInstanceSerializer(ApiMixin, serializers.Serializer):
@ -809,40 +808,27 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['create_time', 'update_time', 'id', 'name', 'char_length', 'status', 'is_active',
'type', 'meta', 'dataset_id', 'hit_handling_method', 'directly_return_similarity',
'status_meta', 'paragraph_count'],
required=['id', 'name', 'char_length', 'user_id', 'paragraph_count', 'is_active'
'update_time', 'create_time'],
properties={
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
description=_('create time'),
default="1970-01-01 00:00:00"),
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
description=_('update time'),
default="1970-01-01 00:00:00"),
'id': openapi.Schema(type=openapi.TYPE_STRING, title="id",
description="id", default="xx"),
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('name'),
description=_('name'), default="xx"),
'char_length': openapi.Schema(type=openapi.TYPE_INTEGER, title=_('char length'),
description=_('char length'), default=10),
'status':openapi.Schema(type=openapi.TYPE_STRING, title=_('status'),
description=_('status'), default="xx"),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
description=_('Is active'), default=True),
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('type'),
description=_('type'), default="xx"),
'meta': openapi.Schema(type=openapi.TYPE_OBJECT, title=_('meta'),
description=_('meta'), default="{}"),
'dataset_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('dataset_id'),
description=_('dataset_id'), default="xx"),
'hit_handling_method': openapi.Schema(type=openapi.TYPE_STRING, title=_('hit_handling_method'),
description=_('hit_handling_method'), default="xx"),
'directly_return_similarity': openapi.Schema(type=openapi.TYPE_NUMBER, title=_('directly_return_similarity'),
description=_('directly_return_similarity'), default="xx"),
'status_meta': openapi.Schema(type=openapi.TYPE_OBJECT, title=_('status_meta'),
description=_('status_meta'), default="{}"),
'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
'paragraph_count': openapi.Schema(type=openapi.TYPE_INTEGER, title="_('document count')",
description="_('document count')", default=1),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
description=_('Is active'), default=True),
'update_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('update time'),
description=_('update time'),
default="1970-01-01 00:00:00"),
'create_time': openapi.Schema(type=openapi.TYPE_STRING, title=_('create time'),
description=_('create time'),
default="1970-01-01 00:00:00"
)
}
)
@ -869,7 +855,7 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
class Create(ApiMixin, serializers.Serializer):
dataset_id = serializers.UUIDField(required=True, error_messages=ErrMessage.char(
_('dataset id')))
_('document id')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
@ -997,7 +983,7 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
in_=openapi.IN_PATH,
type=openapi.TYPE_STRING,
required=True,
description=_('dataset id'))
description=_('document id'))
]
class Split(ApiMixin, serializers.Serializer):

View File

@ -226,14 +226,6 @@ class ParagraphSerializers(ApiMixin, serializers.Serializer):
def association(self, with_valid=True, with_embedding=True):
if with_valid:
self.is_valid(raise_exception=True)
# 已关联则直接返回
if QuerySet(ProblemParagraphMapping).filter(
dataset_id=self.data.get('dataset_id'),
document_id=self.data.get('document_id'),
paragraph_id=self.data.get('paragraph_id'),
problem_id=self.data.get('problem_id')
).exists():
return True
problem = QuerySet(Problem).filter(id=self.data.get("problem_id")).first()
problem_paragraph_mapping = ProblemParagraphMapping(id=uuid.uuid1(),
document_id=self.data.get('document_id'),

View File

@ -7,13 +7,13 @@
@desc:
"""
from django.utils.translation import gettext_lazy as _
from drf_yasg.utils import swagger_auto_schema
from rest_framework.decorators import action
from rest_framework.parsers import MultiPartParser
from rest_framework.views import APIView
from rest_framework.views import Request
import dataset.models
from common.auth import TokenAuth, has_permissions
from common.constants.permission_constants import PermissionConstants, CompareConstants, Permission, Group, Operate, \
ViewPermission, RoleConstants
@ -25,6 +25,7 @@ from dataset.serializers.common_serializers import GenerateRelatedSerializer
from dataset.serializers.dataset_serializers import DataSetSerializers
from dataset.views.common import get_dataset_operation_object
from setting.serializers.provider_serializers import ModelSerializer
from django.utils.translation import gettext_lazy as _
class Dataset(APIView):
@ -140,22 +141,21 @@ class Dataset(APIView):
class HitTest(APIView):
authentication_classes = [TokenAuth]
@action(methods="PUT", detail=False)
@action(methods="GET", detail=False)
@swagger_auto_schema(operation_summary=_('Hit test list'), operation_id=_('Hit test list'),
request_body=CommonApi.HitTestApi.get_request_body_api(),
manual_parameters=CommonApi.HitTestApi.get_request_params_api(),
responses=result.get_api_array_response(CommonApi.HitTestApi.get_response_body_api()),
tags=[_('Knowledge Base')])
@has_permissions(lambda r, keywords: Permission(group=Group.DATASET, operate=Operate.USE,
dynamic_tag=keywords.get('dataset_id')))
def put(self, request: Request, dataset_id: str):
return result.success(DataSetSerializers.HitTest(data={
'id': dataset_id,
'user_id': request.user.id,
"query_text": request.data.get("query_text"),
"top_number": request.data.get("top_number"),
'similarity': request.data.get('similarity'),
'search_mode': request.data.get('search_mode')}
).hit_test())
def get(self, request: Request, dataset_id: str):
return result.success(
DataSetSerializers.HitTest(data={'id': dataset_id, 'user_id': request.user.id,
"query_text": request.query_params.get("query_text"),
"top_number": request.query_params.get("top_number"),
'similarity': request.query_params.get('similarity'),
'search_mode': request.query_params.get('search_mode')}).hit_test(
))
class Embedding(APIView):
authentication_classes = [TokenAuth]
@ -181,6 +181,7 @@ class Dataset(APIView):
@swagger_auto_schema(operation_summary=_('Generate related'), operation_id=_('Generate related'),
manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
request_body=GenerateRelatedSerializer.get_request_body_api(),
responses=result.get_default_response(),
tags=[_('Knowledge Base')]
)
@log(menu='document', operate="Generate related documents",

View File

@ -5,17 +5,15 @@ SELECT
FROM
(
SELECT DISTINCT ON
( "paragraph_id" ) ( 1 - distince + ts_similarity ) as similarity, *,
(1 - distince + ts_similarity) AS comprehensive_score
( "paragraph_id" ) ( similarity ),* ,
similarity AS comprehensive_score
FROM
(
SELECT
*,
(embedding.embedding::vector(%s) <=> %s) as distince,
(ts_rank_cd( embedding.search_vector, websearch_to_tsquery('simple', %s ), 32 )) AS ts_similarity
(( 1 - ( embedding.embedding <=> %s ) )+ts_rank_cd( embedding.search_vector, websearch_to_tsquery('simple', %s ), 32 )) AS similarity
FROM
embedding ${embedding_query}
ORDER BY distince
) TEMP
ORDER BY
paragraph_id,

View File

@ -5,12 +5,12 @@ SELECT
FROM
(
SELECT DISTINCT ON
("paragraph_id") ( 1 - distince ),* ,(1 - distince) AS comprehensive_score
("paragraph_id") ( similarity ),* ,similarity AS comprehensive_score
FROM
( SELECT *, ( embedding.embedding::vector(%s) <=> %s ) AS distince FROM embedding ${embedding_query} ORDER BY distince) TEMP
( SELECT *, ( 1 - ( embedding.embedding <=> %s ) ) AS similarity FROM embedding ${embedding_query}) TEMP
ORDER BY
paragraph_id,
distince
similarity DESC
) DISTINCT_TEMP
WHERE comprehensive_score>%s
ORDER BY comprehensive_score DESC

View File

@ -17,7 +17,6 @@ from common.config.embedding_config import ModelManage
from common.event import ListenerManagement, UpdateProblemArgs, UpdateEmbeddingDatasetIdArgs, \
UpdateEmbeddingDocumentIdArgs
from dataset.models import Document, TaskType, State
from dataset.serializers.common_serializers import drop_dataset_index
from ops import celery_app
from setting.models import Model
from setting.models_provider import get_model
@ -111,7 +110,6 @@ def embedding_by_dataset(dataset_id, model_id):
max_kb.info(_('Start--->Vectorized dataset: {dataset_id}').format(dataset_id=dataset_id))
try:
ListenerManagement.delete_embedding_by_dataset(dataset_id)
drop_dataset_index(dataset_id=dataset_id)
document_list = QuerySet(Document).filter(dataset_id=dataset_id)
max_kb.info(_('Dataset documentation: {document_names}').format(
document_names=", ".join([d.name for d in document_list])))

View File

@ -12,6 +12,7 @@ import uuid
from abc import ABC, abstractmethod
from typing import Dict, List
import jieba
from django.contrib.postgres.search import SearchVector
from django.db.models import QuerySet, Value
from langchain_core.embeddings import Embeddings
@ -168,13 +169,8 @@ class EmbeddingSearch(ISearch):
os.path.join(PROJECT_DIR, "apps", "embedding", 'sql',
'embedding_search.sql')),
with_table_name=True)
embedding_model = select_list(exec_sql, [
len(query_embedding),
json.dumps(query_embedding),
*exec_params,
similarity,
top_number
])
embedding_model = select_list(exec_sql,
[json.dumps(query_embedding), *exec_params, similarity, top_number])
return embedding_model
def support(self, search_mode: SearchMode):
@ -194,12 +190,8 @@ class KeywordsSearch(ISearch):
os.path.join(PROJECT_DIR, "apps", "embedding", 'sql',
'keywords_search.sql')),
with_table_name=True)
embedding_model = select_list(exec_sql, [
to_query(query_text),
*exec_params,
similarity,
top_number
])
embedding_model = select_list(exec_sql,
[to_query(query_text), *exec_params, similarity, top_number])
return embedding_model
def support(self, search_mode: SearchMode):
@ -219,14 +211,9 @@ class BlendSearch(ISearch):
os.path.join(PROJECT_DIR, "apps", "embedding", 'sql',
'blend_search.sql')),
with_table_name=True)
embedding_model = select_list(exec_sql, [
len(query_embedding),
json.dumps(query_embedding),
to_query(query_text),
*exec_params,
similarity,
top_number
])
embedding_model = select_list(exec_sql,
[json.dumps(query_embedding), to_query(query_text), *exec_params, similarity,
top_number])
return embedding_model
def support(self, search_mode: SearchMode):

View File

@ -1,127 +0,0 @@
# Generated by Django 4.2.15 on 2025-03-13 07:21
from django.db import migrations
from django.db.models import Q
mysql_template = """
def query_mysql(host,port, user, password, database, sql):
import pymysql
import json
from pymysql.cursors import DictCursor
from datetime import datetime, date
def default_serializer(obj):
from decimal import Decimal
if isinstance(obj, (datetime, date)):
return obj.isoformat() # 将 datetime/date 转换为 ISO 格式字符串
elif isinstance(obj, Decimal):
return float(obj) # 将 Decimal 转换为 float
raise TypeError(f"Type {type(obj)} not serializable")
try:
# 创建连接
db = pymysql.connect(
host=host,
port=int(port),
user=user,
password=password,
database=database,
cursorclass=DictCursor # 使用字典游标
)
# 使用 cursor() 方法创建一个游标对象 cursor
cursor = db.cursor()
# 使用 execute() 方法执行 SQL 查询
cursor.execute(sql)
# 使用 fetchall() 方法获取所有数据
data = cursor.fetchall()
# 处理 bytes 类型的数据
for row in data:
for key, value in row.items():
if isinstance(value, bytes):
row[key] = value.decode("utf-8") # 转换为字符串
# 将数据序列化为 JSON
json_data = json.dumps(data, default=default_serializer, ensure_ascii=False)
return json_data
# 关闭数据库连接
db.close()
except Exception as e:
print(f"Error while connecting to MySQL: {e}")
raise e
"""
pgsql_template = """
def queryPgSQL(database, user, password, host, port, query):
import psycopg2
import json
from datetime import datetime
# 自定义 JSON 序列化函数
def default_serializer(obj):
from decimal import Decimal
if isinstance(obj, datetime):
return obj.isoformat() # 将 datetime 转换为 ISO 格式字符串
elif isinstance(obj, Decimal):
return float(obj) # 将 Decimal 转换为 float
raise TypeError(f"Type {type(obj)} not serializable")
# 数据库连接信息
conn_params = {
"dbname": database,
"user": user,
"password": password,
"host": host,
"port": port
}
try:
# 建立连接
conn = psycopg2.connect(**conn_params)
print("连接成功!")
# 创建游标对象
cursor = conn.cursor()
# 执行查询语句
cursor.execute(query)
# 获取查询结果
rows = cursor.fetchall()
# 处理 bytes 类型的数据
columns = [desc[0] for desc in cursor.description]
result = [dict(zip(columns, row)) for row in rows]
# 转换为 JSON 格式
json_result = json.dumps(result, default=default_serializer, ensure_ascii=False)
return json_result
except Exception as e:
print(f"发生错误:{e}")
raise e
finally:
# 关闭游标和连接
if cursor:
cursor.close()
if conn:
conn.close()
"""
def fix_type(apps, schema_editor):
FunctionLib = apps.get_model('function_lib', 'FunctionLib')
FunctionLib.objects.filter(
Q(id='22c21b76-0308-11f0-9694-5618c4394482') | Q(template_id='22c21b76-0308-11f0-9694-5618c4394482')
).update(code=mysql_template)
FunctionLib.objects.filter(
Q(id='bd1e8b88-0302-11f0-87bb-5618c4394482') | Q(template_id='bd1e8b88-0302-11f0-87bb-5618c4394482')
).update(code=pgsql_template)
class Migration(migrations.Migration):
dependencies = [
('function_lib', '0003_functionlib_function_type_functionlib_icon_and_more'),
]
operations = [
migrations.RunPython(fix_type)
]

View File

@ -33,13 +33,11 @@ from smartdoc.const import CONFIG
function_executor = FunctionExecutor(CONFIG.get('SANDBOX'))
class FlibInstance:
def __init__(self, function_lib: dict, version: str):
self.function_lib = function_lib
self.version = version
def encryption(message: str):
"""
加密敏感字段数据 加密方式是 如果密码是 1234567890 那么给前端则是 123******890
@ -70,8 +68,7 @@ def encryption(message: str):
class FunctionLibModelSerializer(serializers.ModelSerializer):
class Meta:
model = FunctionLib
fields = ['id', 'name', 'icon', 'desc', 'code', 'input_field_list', 'init_field_list', 'init_params',
'permission_type', 'is_active', 'user_id', 'template_id',
fields = ['id', 'name', 'icon', 'desc', 'code', 'input_field_list','init_field_list', 'init_params', 'permission_type', 'is_active', 'user_id', 'template_id',
'create_time', 'update_time']
@ -151,6 +148,7 @@ class FunctionLibSerializer(serializers.Serializer):
select_user_id = serializers.CharField(required=False, allow_null=True, allow_blank=True)
function_type = serializers.CharField(required=False, allow_null=True, allow_blank=True)
def get_query_set(self):
query_set = QuerySet(FunctionLib).filter(
(Q(user_id=self.data.get('user_id')) | Q(permission_type='PUBLIC')))
@ -271,7 +269,7 @@ class FunctionLibSerializer(serializers.Serializer):
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if not QuerySet(FunctionLib).filter(user_id=self.data.get('user_id'), id=self.data.get('id')).exists():
if not QuerySet(FunctionLib).filter(id=self.data.get('id')).exists():
raise AppApiException(500, _('Function does not exist'))
def delete(self, with_valid=True):
@ -287,8 +285,7 @@ class FunctionLibSerializer(serializers.Serializer):
if with_valid:
self.is_valid(raise_exception=True)
EditFunctionLib(data=instance).is_valid(raise_exception=True)
edit_field_list = ['name', 'desc', 'code', 'icon', 'input_field_list', 'init_field_list', 'init_params',
'permission_type', 'is_active']
edit_field_list = ['name', 'desc', 'code', 'icon', 'input_field_list', 'init_field_list', 'init_params', 'permission_type', 'is_active']
edit_dict = {field: instance.get(field) for field in edit_field_list if (
field in instance and instance.get(field) is not None)}
@ -320,8 +317,7 @@ class FunctionLibSerializer(serializers.Serializer):
if function_lib.init_params:
function_lib.init_params = json.loads(rsa_long_decrypt(function_lib.init_params))
if function_lib.init_field_list:
password_fields = [i["field"] for i in function_lib.init_field_list if
i.get("input_type") == "PasswordInput"]
password_fields = [i["field"] for i in function_lib.init_field_list if i.get("input_type") == "PasswordInput"]
if function_lib.init_params:
for k in function_lib.init_params:
if k in password_fields and function_lib.init_params[k]:

View File

@ -195,6 +195,53 @@ class FunctionLibApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['id', 'name', 'code', 'input_field_list', 'permission_type'],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')),
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
description=_('function name')),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
description=_('function description')),
'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
description=_('function content')),
'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
description=_('permission')),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
description=_('Is active')),
'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
description=_('Input variable list'),
items=openapi.Schema(type=openapi.TYPE_OBJECT,
required=['name', 'is_required', 'source'],
properties={
'name': openapi.Schema(
type=openapi.TYPE_STRING,
title=_('variable name'),
description=_('variable name')),
'is_required': openapi.Schema(
type=openapi.TYPE_BOOLEAN,
title=_('required'),
description=_('required')),
'type': openapi.Schema(
type=openapi.TYPE_STRING,
title=_('type'),
description=_(
'Field type string|int|dict|array|float')
),
'source': openapi.Schema(
type=openapi.TYPE_STRING,
title=_('source'),
description=_(
'The source only supports custom|reference')),
}))
}
)
class Export(ApiMixin):
@staticmethod
def get_request_params_api():
@ -214,4 +261,4 @@ class FunctionLibApi(ApiMixin):
type=openapi.TYPE_FILE,
required=True,
description=_('Upload image files'))
]
]

View File

@ -44,6 +44,7 @@ class FunctionLibView(APIView):
@swagger_auto_schema(operation_summary=_('Create function'),
operation_id=_('Create function'),
request_body=FunctionLibApi.Create.get_request_body_api(),
responses=result.get_api_response(FunctionLibApi.Create.get_response_body_api()),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
@log(menu='Function', operate="Create function",
@ -58,6 +59,7 @@ class FunctionLibView(APIView):
@swagger_auto_schema(operation_summary=_('Debug function'),
operation_id=_('Debug function'),
request_body=FunctionLibApi.Debug.get_request_body_api(),
responses=result.get_default_response(),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):
@ -72,6 +74,7 @@ class FunctionLibView(APIView):
@swagger_auto_schema(operation_summary=_('Update function'),
operation_id=_('Update function'),
request_body=FunctionLibApi.Edit.get_request_body_api(),
responses=result.get_api_response(FunctionLibApi.Edit.get_request_body_api()),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
@log(menu='Function', operate="Update function",
@ -84,6 +87,7 @@ class FunctionLibView(APIView):
@action(methods=['DELETE'], detail=False)
@swagger_auto_schema(operation_summary=_('Delete function'),
operation_id=_('Delete function'),
responses=result.get_default_response(),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
@log(menu='Function', operate="Delete function",

View File

@ -26,6 +26,7 @@ class PyLintView(APIView):
@swagger_auto_schema(operation_summary=_('Check code'),
operation_id=_('Check code'),
request_body=PyLintApi.get_request_body_api(),
responses=result.get_api_response(PyLintApi.get_request_body_api()),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):

View File

@ -7238,7 +7238,7 @@ msgstr ""
msgid ""
"The confirmation password must be 6-20 characters long and must be a "
"combination of letters, numbers, and special characters."
msgstr "The confirmation password must be 6-20 characters long and must be a combination of letters, numbers, and special characters.(Special character support:_、!、@、#、$、(、) ……)"
msgstr ""
#: community/apps/users/serializers/user_serializers.py:380
#, python-brace-format
@ -7499,13 +7499,4 @@ msgid "Captcha code error or expiration"
msgstr ""
msgid "captcha"
msgstr ""
msgid "Reasoning enable"
msgstr ""
msgid "Reasoning start tag"
msgstr ""
msgid "Reasoning end tag"
msgstr ""

View File

@ -7395,7 +7395,7 @@ msgstr "语言只支持:"
msgid ""
"The confirmation password must be 6-20 characters long and must be a "
"combination of letters, numbers, and special characters."
msgstr "确认密码长度6-20个字符必须字母、数字、特殊字符组合特殊字符支持_、!、@、#、$、(、) ……)"
msgstr "确认密码长度6-20个字符必须字母、数字、特殊字符组合"
#: community/apps/users/serializers/user_serializers.py:380
#, python-brace-format
@ -7662,13 +7662,4 @@ msgid "Captcha code error or expiration"
msgstr "验证码错误或过期"
msgid "captcha"
msgstr "验证码"
msgid "Reasoning enable"
msgstr "开启思考过程"
msgid "Reasoning start tag"
msgstr "思考过程开始标签"
msgid "Reasoning end tag"
msgstr "思考过程结束标签"
msgstr "验证码"

View File

@ -7405,7 +7405,7 @@ msgstr "語言只支持:"
msgid ""
"The confirmation password must be 6-20 characters long and must be a "
"combination of letters, numbers, and special characters."
msgstr "確認密碼長度6-20個字符必須字母、數字、特殊字符組合特殊字元支持_、!、@、#、$、(、) ……)"
msgstr "確認密碼長度6-20個字符必須字母、數字、特殊字符組合"
#: community/apps/users/serializers/user_serializers.py:380
#, python-brace-format
@ -7672,13 +7672,4 @@ msgid "Captcha code error or expiration"
msgstr "驗證碼錯誤或過期"
msgid "captcha"
msgstr "驗證碼"
msgid "Reasoning enable"
msgstr "開啟思考過程"
msgid "Reasoning start tag"
msgstr "思考過程開始標籤"
msgid "Reasoning end tag"
msgstr "思考過程結束標籤"
msgstr "驗證碼"

View File

@ -1,61 +0,0 @@
import logging
import psycopg
from django.db import migrations
from smartdoc.const import CONFIG
def get_connect(db_name):
conn_params = {
"dbname": db_name,
"user": CONFIG.get('DB_USER'),
"password": CONFIG.get('DB_PASSWORD'),
"host": CONFIG.get('DB_HOST'),
"port": CONFIG.get('DB_PORT')
}
# 建立连接
connect = psycopg.connect(**conn_params)
return connect
def sql_execute(conn, reindex_sql: str, alter_database_sql: str):
"""
执行一条sql
@param reindex_sql:
@param conn:
@param alter_database_sql:
"""
conn.autocommit = True
with conn.cursor() as cursor:
cursor.execute(reindex_sql, [])
cursor.execute(alter_database_sql, [])
cursor.close()
def re_index(apps, schema_editor):
app_db_name = CONFIG.get('DB_NAME')
try:
re_index_database(app_db_name)
except Exception as e:
logging.error(f'reindex database {app_db_name}发送错误:{str(e)}')
try:
re_index_database('root')
except Exception as e:
logging.error(f'reindex database root 发送错误:{str(e)}')
def re_index_database(db_name):
db_conn = get_connect(db_name)
sql_execute(db_conn, f'REINDEX DATABASE "{db_name}";', f'ALTER DATABASE "{db_name}" REFRESH COLLATION VERSION;')
db_conn.close()
class Migration(migrations.Migration):
dependencies = [
('setting', '0010_log'),
]
operations = [
migrations.RunPython(re_index, atomic=False)
]

View File

@ -99,7 +99,7 @@ class BaseChatOpenAI(ChatOpenAI):
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
return self.usage_metadata.get('input_tokens', self.usage_metadata.get('prompt_tokens', 0))
return self.usage_metadata.get('input_tokens', 0)
def get_num_tokens(self, text: str) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
@ -108,8 +108,7 @@ class BaseChatOpenAI(ChatOpenAI):
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return len(tokenizer.encode(text))
return self.get_last_generation_info().get('output_tokens',
self.get_last_generation_info().get('completion_tokens', 0))
return self.get_last_generation_info().get('output_tokens', 0)
def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
kwargs['stream_usage'] = True

View File

@ -171,6 +171,24 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer):
}
)
@staticmethod
def get_response_body_api():
    """Swagger/OpenAPI schema describing a team-member response object."""
    def _str_field(title, description=None):
        # Shorthand for a string-typed schema property; description
        # defaults to the title, matching the repeated pattern here.
        return openapi.Schema(type=openapi.TYPE_STRING, title=title,
                              description=description if description is not None else title)

    return openapi.Schema(
        type=openapi.TYPE_OBJECT,
        properties={
            'id': _str_field(_('user id')),
            'username': _str_field(_('Username')),
            'email': _str_field(_('Email')),
            'role': _str_field(_('Role')),
            'is_active': _str_field(_('Is active')),
            'team_id': _str_field(_('team id')),
            'user_id': _str_field(_('user id')),
            'type': _str_field(_('member type'), _('member type manage|member')),
        }
    )
@transaction.atomic
def batch_add_member(self, user_id_list: List[str], with_valid=True):
"""

View File

@ -38,6 +38,7 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Add member'),
operation_id=_('Add member'),
request_body=TeamMemberSerializer().get_request_body_api(),
responses=result.get_default_response(),
tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_CREATE)
@log(menu='Team', operate='Add member',
@ -53,6 +54,7 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Add members in batches'),
operation_id=_('Add members in batches'),
request_body=TeamMemberSerializer.get_bach_request_body_api(),
responses=result.get_api_array_response(TeamMemberSerializer.get_response_body_api()),
tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_CREATE)
@log(menu='Team', operate='Add members in batches',
@ -78,6 +80,7 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Update team member permissions'),
operation_id=_('Update team member permissions'),
request_body=UpdateTeamMemberPermissionSerializer().get_request_body_api(),
responses=result.get_default_response(),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
tags=[_('Team')]
)
@ -93,6 +96,7 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Remove member'),
operation_id=_('Remove member'),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
responses=result.get_default_response(),
tags=[_('Team')]
)
@has_permissions(PermissionConstants.TEAM_DELETE)

View File

@ -31,7 +31,8 @@ class Model(APIView):
@action(methods=['POST'], detail=False)
@swagger_auto_schema(operation_summary=_('Create model'),
operation_id=_('Create model'),
request_body=ModelCreateApi.get_request_body_api()
request_body=ModelCreateApi.get_request_body_api(),
manual_parameters=result.get_api_response(ModelCreateApi.get_request_body_api())
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
@log(menu='model', operate='Create model',
@ -45,7 +46,8 @@ class Model(APIView):
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary=_('Download model, trial only with Ollama platform'),
operation_id=_('Download model, trial only with Ollama platform'),
request_body=ModelCreateApi.get_request_body_api()
request_body=ModelCreateApi.get_request_body_api(),
responses=result.get_api_response(ModelCreateApi.get_request_body_api())
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
def put(self, request: Request):
@ -123,7 +125,8 @@ class Model(APIView):
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary=_('Update model'),
operation_id=_('Update model'),
request_body=ModelEditApi.get_request_body_api()
request_body=ModelEditApi.get_request_body_api(),
responses=result.get_api_response(ModelEditApi.get_request_body_api())
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
@log(menu='model', operate='Update model',
@ -166,7 +169,8 @@ class Provide(APIView):
@swagger_auto_schema(operation_summary=_('Call the supplier function to obtain form data'),
operation_id=_('Call the supplier function to obtain form data'),
manual_parameters=ProvideApi.get_request_params_api(),
request_body=ProvideApi.get_request_body_api()
request_body=ProvideApi.get_request_body_api(),
responses=result.get_api_response(ProvideApi.get_request_body_api())
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
@log(menu='model', operate='Call the supplier function to obtain form data')

View File

@ -7,7 +7,6 @@
2. 程序需要, 用户不需要更改的写到settings中
3. 程序需要, 用户需要更改的写到本config中
"""
import datetime
import errno
import logging
import os
@ -113,19 +112,13 @@ class Config(dict):
"USER": self.get('DB_USER'),
"PASSWORD": self.get('DB_PASSWORD'),
"ENGINE": self.get('DB_ENGINE'),
"CONN_MAX_AGE": 0,
"POOL_OPTIONS": {
"POOL_SIZE": 20,
"MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW')),
"RECYCLE": 1800,
"TIMEOUT": 30,
'PRE_PING': True
'RECYCLE': 30 * 60
}
}
def get_session_timeout(self):
return datetime.timedelta(seconds=int(self.get('SESSION_TIMEOUT', 60 * 60 * 2)))
def get_language_code(self):
return self.get('LANGUAGE_CODE', 'zh-CN')

View File

@ -22,7 +22,7 @@ from common.constants.permission_constants import PermissionConstants, CompareCo
from common.log.log import log
from common.response import result
from common.util.common import encryption
from smartdoc.const import CONFIG
from smartdoc.settings import JWT_AUTH
from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \
RePasswordSerializer, \
SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer, \
@ -84,7 +84,7 @@ class SwitchUserLanguageView(APIView):
description=_("language")),
}
),
responses=RePasswordSerializer().get_response_body_api(),
responses=result.get_default_response(),
tags=[_("User management")])
@log(menu='User management', operate='Switch Language',
get_operation_object=lambda r, k: {'name': r.user.username})
@ -111,7 +111,7 @@ class ResetCurrentUserPasswordView(APIView):
description=_("Password"))
}
),
responses=RePasswordSerializer().get_response_body_api(),
responses=result.get_default_response(),
tags=[_("User management")])
@log(menu='User management', operate='Modify current user password',
get_operation_object=lambda r, k: {'name': r.user.username},
@ -199,7 +199,7 @@ class Login(APIView):
# 校验请求参数
user = login_request.is_valid(raise_exception=True)
token = login_request.get_user_token()
token_cache.set(token, user, timeout=CONFIG.get_session_timeout())
token_cache.set(token, user, timeout=JWT_AUTH['JWT_EXPIRATION_DELTA'])
return result.success(token)

View File

@ -5,7 +5,7 @@ RUN cd ui && \
npm install && \
npm run build && \
rm -rf ./node_modules
FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.14 AS stage-build
FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.8 AS stage-build
ARG DEPENDENCIES=" \
python3-pip"
@ -29,7 +29,7 @@ RUN python3 -m venv /opt/py3 && \
poetry install && \
export MAXKB_CONFIG_TYPE=ENV && python3 /opt/maxkb/app/apps/manage.py compilemessages
FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.14
FROM ghcr.io/1panel-dev/maxkb-python-pg:python3.11-pg15.8
ARG DOCKER_IMAGE_TAG=dev \
BUILD_AT \
GITHUB_COMMIT
@ -70,7 +70,6 @@ RUN chmod 755 /opt/maxkb/app/installer/run-maxkb.sh && \
useradd --no-create-home --home /opt/maxkb/app/sandbox sandbox -g root && \
chown -R sandbox:root /opt/maxkb/app/sandbox && \
chmod g-x /usr/local/bin/* /usr/bin/* /bin/* /usr/sbin/* /sbin/* /usr/lib/postgresql/15/bin/* && \
chmod g+xr /usr/bin/ld.so && \
chmod g+x /usr/local/bin/python* && \
find /etc/ -type f ! -path '/etc/resolv.conf' ! -path '/etc/hosts' | xargs chmod g-rx

View File

@ -1,5 +1,5 @@
FROM python:3.11-slim-trixie AS python-stage
FROM postgres:15.14-trixie
FROM python:3.11-slim-bullseye AS python-stage
FROM postgres:15.8-bullseye
ARG DEPENDENCIES=" \
libexpat1-dev \

View File

@ -23,7 +23,7 @@ langchain-huggingface = "0.1.2"
langchain-ollama = "0.3.2"
langgraph = "0.3.27"
mcp = "1.8.0"
psycopg = { extras = ["binary"], version = "3.2.9" }
psycopg2-binary = "2.9.10"
jieba = "0.42.1"
diskcache = "5.6.3"
pillow = "10.4.0"
@ -39,7 +39,7 @@ html2text = "2024.2.26"
django-ipware = "6.0.5"
django-apscheduler = "0.6.2"
pymupdf = "1.24.9"
pypdf = "6.0.0"
pypdf = "4.3.1"
rapidocr-onnxruntime = "1.3.24"
python-docx = "1.1.2"
xlwt = "1.3.0"
@ -64,7 +64,7 @@ pylint = "3.3.6"
pydub = "0.25.1"
cffi = "1.17.1"
pysilk = "0.0.1"
django-db-connection-pool = "1.2.6"
django-db-connection-pool = "1.2.5"
opencv-python-headless = "4.11.0.86"
pymysql = "1.1.1"
accelerate = "1.6.0"

View File

@ -34,11 +34,10 @@
"katex": "^0.16.10",
"lodash": "^4.17.21",
"marked": "^12.0.2",
"md-editor-v3": "^5.8.4",
"md-editor-v3": "^4.16.7",
"mermaid": "^10.9.0",
"mitt": "^3.0.0",
"moment": "^2.30.1",
"nanoid": "^5.1.5",
"npm": "^10.2.4",
"nprogress": "^0.2.0",
"pinia": "^2.1.6",
@ -54,7 +53,8 @@
"vue-draggable-plus": "^0.6.0",
"vue-i18n": "^9.13.1",
"vue-router": "^4.2.4",
"vue3-menus": "^1.1.2"
"vue3-menus": "^1.1.2",
"vuedraggable": "^4.1.0"
},
"devDependencies": {
"@rushstack/eslint-patch": "^1.3.2",

View File

@ -227,7 +227,7 @@ const getApplicationHitTest: (
data: any,
loading?: Ref<boolean>
) => Promise<Result<Array<any>>> = (application_id, data, loading) => {
return put(`${prefix}/${application_id}/hit_test`, data, undefined, loading)
return get(`${prefix}/${application_id}/hit_test`, data, loading)
}
/**

View File

@ -186,7 +186,7 @@ const getDatasetHitTest: (
data: any,
loading?: Ref<boolean>
) => Promise<Result<Array<any>>> = (dataset_id, data, loading) => {
return put(`${prefix}/${dataset_id}/hit_test`, data, undefined, loading)
return get(`${prefix}/${dataset_id}/hit_test`, data, loading)
}
/**

View File

@ -182,7 +182,6 @@
@keydown.enter="sendChatHandle($event)"
@paste="handlePaste"
@drop="handleDrop"
@dragover.prevent="handleDragOver"
/>
<div class="operate flex align-center">
@ -288,7 +287,7 @@
</div>
</template>
<script setup lang="ts">
import { ref, computed, onMounted, nextTick, watch, reactive } from 'vue'
import { ref, computed, onMounted, nextTick, watch } from 'vue'
import Recorder from 'recorder-core'
import TouchChat from './TouchChat.vue'
import applicationApi from '@/api/application'
@ -393,7 +392,17 @@ const checkMaxFilesLimit = () => {
uploadOtherList.value.length
)
}
const file_name_eq = (str: string, str1: string) => {
  // File names match when they are equal after stripping all spaces...
  const stripped = str.replaceAll(' ', '')
  const stripped1 = str1.replaceAll(' ', '')
  if (stripped === stripped1) {
    return true
  }
  // ...or once HTML entities (e.g. &amp;) are decoded on both sides.
  return decodeHtmlEntities(str) === decodeHtmlEntities(str1)
}
function decodeHtmlEntities(str: string) {
  // Decode HTML entities by letting the browser parse the string inside a
  // detached element, then reading back the plain text.
  // NOTE(review): assigning untrusted input to innerHTML can still trigger
  // resource loads (e.g. <img onerror>) even on a detached node — confirm
  // the inputs here are limited to server-provided file names.
  const el = document.createElement('div')
  el.innerHTML = str
  return el.textContent || el.innerText || ''
}
const uploadFile = async (file: any, fileList: any) => {
const { maxFiles, fileLimit } = props.applicationDetails.file_upload_setting
//
@ -418,7 +427,6 @@ const uploadFile = async (file: any, fileList: any) => {
const formData = new FormData()
formData.append('file', file.raw, file.name)
//
file = reactive(file)
const extension = file.name.split('.').pop().toUpperCase() //
if (imageExtensions.includes(extension)) {
uploadImageList.value.push(file)
@ -452,8 +460,44 @@ const uploadFile = async (file: any, fileList: any) => {
)
.then((response) => {
fileList.splice(0, fileList.length)
file.url = response.data[0].url
file.file_id = response.data[0].file_id
uploadImageList.value.forEach((file: any) => {
const f = response.data.filter((f: any) => file_name_eq(f.name, file.name))
if (f.length > 0) {
file.url = f[0].url
file.file_id = f[0].file_id
}
})
uploadDocumentList.value.forEach((file: any) => {
const f = response.data.filter((f: any) => file_name_eq(f.name, file.name))
if (f.length > 0) {
file.url = f[0].url
file.file_id = f[0].file_id
}
})
uploadAudioList.value.forEach((file: any) => {
const f = response.data.filter((f: any) => file_name_eq(f.name, file.name))
if (f.length > 0) {
file.url = f[0].url
file.file_id = f[0].file_id
}
})
uploadVideoList.value.forEach((file: any) => {
const f = response.data.filter((f: any) => file_name_eq(f.name, file.name))
if (f.length > 0) {
file.url = f[0].url
file.file_id = f[0].file_id
}
})
uploadOtherList.value.forEach((file: any) => {
const f = response.data.filter((f: any) => file_name_eq(f.name, file.name))
if (f.length > 0) {
file.url = f[0].url
file.file_id = f[0].file_id
}
})
if (!inputValue.value && uploadImageList.value.length > 0) {
inputValue.value = t('chat.uploadFile.imageMessage')
}
})
}
//
@ -485,7 +529,6 @@ const handlePaste = (event: ClipboardEvent) => {
//
event.preventDefault()
}
//
const handleDrop = (event: DragEvent) => {
if (!props.applicationDetails.file_upload_enable) return
@ -505,12 +548,6 @@ const handleDrop = (event: DragEvent) => {
uploadFile(elFile, [elFile])
})
}
const handleDragOver = (event: DragEvent) => {
if (event.dataTransfer) {
event.dataTransfer.dropEffect = 'copy' // Firefox
}
}
// id
const intervalId = ref<any | null>(null)
//
@ -528,16 +565,7 @@ const uploadOtherList = ref<Array<any>>([])
const showDelete = ref('')
const isDisabledChat = computed(
() =>
!(
(inputValue.value.trim() ||
uploadImageList.value.length > 0 ||
uploadDocumentList.value.length > 0 ||
uploadVideoList.value.length > 0 ||
uploadAudioList.value.length > 0 ||
uploadOtherList.value.length > 0) &&
(props.appId || props.applicationDetails?.name)
)
() => !(inputValue.value.trim() && (props.appId || props.applicationDetails?.name))
)
//
const isMicrophone = ref(false)
@ -737,34 +765,11 @@ const stopTimer = () => {
}
}
const getQuestion = () => {
if (!inputValue.value.trim()) {
const fileLenth = [
uploadImageList.value.length > 0,
uploadDocumentList.value.length > 0,
uploadAudioList.value.length > 0,
uploadOtherList.value.length > 0
]
if (fileLenth.filter((f) => f).length > 1) {
return t('chat.uploadFile.otherMessage')
} else if (fileLenth[0]) {
return t('chat.uploadFile.imageMessage')
} else if (fileLenth[1]) {
return t('chat.uploadFile.documentMessage')
} else if (fileLenth[2]) {
return t('chat.uploadFile.audioMessage')
} else if (fileLenth[3]) {
return t('chat.uploadFile.otherMessage')
}
}
return inputValue.value.trim()
}
function autoSendMessage() {
props
.validate()
.then(() => {
props.sendMessage(getQuestion(), {
props.sendMessage(inputValue.value, {
image_list: uploadImageList.value,
document_list: uploadDocumentList.value,
audio_list: uploadAudioList.value,
@ -799,14 +804,7 @@ function sendChatHandle(event?: any) {
//
event?.preventDefault()
if (!isDisabledChat.value && !props.loading && !event?.isComposing) {
if (
inputValue.value.trim() ||
uploadImageList.value.length > 0 ||
uploadDocumentList.value.length > 0 ||
uploadAudioList.value.length > 0 ||
uploadVideoList.value.length > 0 ||
uploadOtherList.value.length > 0
) {
if (inputValue.value.trim()) {
autoSendMessage()
}
}

View File

@ -454,22 +454,14 @@ class AudioManage {
this.statusList[index] = AudioStatus.ERROR
}
} else {
if (window.speechSynthesis.paused && self) {
if (window.speechSynthesis.paused) {
window.speechSynthesis.resume()
this.statusList[index] = AudioStatus.PLAY_INT
} else {
//
if (window.speechSynthesis.speaking) {
if (window.speechSynthesis.pending) {
window.speechSynthesis.cancel()
}
//
setTimeout(() => {
if (speechSynthesis.speaking) {
return
}
speechSynthesis.speak(audioElement)
this.statusList[index] = AudioStatus.PLAY_INT
}, 500)
speechSynthesis.speak(audioElement)
this.statusList[index] = AudioStatus.PLAY_INT
}
}
}
@ -490,6 +482,11 @@ class AudioManage {
this.statusList[index] = AudioStatus.READY
if (self) {
window.speechSynthesis.pause()
nextTick(() => {
if (!window.speechSynthesis.paused) {
window.speechSynthesis.cancel()
}
})
} else {
window.speechSynthesis.cancel()
}

View File

@ -17,7 +17,6 @@
:source="prologue"
:send-message="sendMessage"
reasoning_content=""
:type="type"
></MdRenderer>
</el-card>
</div>
@ -59,7 +58,7 @@ const prologue = computed(() => {
]
let _temp = temp
for (const index in tag_list) {
_temp = _temp.replace(new RegExp(tag_list[index], 'g'), '')
_temp = _temp.replaceAll(tag_list[index], '')
}
const quick_question_list = _temp.match(/-\s.+/g)
let result = temp

View File

@ -163,13 +163,13 @@ const initialApiFormData = ref({})
const isUserInput = computed(
() =>
props.applicationDetails.work_flow?.nodes?.filter((v: any) => v.id === 'base-node')[0]
?.properties.user_input_field_list.length > 0
.properties.user_input_field_list.length > 0
)
const isAPIInput = computed(
() =>
props.type === 'debug-ai-chat' &&
props.applicationDetails.work_flow?.nodes?.filter((v: any) => v.id === 'base-node')[0]
?.properties.api_input_field_list.length > 0
.properties.api_input_field_list.length > 0
)
const showUserInputContent = computed(() => {
return (

View File

@ -5,13 +5,11 @@
<template v-for="(item, index) in md_view_list" :key="index">
<div
v-if="item.type === 'question'"
@click="
sendMessage && type !== 'log' ? sendMessage(item.content, 'new') : (content: string) => {}
"
@click="sendMessage ? sendMessage(item.content, 'new') : (content: string) => {}"
class="problem-button mt-4 mb-4 flex"
:class="sendMessage && type !== 'log' ? 'cursor' : 'disabled'"
:class="sendMessage ? 'cursor' : 'disabled'"
>
<el-icon class="mr-8" style="margin-top: 2px">
<el-icon class="mr-8" style="margin-top: 2px;">
<EditPen />
</el-icon>
{{ item.content }}
@ -78,7 +76,6 @@ const props = withDefaults(
chat_record_id?: string
runtime_node_id?: string
disabled?: boolean
type?: 'log' | 'ai-chat' | 'debug-ai-chat'
}>(),
{
source: '',
@ -240,7 +237,7 @@ const split_form_rander_ = (source: string, type: string) => {
padding: 12px;
box-sizing: border-box;
color: var(--el-text-color-regular);
word-break: break-word;
word-break: break-all;
&:hover {
background: var(--el-color-primary-light-9);

View File

@ -72,7 +72,7 @@
<SelectProviderDialog
v-if="showFooter"
ref="selectProviderRef"
@change="(provider: any, modelType: any) => openCreateModel(provider, modelType)"
@change="(provider, modelType) => openCreateModel(provider, modelType)"
/>
</div>
</template>
@ -82,6 +82,8 @@ import type { Provider } from '@/api/type/model'
import { relatedObject } from '@/utils/utils'
import CreateModelDialog from '@/views/template/component/CreateModelDialog.vue'
import SelectProviderDialog from '@/views/template/component/SelectProviderDialog.vue'
import { t } from '@/locales'
import useStore from '@/stores'
defineOptions({ name: 'ModelSelect' })

View File

@ -63,9 +63,6 @@ export default {
limitMessage2: 'files',
sizeLimit: 'Each file must not exceed',
imageMessage: 'Please process the image content',
documentMessage: 'Please understand the content of the document',
audioMessage: 'Please understand the audio content',
otherMessage: 'Please understand the file content',
errorMessage: 'Upload Failed'
},
executionDetails: {

View File

@ -139,7 +139,7 @@ Response requirements:
hybridSearch: 'Hybrid Search',
hybridSearchTooltip:
'Hybrid search is a retrieval method based on both vector and text similarity, suitable for medium data volumes in the knowledge.',
similarityThreshold: 'Similarity not lower than',
similarityThreshold: 'Similarity higher than',
similarityTooltip: 'The higher the similarity, the stronger the correlation.',
topReferences: 'Top N Segments',
maxCharacters: 'Maximum Characters per Reference',

View File

@ -149,7 +149,7 @@ export default {
tooltip: 'When user asks a question, handle matched segments according to the set method.'
},
similarity: {
label: 'Similarity not lower than',
label: 'Similarity Higher Than',
placeholder: 'Directly return segment content',
requiredMessage: 'Please enter similarity value'
}

View File

@ -61,9 +61,6 @@ export default {
limitMessage2: '个文件',
sizeLimit: '单个文件大小不能超过',
imageMessage: '请解析图片内容',
documentMessage: '请理解文档内容',
audioMessage: '请理解音频内容',
otherMessage: '请理解文件内容',
errorMessage: '上传失败'
},
executionDetails: {

View File

@ -130,7 +130,7 @@ export default {
hybridSearch: '混合检索',
hybridSearchTooltip:
'混合检索是一种基于向量和文本相似度的检索方式,适用于知识库中的中等数据量场景。',
similarityThreshold: '相似度不低于',
similarityThreshold: '相似度于',
similarityTooltip: '相似度越高相关性越强。',
topReferences: '引用分段数 TOP',
maxCharacters: '最多引用字符数',

View File

@ -147,7 +147,7 @@ export default {
tooltip: '用户提问时,命中文档下的分段时按照设置的方式进行处理。'
},
similarity: {
label: '相似度不低于',
label: '相似度于',
placeholder: '直接返回分段内容',
requiredMessage: '请输入相似度'
}

View File

@ -61,9 +61,6 @@ export default {
limitMessage2: '個文件',
sizeLimit: '單個文件大小不能超過',
imageMessage: '請解析圖片內容',
documentMessage: '請理解檔案內容',
audioMessage: '請理解音訊內容',
otherMessage: '請理解檔案內容',
errorMessage: '上傳失敗'
},
executionDetails: {

View File

@ -129,7 +129,7 @@ export default {
hybridSearch: '混合檢索',
hybridSearchTooltip:
'混合檢索是一種基於向量和文本相似度的檢索方式,適用於知識庫中的中等數據量場景。',
similarityThreshold: '相似度不低於',
similarityThreshold: '相似度於',
similarityTooltip: '相似度越高相關性越強。',
topReferences: '引用分段數 TOP',
maxCharacters: '最多引用字元數',

View File

@ -146,7 +146,7 @@ export default {
tooltip: '用戶提問時,命中文檔下的分段時按照設置的方式進行處理。'
},
similarity: {
label: '相似度不低於',
label: '相似度高于',
placeholder: '直接返回分段内容',
requiredMessage: '请输入相似度'
}

View File

@ -62,7 +62,7 @@
}
.el-form-item__label {
font-weight: 400;
width: 100% !important;
width: 100%;
}
.el-form-item__error {

View File

@ -1,5 +1,5 @@
import { MsgError } from '@/utils/message'
import { nanoid } from 'nanoid'
export function toThousands(num: any) {
return num?.toString().replace(/\d+/, function (n: any) {
return n.replace(/(\d)(?=(?:\d{3})+$)/g, '$1,')
@ -25,7 +25,7 @@ export function filesize(size: number) {
id
*/
export const randomId = function () {
return nanoid()
return Math.floor(Math.random() * 10000) + ''
}
/*
@ -48,9 +48,7 @@ const typeList: any = {
export function getImgUrl(name: string) {
const list = Object.values(typeList).flat()
const type = list.includes(fileType(name).toLowerCase())
? fileType(name).toLowerCase()
: 'unknown'
const type = list.includes(fileType(name).toLowerCase()) ? fileType(name).toLowerCase() : 'unknown'
return new URL(`../assets/fileType/${type}-icon.svg`, import.meta.url).href
}
// 是否是白名单后缀

View File

@ -3,7 +3,7 @@
<div class="header border-b flex-between p-12-24">
<div class="flex align-center">
<back-button @click="back"></back-button>
<h4 class="ellipsis" style="max-width: 270px" :title="detail?.name">{{ detail?.name }}</h4>
<h4>{{ detail?.name }}</h4>
<div v-if="showHistory && disablePublic">
<el-text type="info" class="ml-16 color-secondary"
>{{ $t('views.applicationWorkflow.info.previewVersion') }}
@ -101,7 +101,7 @@
/>
</div>
<h4 class="ellipsis" style="max-width: 270px" :title="detail?.name">
<h4>
{{ detail?.name || $t('views.application.applicationForm.form.appName.label') }}
</h4>
</div>
@ -279,6 +279,7 @@ async function publicHandle() {
return
}
applicationApi.putPublishApplication(id as String, obj, loading).then(() => {
application.asyncGetApplicationDetail(id, loading).then((res: any) => {
detail.value.name = res.data.name
MsgSuccess(t('views.applicationWorkflow.tip.publicSuccess'))

View File

@ -28,9 +28,7 @@
/>
</div>
<h4 class="ellipsis-1" style="width: 50%" :title="applicationDetail?.name">
{{ applicationDetail?.name }}
</h4>
<h4>{{ applicationDetail?.name }}</h4>
</div>
</div>
<div>
@ -265,7 +263,7 @@ function getChatRecord() {
currentChatId.value,
paginationConfig,
loading,
true
false
)
.then((res: any) => {
paginationConfig.total = res.data.total

View File

@ -27,9 +27,7 @@
/>
</div>
<h4 class="ellipsis-1" style="width: 66%" :title="applicationDetail?.name">
{{ applicationDetail?.name }}
</h4>
<h4>{{ applicationDetail?.name }}</h4>
</div>
</div>
<div>
@ -261,7 +259,7 @@ function getChatRecord() {
currentChatId.value,
paginationConfig,
loading,
true
false
)
.then((res: any) => {
paginationConfig.total = res.data.total

View File

@ -27,9 +27,7 @@
:size="32"
/>
</div>
<h4 class="ellipsis-1" style="width: 66%" :title="applicationDetail?.name">
{{ applicationDetail?.name }}
</h4>
<h4>{{ applicationDetail?.name }}</h4>
</div>
</div>
<div>
@ -315,7 +313,7 @@ function getChatRecord() {
currentChatId.value,
paginationConfig.value,
loading,
true
false
)
.then((res: any) => {
paginationConfig.value.total = res.data.total

View File

@ -256,7 +256,9 @@ async function submit() {
} else {
if (detail.value.type === '2') {
datasetApi.putLarkDataset(id, obj, loading).then((res) => {
MsgSuccess(t('common.saveSuccess'))
datasetApi.putReEmbeddingDataset(id).then(() => {
MsgSuccess(t('common.saveSuccess'))
})
})
} else {
datasetApi.putDataset(id, obj, loading).then((res) => {

View File

@ -8,16 +8,15 @@
</h4>
</template>
<div class="hit-test__main p-16" v-loading="loading">
<div
class="question-title flex align-center"
:style="{ visibility: questionTitle ? 'visible' : 'hidden' }"
>
<AppAvatar>
<img src="@/assets/user-icon.svg" style="width: 54%" alt="" />
</AppAvatar>
<h4 class="break-all ellipsis-1 ml-8" style="width: 66%" :title="questionTitle">
{{ questionTitle }}
</h4>
<div class="question-title" :style="{ visibility: questionTitle ? 'visible' : 'hidden' }">
<div class="avatar">
<AppAvatar>
<img src="@/assets/user-icon.svg" style="width: 54%" alt="" />
</AppAvatar>
</div>
<div class="content">
<h4 class="text break-all">{{ questionTitle }}</h4>
</div>
</div>
<el-scrollbar>
<div class="hit-test-height">
@ -350,6 +349,20 @@ onMounted(() => {})
</script>
<style lang="scss" scoped>
.hit-test {
.question-title {
.avatar {
float: left;
}
.content {
padding-left: 40px;
.text {
padding: 6px 0;
height: 34px;
box-sizing: border-box;
}
}
}
&__operate {
.operate-textarea {
box-shadow: 0px 6px 24px 0px rgba(31, 35, 41, 0.08);

View File

@ -45,13 +45,7 @@
>
</el-input>
<img
:src="identifyCode"
alt=""
height="38"
class="ml-8 cursor border border-r-4"
@click="makeCode"
/>
<img :src="identifyCode" alt="" height="38" class="ml-8 cursor border border-r-4" @click="makeCode" />
</div>
</el-form-item>
</div>
@ -130,6 +124,7 @@ import useStore from '@/stores'
import authApi from '@/api/auth-setting'
import useApi from '@/api/user'
import { MsgConfirm, MsgError, MsgSuccess } from '@/utils/message'
import { t, getBrowserLang } from '@/locales'
import QrCodeTab from '@/views/login/components/QrCodeTab.vue'
import { useI18n } from 'vue-i18n'
@ -264,25 +259,20 @@ function changeMode(val: string) {
}
const login = () => {
if (!loginFormRef.value) {
return
}
loginFormRef.value?.validate((valid) => {
if (valid) {
loading.value = true
user
.login(
loginMode.value,
loginForm.value.username,
loginForm.value.password,
loginForm.value.captcha
)
.then(() => {
locale.value = localStorage.getItem('MaxKB-locale') || getBrowserLang() || 'en-US'
router.push({ name: 'home' })
})
.finally(() => (loading.value = false))
}
loginFormRef.value?.validate().then(() => {
loading.value = true
user
.login(
loginMode.value,
loginForm.value.username,
loginForm.value.password,
loginForm.value.captcha
)
.then(() => {
locale.value = localStorage.getItem('MaxKB-locale') || getBrowserLang() || 'en-US'
router.push({ name: 'home' })
})
.finally(() => (loading.value = false))
})
}

View File

@ -44,12 +44,7 @@
clearable
>
<template #prepend>
<el-select
v-model="searchType"
placeholder="Select"
style="width: 80px"
@change="searchTypeChange"
>
<el-select v-model="searchType" placeholder="Select" style="width: 80px">
<el-option :label="$t('common.title')" value="title" />
<el-option :label="$t('common.content')" value="content" />
</el-select>
@ -138,7 +133,9 @@
<el-dropdown-menu>
<el-dropdown-item @click="openGenerateDialog(item)">
<el-icon><Connection /></el-icon>
{{ $t('views.document.generateQuestion.title') }}</el-dropdown-item
{{
$t('views.document.generateQuestion.title')
}}</el-dropdown-item
>
<el-dropdown-item @click="openSelectDocumentDialog(item)">
<AppIcon iconName="app-migrate"></AppIcon>
@ -210,10 +207,6 @@ const title = ref('')
const search = ref('')
const searchType = ref('title')
// Reset the search term when the search field selector (title/content)
// changes, so a stale query is not applied against the new field.
const searchTypeChange = () => {
  search.value = ''
}
//
const isBatch = ref(false)
const multipleSelection = ref<any[]>([])
@ -320,7 +313,7 @@ function addParagraph() {
ParagraphDialogRef.value.open()
}
function editParagraph(row: any) {
title.value = t('views.paragraph.paragraphDetail')
title.value = t('views.paragraph.paragraphDetail')
ParagraphDialogRef.value.open(row)
}

View File

@ -301,8 +301,7 @@ function clickNodes(item: any) {
type: 'app-edge',
sourceNodeId: props.nodeModel.id,
sourceAnchorId: anchorData.value?.id,
targetNodeId: nodeModel.id,
targetAnchorId: nodeModel.id + '_left'
targetNodeId: nodeModel.id
})
closeNodeMenu()

View File

@ -12,6 +12,7 @@
ref="el"
v-bind:modelValue="form_data.branch"
:disabled="form_data.branch === 2"
:filter="'.no-drag'"
handle=".handle"
:animation="150"
ghostClass="ghost"

View File

@ -189,6 +189,7 @@ const sync_form_field_list = () => {
]
set(props.nodeModel.properties.config, 'fields', fields)
props.nodeModel.clear_next_node_field(false)
onDragHandle()
}
const addFormCollectRef = ref<InstanceType<typeof AddFormCollect>>()
const editFormCollectRef = ref<InstanceType<typeof EditFormCollect>>()
@ -263,7 +264,7 @@ function onDragHandle() {
onEnd: (evt) => {
if (evt.oldIndex === undefined || evt.newIndex === undefined) return
//
const items = cloneDeep([...form_data.value.form_field_list])
const items = [...form_data.value.form_field_list]
const [movedItem] = items.splice(evt.oldIndex, 1)
items.splice(evt.newIndex, 0, movedItem)
form_data.value.form_field_list = items
@ -275,7 +276,6 @@ onMounted(() => {
set(props.nodeModel, 'validate', validate)
sync_form_field_list()
props.nodeModel.graphModel.eventCenter.emit('refresh_incoming_node_field')
onDragHandle()
})
</script>
<style lang="scss" scoped></style>

View File

@ -91,7 +91,6 @@ import { ref, computed, onMounted } from 'vue'
import { isLastNode } from '@/workflow/common/data'
import applicationApi from '@/api/application'
import { app } from '@/main'
import {t} from "@/locales";
const props = defineProps<{ nodeModel: any }>()
const nodeCascaderRef = ref()
@ -120,16 +119,8 @@ const chat_data = computed({
const FunctionNodeFormRef = ref<FormInstance>()
const validate = () => {
for (const item of chat_data.value.input_field_list) {
if (item.source === 'reference' && item.is_required && item.value[0] !== 'global') {
if (props.nodeModel.graphModel.nodes.filter((node: any) => node.id === item.value[0]).length === 0 ) {
item.value = []
return Promise.reject({node: props.nodeModel, errMessage: item.name + t('dynamicsForm.tip.requiredMessage')})
}
}
}
return FunctionNodeFormRef.value?.validate().catch((err) => {
return Promise.reject({node: props.nodeModel, errMessage: err})
return Promise.reject({ node: props.nodeModel, errMessage: err })
})
}

View File

@ -83,6 +83,7 @@
$t('views.application.applicationForm.form.prompt.tooltip')
}}</template>
<AppIcon iconName="app-warning" class="app-warning-icon"></AppIcon>
<el-icon><EditPen /></el-icon>
</el-tooltip>
</div>
</template>