Compare commits


No commits in common. "main" and "v1.10.4-lts" have entirely different histories.

146 changed files with 759 additions and 2309 deletions

View File

@ -1,17 +0,0 @@
version: 2
updates:
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
timezone: "Asia/Shanghai"
day: "friday"
target-branch: "v2"
groups:
python-dependencies:
patterns:
- "*"
# ignore:
# - dependency-name: "pymupdf"
# versions: ["*"]

View File

@ -7,7 +7,7 @@ on:
inputs:
dockerImageTag:
description: 'Image Tag'
default: 'v1.10.7-dev'
default: 'v1.10.3-dev'
required: true
dockerImageTagWithLatest:
description: 'Whether to publish the latest tag (select for official releases; never for test builds)'
@ -36,7 +36,7 @@ on:
jobs:
build-and-push-to-fit2cloud-registry:
if: ${{ contains(github.event.inputs.registry, 'fit2cloud') }}
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- name: Check Disk Space
run: df -h
@ -52,6 +52,10 @@ jobs:
swap-storage: true
- name: Check Disk Space
run: df -h
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 8
- name: Checkout
uses: actions/checkout@v4
with:
@ -64,17 +68,24 @@ jobs:
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
else
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
fi
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
${DOCKER_IMAGE_TAGS} .
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
# Until https://github.com/tonistiigi/binfmt/issues/215
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
buildkitd-config-inline: |
[worker.oci]
max-parallelism = 1
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
@ -89,12 +100,11 @@ jobs:
password: ${{ secrets.FIT2CLOUD_REGISTRY_PASSWORD }}
- name: Docker Buildx (build-and-push)
run: |
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
build-and-push-to-dockerhub:
if: ${{ contains(github.event.inputs.registry, 'dockerhub') }}
runs-on: ubuntu-latest
runs-on: ubuntu-22.04
steps:
- name: Check Disk Space
run: df -h
@ -110,6 +120,10 @@ jobs:
swap-storage: true
- name: Check Disk Space
run: df -h
- name: Set Swap Space
uses: pierotofy/set-swap-space@master
with:
swap-size-gb: 8
- name: Checkout
uses: actions/checkout@v4
with:
@ -122,17 +136,24 @@ jobs:
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
else
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
fi
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
${DOCKER_IMAGE_TAGS} .
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
# Until https://github.com/tonistiigi/binfmt/issues/215
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
with:
buildkitd-config-inline: |
[worker.oci]
max-parallelism = 1
- name: Login to GitHub Container Registry
uses: docker/login-action@v3
with:
@ -146,5 +167,4 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker Buildx (build-and-push)
run: |
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
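Both jobs compute the extra tag the same way, and it is the heart of this workflow change: on `main`, bash's `${TAG_NAME%%.*}` strips everything from the first dot, so `v1.10.4` is also published under the rolling major tag `v1`; the LTS branch publishes `latest` instead. A minimal Python sketch of the two behaviors (the image name is illustrative):

```python
def docker_tags(tag_name: str, with_latest: bool, lts_behavior: bool) -> list:
    image = "1panel/maxkb"  # illustrative image name
    tags = [f"{image}:{tag_name}"]
    if with_latest:
        if lts_behavior:
            tags.append(f"{image}:latest")  # LTS: a plain 'latest' tag
        else:
            # main: bash ${TAG_NAME%%.*} keeps the text before the first '.'
            tags.append(f"{image}:{tag_name.split('.', 1)[0]}")
    return tags

assert docker_tags("v1.10.4", True, lts_behavior=False)[1] == "1panel/maxkb:v1"
assert docker_tags("v1.10.4", True, lts_behavior=True)[1] == "1panel/maxkb:latest"
```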

View File

@ -1,6 +1,5 @@
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
<h3 align="center">Open-source platform for building enterprise-grade agents</h3>
<h3 align="center">强大易用的企业级智能体平台</h3>
<h3 align="center">Ready-to-use AI Chatbot</h3>
<p align="center"><a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a></p>
<p align="center">
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
@ -11,10 +10,10 @@
</p>
<hr/>
MaxKB = Max Knowledge Brain; it is an open-source platform for building enterprise-grade agents. MaxKB integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
MaxKB = Max Knowledge Base; it is a ready-to-use AI chatbot that integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting and vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting, vectorization, and RAG (Retrieval-Augmented Generation). This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
- **Flexible Orchestration**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
- **Multi Modal**: Native support for input and output text, image, audio and video.
@ -56,6 +55,8 @@ Access MaxKB web interface at `http://your_server_ip:8080` with default admin cr
## Feature Comparison
MaxKB is positioned as a ready-to-use RAG (Retrieval-Augmented Generation) intelligent Q&A application rather than a middleware platform for building large-model applications. The following table is merely a comparison from a functional perspective.
<table style="width: 100%;">
<tr>
<th align="center">Feature</th>

View File

@ -1,25 +1,25 @@
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
<h3 align="center">强大易用的企业级智能体平台</h3>
<h3 align="center">基于大模型和 RAG 的知识库问答系统</h3>
<h4 align="center">Ready-to-use, flexible RAG Chatbot</h4>
<p align="center">
<a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: auto;" /></a>
<a href="https://market.aliyun.com/products/53690006/cmjj00067609.html?userCode=kmemb8jp" target="_blank"><img src="https://img.alicdn.com/imgextra/i2/O1CN01H5JIwY1rZ0OobDjnJ_!!6000000005644-2-tps-1000-216.png" alt="1Panel-dev%2FMaxKB | Aliyun" style="width: 250px; height: auto;" /></a>
</p>
<p align="center">
<a href="README_EN.md"><img src="https://img.shields.io/badge/English_README-blue" alt="English README"></a>
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb" alt="License: GPL v3"></a>
<a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?style=flat-square" alt="Stars"></a>
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
<a href="https://gitee.com/fit2cloud-feizhiyun/MaxKB"><img src="https://gitee.com/fit2cloud-feizhiyun/MaxKB/badge/star.svg?theme=gvp" alt="Gitee Stars"></a>
<a href="https://gitcode.com/feizhiyun/MaxKB"><img src="https://gitcode.com/feizhiyun/MaxKB/star/badge.svg" alt="GitCode Stars"></a>
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?style=flat-square" alt="Stars"></a>
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
</p>
<hr/>
MaxKB = Max Knowledge Brain; it is a powerful, easy-to-use enterprise-grade agent platform supporting RAG (retrieval-augmented generation), workflow orchestration, and MCP tool calling. MaxKB integrates with all mainstream large language models and is widely used in intelligent customer service, internal enterprise knowledge-base Q&A, employee assistants, academic research, and education.
MaxKB = Max Knowledge Base; it is a ready-to-use RAG chatbot with powerful workflow and MCP tool-calling capabilities. It connects to all mainstream large language models (LLMs) and is widely used in intelligent customer service, internal enterprise knowledge bases, academic research, and education.
- **RAG (Retrieval-Augmented Generation)**: Efficiently builds a local AI knowledge base; supports direct document upload / automatic crawling of online documents, with automatic text splitting and vectorization, effectively reducing LLM hallucinations and improving Q&A quality;
- **Ready to Use**: Supports direct document upload / automatic crawling of online documents, with automatic text splitting, vectorization, and RAG (retrieval-augmented generation), effectively reducing LLM hallucinations for a smooth smart Q&A experience;
- **Model-Agnostic**: Connects to all kinds of large models, including locally hosted private models (DeepSeek R1 / Llama 3 / Qwen 2, etc.), domestic public models (Tongyi Qianwen / Tencent Hunyuan / ByteDance Doubao / Baidu Qianfan / Zhipu AI / Kimi, etc.), and international public models (OpenAI / Claude / Gemini, etc.);
- **Flexible Orchestration**: A built-in workflow engine, function library, and MCP tool calling support orchestrating AI processes to meet the needs of complex business scenarios;
- **Seamless Embedding**: Supports zero-code embedding into third-party business systems, quickly adding smart Q&A capabilities to existing systems and improving user satisfaction;
- **Model-Agnostic**: Connects to all kinds of large models, including locally hosted private models (DeepSeek R1 / Llama 3 / Qwen 2, etc.), domestic public models (Tongyi Qianwen / Tencent Hunyuan / ByteDance Doubao / Baidu Qianfan / Zhipu AI / Kimi, etc.), and international public models (OpenAI / Claude / Gemini, etc.).
- **Seamless Embedding**: Supports zero-code embedding into third-party business systems, quickly adding smart Q&A capabilities to existing systems and improving user satisfaction.
A three-minute video introduction to MaxKB: https://www.bilibili.com/video/BV18JypYeEkj/

View File

@ -24,16 +24,3 @@
- [MaxKB case study: Tangshan Maritime Safety Administration's "Xiaohai" AI voice assistant](https://news.qq.com/rain/a/20250223A030BE00)
- [MaxKB case study: Hanshou County (Hunan) government services](http://hsds.hsdj.gov.cn:19999/ui/chat/a2c976736739aadc)
- [MaxKB case study: Guangzhou Women and Children's Medical Center's AI assistant for classifying and grading medical data](https://mp.weixin.qq.com/s/YHUMkUOAaUomBV8bswpK3g)
- [MaxKB case study: Suzhou Nuclear Power Research Institute's AI assistant for self-checking the quality of maintenance program evaluations](https://mp.weixin.qq.com/s/Ts5FQdnv7Tu9Jp7bvofCVA)
- [MaxKB case study: State Nuclear Power Automation System Engineering's "NuCON AI Help"](https://mp.weixin.qq.com/s/HNPc7u5xVfGLJr8IQz3vjQ)
- [MaxKB case study: Shenzhen Tong opens a new chapter of DeepSeek-powered smart applications](https://mp.weixin.qq.com/s/SILN0GSescH9LyeQqYP0VQ)
- [MaxKB case study: Nantong smart mobility leads the Yangtze River Delta as the "Changxing Nantong" app, the first to integrate DeepSeek, launches new AI scenarios](https://mp.weixin.qq.com/s/WEC9UQ6msY0VS8LhTZh-Ew)
- [MaxKB case study: CSSC Power launches its "Smart Power Cloud Assistant" AI and its first batch of digital employees](https://mp.weixin.qq.com/s/OGcEkjh9DzGO1Tkc9nr7qg)
- [MaxKB case study: AI + mining: DeepSeek powers the smart upgrade of green, intelligent mines](https://mp.weixin.qq.com/s/SZstxTvVoLZg0ECbZbfpIA)
- [MaxKB case study: DeepSeek lands at Hongsheng Copper, with a domestic LLM powering the "lights-out factory"](https://mp.weixin.qq.com/s/Eczdx574MS5RMF7WfHN7_A)
- [MaxKB case study: Embracing the intelligent era: China Minmetals empowers enterprise development with "AI+"](https://mp.weixin.qq.com/s/D5vBtlX2E81pWE3_2OgWSw)
- [MaxKB case study: DeepSeek powers AI agents at MCC Wukan](https://mp.weixin.qq.com/s/8m0vxGcWXNdZazziQrLyxg)
- [MaxKB case study: Shaanxi Broadcast & TV Network's "Qinling Cloud" platform completes an on-premises DeepSeek deployment](https://mp.weixin.qq.com/s/ZKmEU_wWShK1YDomKJHQeA)
- [MaxKB case study: Guangdong Holdings completes a private DeepSeek deployment to support intelligent group management](https://mp.weixin.qq.com/s/2JbVp0-kr9Hfp-0whH4cvg)
- [MaxKB case study: the Building Materials Industry Information Center completes a local DeepSeek deployment, advancing the industry's digital and intelligent transformation](https://mp.weixin.qq.com/s/HThGSnND3qDF8ySEqiM4jw)
- [MaxKB case study: Fujian Design opens a new chapter with large AI models](https://mp.weixin.qq.com/s/m67e-H7iQBg3d24NM82UjA)

View File

@ -40,7 +40,6 @@ tool_message_template = """
"""
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
reasoning_content: str):
chat_model = node_variable.get('chat_model')
@ -103,6 +102,7 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
async def _yield_mcp_response(chat_model, message_list, mcp_servers):
async with MultiServerMCPClient(json.loads(mcp_servers)) as client:
agent = create_react_agent(chat_model, client.get_tools())
@ -115,7 +115,6 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers):
if isinstance(chunk[0], AIMessageChunk):
yield chunk[0]
def mcp_response_generator(chat_model, message_list, mcp_servers):
loop = asyncio.new_event_loop()
try:
@ -131,7 +130,6 @@ def mcp_response_generator(chat_model, message_list, mcp_servers):
finally:
loop.close()
async def anext_async(agen):
return await agen.__anext__()
@ -188,8 +186,7 @@ class BaseChatNode(IChatNode):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
self.context['reasoning_content'] = details.get('reasoning_content')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
model_params_setting=None,
@ -219,7 +216,7 @@ class BaseChatNode(IChatNode):
message_list = self.generate_message_list(system, prompt, history_message)
self.context['message_list'] = message_list
if mcp_enable and mcp_servers is not None and '"stdio"' not in mcp_servers:
if mcp_enable and mcp_servers is not None:
r = mcp_response_generator(chat_model, message_list, mcp_servers)
return NodeResult(
{'result': r, 'chat_model': chat_model, 'message_list': message_list,
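`mcp_response_generator` above drives LangChain's async MCP stream from the synchronous node API by pumping `__anext__` on a private event loop until `StopAsyncIteration`. A self-contained sketch of that bridge, with a stand-in for the agent stream:

```python
import asyncio

async def fake_astream():
    # stand-in for the agent/MCP chunk stream in the diff
    for chunk in ("hel", "lo"):
        await asyncio.sleep(0)
        yield chunk

async def anext_async(agen):
    return await agen.__anext__()

def mcp_response_generator():
    loop = asyncio.new_event_loop()
    try:
        agen = fake_astream()
        while True:
            try:
                yield loop.run_until_complete(anext_async(agen))
            except StopAsyncIteration:
                break
    finally:
        loop.close()

print(list(mcp_response_generator()))  # ['hel', 'lo']
```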

View File

@ -168,8 +168,7 @@ class BaseApplicationNode(IApplicationNode):
self.context['question'] = details.get('question')
self.context['type'] = details.get('type')
self.context['reasoning_content'] = details.get('reasoning_content')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
@ -179,8 +178,7 @@ class BaseApplicationNode(IApplicationNode):
current_chat_id = string_to_uuid(chat_id + application_id)
Chat.objects.get_or_create(id=current_chat_id, defaults={
'application_id': application_id,
'abstract': message[0:1024],
'client_id': client_id,
'abstract': message[0:1024]
})
if app_document_list is None:
app_document_list = []

View File

@ -15,9 +15,7 @@ from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
class BaseReplyNode(IReplyNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
if reply_type == 'referencing':
result = self.get_reference_content(fields)
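The `save_context` edits in this and the neighboring node files all revert the same guard: on `main`, a node only contributes its output to the final answer when it is flagged as a result node, while the LTS branch copies it unconditionally. A minimal sketch of the two behaviors:

```python
class Node:
    def __init__(self, node_params):
        self.node_params = node_params
        self.answer_text = None

    def save_context_main(self, details):
        # main: only nodes flagged is_result surface their answer
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

    def save_context_lts(self, details):
        # v1.10.4-lts: every node overwrites answer_text
        self.answer_text = details.get('answer')

node = Node({'is_result': False})
node.save_context_main({'answer': 'hi'})
assert node.answer_text is None
node.save_context_lts({'answer': 'hi'})
assert node.answer_text == 'hi'
```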

View File

@ -38,8 +38,7 @@ class BaseFormNode(IFormNode):
self.context['start_time'] = details.get('start_time')
self.context['form_data'] = form_data
self.context['is_submit'] = details.get('is_submit')
if self.node_params.get('is_result', False):
self.answer_text = details.get('result')
self.answer_text = details.get('result')
if form_data is not None:
for key in form_data:
self.context[key] = form_data[key]
@ -71,7 +70,7 @@ class BaseFormNode(IFormNode):
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
'form_data': self.context.get('form_data', {}),
"is_submit": self.context.get("is_submit", False)}
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
form = f'<form_rander>{json.dumps(form_setting,ensure_ascii=False)}</form_rander>'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
@ -86,7 +85,7 @@ class BaseFormNode(IFormNode):
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
'form_data': self.context.get('form_data', {}),
"is_submit": self.context.get("is_submit", False)}
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
form = f'<form_rander>{json.dumps(form_setting,ensure_ascii=False)}</form_rander>'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')

View File

@ -65,7 +65,7 @@ def valid_reference_value(_type, value, name):
def convert_value(name: str, value, _type, is_required, source, node):
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
if not is_required and value is None:
return None
if not is_required and source == 'reference' and (value is None or len(value) == 0):
return None
@ -113,8 +113,7 @@ def valid_function(function_lib, user_id):
class BaseFunctionLibNodeNode(IFunctionLibNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
if self.node_params.get('is_result'):
self.answer_text = str(details.get('result'))
self.answer_text = str(details.get('result'))
def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
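The `convert_value` change just above (and its twin in the function node below) narrows the optional-field short-circuit: `main` treats an empty string like a missing value for any optional field, while the LTS code only skips `None` here and handles empty reference values separately. A sketch of the difference:

```python
def is_skipped_main(value, is_required):
    return not is_required and (value is None or (isinstance(value, str) and len(value) == 0))

def is_skipped_lts(value, is_required):
    return not is_required and value is None

assert is_skipped_main("", is_required=False) is True   # "" short-circuits to None
assert is_skipped_lts("", is_required=False) is False   # "" goes on to type conversion
```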

View File

@ -49,7 +49,7 @@ def valid_reference_value(_type, value, name):
def convert_value(name: str, value, _type, is_required, source, node):
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
if not is_required and value is None:
return None
if source == 'reference':
value = node.workflow_manage.get_reference_field(
@ -84,8 +84,7 @@ def convert_value(name: str, value, _type, is_required, source, node):
class BaseFunctionNodeNode(IFunctionNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
if self.node_params.get('is_result', False):
self.answer_text = str(details.get('result'))
self.answer_text = str(details.get('result'))
def execute(self, input_field_list, code, **kwargs) -> NodeResult:
params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),

View File

@ -16,8 +16,7 @@ class BaseImageGenerateNode(IImageGenerateNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
model_params_setting,
@ -25,8 +24,7 @@ class BaseImageGenerateNode(IImageGenerateNode):
**kwargs) -> NodeResult:
print(model_params_setting)
application = self.workflow_manage.work_flow_post_handler.chat_info.application
tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)

View File

@ -69,8 +69,7 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
model_params_setting,

View File

@ -14,8 +14,7 @@ class BaseMcpNode(IMcpNode):
self.context['result'] = details.get('result')
self.context['tool_params'] = details.get('tool_params')
self.context['mcp_tool'] = details.get('mcp_tool')
if self.node_params.get('is_result', False):
self.answer_text = details.get('result')
self.answer_text = details.get('result')
def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
servers = json.loads(mcp_servers)
@ -28,8 +27,7 @@ class BaseMcpNode(IMcpNode):
return s
res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params))
return NodeResult(
{'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
return NodeResult({'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
def handle_variables(self, tool_params):
# Resolve variables referenced in the tool parameters

View File

@ -80,8 +80,7 @@ class BaseQuestionNode(IQuestionNode):
self.context['answer'] = details.get('answer')
self.context['message_tokens'] = details.get('message_tokens')
self.context['answer_tokens'] = details.get('answer_tokens')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
model_params_setting=None,

View File

@ -18,8 +18,7 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult:
stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id'))

View File

@ -40,13 +40,10 @@ class BaseStartStepNode(IStarNode):
self.context['document'] = details.get('document_list')
self.context['image'] = details.get('image_list')
self.context['audio'] = details.get('audio_list')
self.context['other'] = details.get('other_list')
self.status = details.get('status')
self.err_message = details.get('err_message')
for key, value in workflow_variable.items():
workflow_manage.context[key] = value
for item in details.get('global_fields', []):
workflow_manage.context[item.get('key')] = item.get('value')
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
pass
@ -62,8 +59,7 @@ class BaseStartStepNode(IStarNode):
'question': question,
'image': self.workflow_manage.image_list,
'document': self.workflow_manage.document_list,
'audio': self.workflow_manage.audio_list,
'other': self.workflow_manage.other_list,
'audio': self.workflow_manage.audio_list
}
return NodeResult(node_variable, workflow_variable)
@ -87,6 +83,5 @@ class BaseStartStepNode(IStarNode):
'image_list': self.context.get('image'),
'document_list': self.context.get('document'),
'audio_list': self.context.get('audio'),
'other_list': self.context.get('other'),
'global_fields': global_fields
}

View File

@ -37,8 +37,7 @@ def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
class BaseTextToSpeechNode(ITextToSpeechNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
self.answer_text = details.get('answer')
def execute(self, tts_model_id, chat_id,
content, model_params_setting=None,

View File

@ -238,7 +238,6 @@ class WorkflowManage:
base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None,
document_list=None,
audio_list=None,
other_list=None,
start_node_id=None,
start_node_data=None, chat_record=None, child_node=None):
if form_data is None:
@ -249,15 +248,12 @@ class WorkflowManage:
document_list = []
if audio_list is None:
audio_list = []
if other_list is None:
other_list = []
self.start_node_id = start_node_id
self.start_node = None
self.form_data = form_data
self.image_list = image_list
self.document_list = document_list
self.audio_list = audio_list
self.other_list = other_list
self.params = params
self.flow = flow
self.context = {}

View File

@ -11,7 +11,7 @@ import uuid
from django.contrib.postgres.fields import ArrayField
from django.db import models
from langchain.schema import HumanMessage, AIMessage
from django.utils.translation import gettext as _
from common.encoder.encoder import SystemEncoder
from common.mixins.app_model_mixin import AppModelMixin
from dataset.models.data_set import DataSet
@ -167,11 +167,7 @@ class ChatRecord(AppModelMixin):
return HumanMessage(content=self.problem_text)
def get_ai_message(self):
answer_text = self.answer_text
if answer_text is None or len(str(answer_text).strip()) == 0:
answer_text = _(
'Sorry, no relevant content was found. Please re-describe your problem or provide more information. ')
return AIMessage(content=answer_text)
return AIMessage(content=self.answer_text)
def get_node_details_runtime_node_id(self, runtime_node_id):
return self.details.get(runtime_node_id, None)
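The `get_ai_message` change above means `main` substitutes a translated apology whenever a chat record has no answer text, so replayed history never hands the model an empty AI message; the LTS branch returns the field as-is. A sketch of the guard, using the fallback string from the diff:

```python
def ai_message_content(answer_text):
    if answer_text is None or len(str(answer_text).strip()) == 0:
        return ('Sorry, no relevant content was found. Please re-describe your '
                'problem or provide more information. ')
    return answer_text

assert ai_message_content('   ').startswith('Sorry')
assert ai_message_content('42') == '42'
```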

View File

@ -148,12 +148,10 @@ class ModelSettingSerializer(serializers.Serializer):
error_messages=ErrMessage.char(_("Thinking process switch")))
reasoning_content_start = serializers.CharField(required=False, allow_null=True, default="<think>",
allow_blank=True, max_length=256,
trim_whitespace=False,
error_messages=ErrMessage.char(
_("The thinking process begins to mark")))
reasoning_content_end = serializers.CharField(required=False, allow_null=True, allow_blank=True, default="</think>",
max_length=256,
trim_whitespace=False,
error_messages=ErrMessage.char(_("End of thinking process marker")))
@ -164,7 +162,7 @@ class ApplicationWorkflowSerializer(serializers.Serializer):
max_length=256, min_length=1,
error_messages=ErrMessage.char(_("Application Description")))
work_flow = serializers.DictField(required=False, error_messages=ErrMessage.dict(_("Workflow Objects")))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char(_("Opening remarks")))
@staticmethod
@ -227,7 +225,7 @@ class ApplicationSerializer(serializers.Serializer):
min_value=0,
max_value=1024,
error_messages=ErrMessage.integer(_("Historical chat records")))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char(_("Opening remarks")))
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
allow_null=True,
@ -495,7 +493,7 @@ class ApplicationSerializer(serializers.Serializer):
min_value=0,
max_value=1024,
error_messages=ErrMessage.integer(_("Historical chat records")))
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
error_messages=ErrMessage.char(_("Opening remarks")))
dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
error_messages=ErrMessage.list(_("Related Knowledge Base"))
@ -1012,8 +1010,7 @@ class ApplicationSerializer(serializers.Serializer):
'stt_autosend': application.stt_autosend,
'file_upload_enable': application.file_upload_enable,
'file_upload_setting': application.file_upload_setting,
'work_flow': {'nodes': [node for node in ((application.work_flow or {}).get('nodes', []) or []) if
node.get('id') == 'base-node']},
'work_flow': application.work_flow,
'show_source': application_access_token.show_source,
'language': application_access_token.language,
**application_setting_dict})
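The `work_flow` line in this hunk is a data-exposure fix on `main`: the public application profile returns only the `base-node` from the stored workflow instead of the whole graph. A sketch of the filter, tolerant of a missing or malformed workflow:

```python
def public_work_flow(work_flow):
    nodes = ((work_flow or {}).get('nodes', []) or [])
    return {'nodes': [node for node in nodes if node.get('id') == 'base-node']}

assert public_work_flow(None) == {'nodes': []}
assert public_work_flow({'nodes': [{'id': 'base-node'}, {'id': 'chat-node'}]}) \
       == {'nodes': [{'id': 'base-node'}]}
```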
@ -1074,7 +1071,6 @@ class ApplicationSerializer(serializers.Serializer):
for update_key in update_keys:
if update_key in instance and instance.get(update_key) is not None:
application.__setattr__(update_key, instance.get(update_key))
print(application.name)
application.save()
if 'dataset_id_list' in instance:
@ -1093,7 +1089,6 @@ class ApplicationSerializer(serializers.Serializer):
chat_cache.clear_by_application_id(application_id)
application_access_token = QuerySet(ApplicationAccessToken).filter(application_id=application_id).first()
# Refresh the cached data
print(application.name)
get_application_access_token(application_access_token.access_token, False)
return self.one(with_valid=False)
@ -1146,8 +1141,6 @@ class ApplicationSerializer(serializers.Serializer):
instance['file_upload_enable'] = node_data['file_upload_enable']
if 'file_upload_setting' in node_data:
instance['file_upload_setting'] = node_data['file_upload_setting']
if 'name' in node_data:
instance['name'] = node_data['name']
break
def speech_to_text(self, file, with_valid=True):
@ -1325,8 +1318,6 @@ class ApplicationSerializer(serializers.Serializer):
def get_mcp_servers(self, with_valid=True):
if with_valid:
self.is_valid(raise_exception=True)
if '"stdio"' in self.data.get('mcp_servers'):
raise AppApiException(500, _('stdio is not supported'))
servers = json.loads(self.data.get('mcp_servers'))
async def get_mcp_tools(servers):

View File

@ -213,21 +213,12 @@ class OpenAIChatSerializer(serializers.Serializer):
return instance.get('messages')[-1].get('content')
@staticmethod
def generate_chat(chat_id, application_id, message, client_id, asker=None):
def generate_chat(chat_id, application_id, message, client_id):
if chat_id is None:
chat_id = str(uuid.uuid1())
chat = QuerySet(Chat).filter(id=chat_id).first()
if chat is None:
asker_dict = {'user_name': '游客'}
if asker is not None:
if isinstance(asker, str):
asker_dict = {
'user_name': asker
}
elif isinstance(asker, dict):
asker_dict = asker
Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id,
asker=asker_dict).save()
Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id).save()
return chat_id
def chat(self, instance: Dict, with_valid=True):
@ -241,8 +232,7 @@ class OpenAIChatSerializer(serializers.Serializer):
application_id = self.data.get('application_id')
client_id = self.data.get('client_id')
client_type = self.data.get('client_type')
chat_id = self.generate_chat(chat_id, application_id, message, client_id,
asker=instance.get('form_data', {}).get("asker"))
chat_id = self.generate_chat(chat_id, application_id, message, client_id)
return ChatMessageSerializer(
data={
'chat_id': chat_id, 'message': message,
@ -255,7 +245,6 @@ class OpenAIChatSerializer(serializers.Serializer):
'image_list': instance.get('image_list', []),
'document_list': instance.get('document_list', []),
'audio_list': instance.get('audio_list', []),
'other_list': instance.get('other_list', []),
}
).chat(base_to_response=OpenaiToResponse())
@ -285,7 +274,6 @@ class ChatMessageSerializer(serializers.Serializer):
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
other_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Other")))
child_node = serializers.DictField(required=False, allow_null=True,
error_messages=ErrMessage.dict(_("Child Nodes")))
@ -384,7 +372,6 @@ class ChatMessageSerializer(serializers.Serializer):
image_list = self.data.get('image_list')
document_list = self.data.get('document_list')
audio_list = self.data.get('audio_list')
other_list = self.data.get('other_list')
user_id = chat_info.application.user_id
chat_record_id = self.data.get('chat_record_id')
chat_record = None
@ -401,7 +388,7 @@ class ChatMessageSerializer(serializers.Serializer):
'client_id': client_id,
'client_type': client_type,
'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type),
base_to_response, form_data, image_list, document_list, audio_list, other_list,
base_to_response, form_data, image_list, document_list, audio_list,
self.data.get('runtime_node_id'),
self.data.get('node_data'), chat_record, self.data.get('child_node'))
r = work_flow_manage.run()

View File

@ -174,14 +174,7 @@ class ChatSerializers(serializers.Serializer):
condition = base_condition & min_trample_query
else:
condition = base_condition
inner_queryset = QuerySet(Chat).filter(application_id=self.data.get("application_id"))
if 'abstract' in self.data and self.data.get('abstract') is not None:
inner_queryset = inner_queryset.filter(abstract__icontains=self.data.get('abstract'))
return {
'inner_queryset': inner_queryset,
'default_queryset': query_set.filter(condition).order_by("-application_chat.update_time")
}
return query_set.filter(condition).order_by("-application_chat.update_time")
def list(self, with_valid=True):
if with_valid:
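On `main`, `get_query_set` returns both an `inner_queryset` and a `default_queryset`, and the SQL templates further down splice them in through `${inner_queryset}` / `${default_queryset}` placeholders, so the chat-record aggregation only scans chats matching the abstract filter. A rough sketch of that placeholder substitution, assuming a hypothetical renderer (the real project resolves querysets into SQL fragments elsewhere):

```python
SQL = ("SELECT chat_id FROM application_chat_record "
       "WHERE chat_id IN (SELECT id FROM application_chat ${inner_queryset})")

def render(sql, **fragments):
    # hypothetical helper: replace ${name} placeholders with SQL fragments
    for name, fragment in fragments.items():
        sql = sql.replace('${' + name + '}', fragment)
    return sql

print(render(SQL, inner_queryset="WHERE abstract ILIKE '%refund%'"))
```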

View File

@ -23,8 +23,6 @@ FROM
chat_id
FROM
application_chat_record
WHERE chat_id IN (
SELECT id FROM application_chat ${inner_queryset})
GROUP BY
application_chat_record.chat_id
) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
@ -37,5 +35,4 @@ FROM
END as improve_paragraph_list
FROM
application_chat_record application_chat_record
) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id"
${default_queryset}
) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id"

View File

@ -11,9 +11,6 @@ FROM
chat_id
FROM
application_chat_record
WHERE chat_id IN (
SELECT id FROM application_chat ${inner_queryset})
GROUP BY
application_chat_record.chat_id
) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
${default_queryset}
) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id

View File

@ -38,15 +38,6 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_STRING,
title=_("Application authentication token"),
description=_("Application authentication token"),
default="token"
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
@ -142,27 +133,6 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
description=_("Primary key id")),
'secret_key': openapi.Schema(type=openapi.TYPE_STRING, title=_("Secret key"),
description=_("Secret key")),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
description=_("Is activation")),
'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"),
description=_("Application ID")),
'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN,
title=_("Is cross-domain allowed"),
description=_("Is cross-domain allowed")),
'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'),
items=openapi.Schema(type=openapi.TYPE_STRING))
}
)
class AccessToken(ApiMixin):
@staticmethod
def get_request_params_api():
@ -201,37 +171,6 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=[],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
description=_("Primary key id")),
'access_token': openapi.Schema(type=openapi.TYPE_STRING, title=_("Access Token"),
description=_("Access Token")),
'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"),
description=_("Reset Token")),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
description=_("Is activation")),
'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"),
description=_("Number of visits")),
'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"),
description=_("Whether to enable whitelist")),
'white_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"),
description=_("Whitelist")),
'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN,
title=_("Whether to display knowledge sources"),
description=_("Whether to display knowledge sources")),
'language': openapi.Schema(type=openapi.TYPE_STRING,
title=_("language"),
description=_("language"))
}
)
class Edit(ApiMixin):
@staticmethod
def get_request_body_api():
@ -428,56 +367,6 @@ class ApplicationApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type',
'work_flow'],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
description=_("Primary key id")),
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
description=_("Application Name")),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
description=_("Application Description")),
'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
description=_("Model id")),
"dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
title=_("Number of multi-round conversations"),
description=_("Number of multi-round conversations")),
'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
description=_("Opening remarks")),
'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
items=openapi.Schema(type=openapi.TYPE_STRING),
title=_("List of associated knowledge base IDs"),
description=_("List of associated knowledge base IDs")),
'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
description=_("Problem Optimization"), default=True),
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
description=_("Application Type SIMPLE | WORK_FLOW")),
'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
title=_('Question optimization tips'),
description=_("Question optimization tips"),
default=_(
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),
'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"),
description=_("Text-to-speech model ID")),
'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"),
description=_("Speech-to-text model id")),
'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"),
description=_("Is speech-to-text enabled")),
'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"),
description=_("Is text-to-speech enabled")),
'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"),
description=_("Text-to-speech type")),
'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
}
)
class Query(ApiMixin):
@staticmethod
def get_request_params_api():

View File

@ -319,15 +319,6 @@ class ChatApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_STRING,
title=_("Conversation ID"),
description=_("Conversation ID"),
default="chat_id"
)
@staticmethod
def get_request_params_api():
return [openapi.Parameter(name='application_id',

View File

@ -373,8 +373,7 @@ class Application(APIView):
operation_id=_("Modify application API_KEY"),
tags=[_('Application/API_KEY')],
manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api(),
request_body=ApplicationApi.ApiKey.Operate.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.ApiKey.Operate.get_response_body_api()))
request_body=ApplicationApi.ApiKey.Operate.get_request_body_api())
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@ -416,8 +415,7 @@ class Application(APIView):
operation_id=_("Modify Application AccessToken"),
tags=[_('Application/Public Access')],
manual_parameters=ApplicationApi.AccessToken.get_request_params_api(),
request_body=ApplicationApi.AccessToken.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.AccessToken.get_response_body_api()))
request_body=ApplicationApi.AccessToken.get_request_body_api())
@has_permissions(ViewPermission(
[RoleConstants.ADMIN, RoleConstants.USER],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@ -457,7 +455,6 @@ class Application(APIView):
@swagger_auto_schema(operation_summary=_("Application Certification"),
operation_id=_("Application Certification"),
request_body=ApplicationApi.Authentication.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.Authentication.get_response_body_api()),
tags=[_("Application/Certification")],
security=[])
def post(self, request: Request):
@ -475,7 +472,6 @@ class Application(APIView):
@swagger_auto_schema(operation_summary=_("Create an application"),
operation_id=_("Create an application"),
request_body=ApplicationApi.Create.get_request_body_api(),
responses=result.get_api_response(ApplicationApi.Create.get_response_body_api()),
tags=[_('Application')])
@has_permissions(PermissionConstants.APPLICATION_CREATE, compare=CompareConstants.AND)
@log(menu='Application', operate="Create an application",

View File

@ -94,7 +94,6 @@ class ChatView(APIView):
@swagger_auto_schema(operation_summary=_("Get the workflow temporary session id"),
operation_id=_("Get the workflow temporary session id"),
request_body=ChatApi.OpenWorkFlowTemp.get_request_body_api(),
responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
tags=[_("Application/Chat")])
def post(self, request: Request):
return result.success(ChatSerializers.OpenWorkFlowChat(
@ -107,7 +106,6 @@ class ChatView(APIView):
@swagger_auto_schema(operation_summary=_("Get a temporary session id"),
operation_id=_("Get a temporary session id"),
request_body=ChatApi.OpenTempChat.get_request_body_api(),
responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
tags=[_("Application/Chat")])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):
@ -146,8 +144,6 @@ class ChatView(APIView):
'document_list') if 'document_list' in request.data else [],
'audio_list': request.data.get(
'audio_list') if 'audio_list' in request.data else [],
'other_list': request.data.get(
'other_list') if 'other_list' in request.data else [],
'client_type': request.auth.client_type,
'node_id': request.data.get('node_id', None),
'runtime_node_id': request.data.get('runtime_node_id', None),
@ -241,10 +237,9 @@ class ChatView(APIView):
@swagger_auto_schema(operation_summary=_("Client modifies dialogue summary"),
operation_id=_("Client modifies dialogue summary"),
request_body=ChatClientHistoryApi.Operate.ReAbstract.get_request_body_api(),
responses=result.get_default_response(),
tags=[_("Application/Conversation Log")])
@has_permissions(ViewPermission(
[RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.ADMIN, RoleConstants.USER],
[RoleConstants.APPLICATION_ACCESS_TOKEN],
[lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
dynamic_tag=keywords.get('application_id'))],
compare=CompareConstants.AND),
@ -421,7 +416,6 @@ class ChatView(APIView):
operation_id=_("Add to Knowledge Base"),
manual_parameters=ImproveApi.get_request_params_api_post(),
request_body=ImproveApi.get_request_body_api_post(),
responses=result.get_default_response(),
tags=[_("Application/Conversation Log/Add to Knowledge Base")]
)
@has_permissions(

View File

@ -11,50 +11,35 @@ import time
from common.cache.mem_cache import MemCache
_lock = threading.Lock()
locks = {}
lock = threading.Lock()
class ModelManage:
cache = MemCache('model', {})
up_clear_time = time.time()
@staticmethod
def _get_lock(_id):
lock = locks.get(_id)
if lock is None:
with _lock:
lock = locks.get(_id)
if lock is None:
lock = threading.Lock()
locks[_id] = lock
return lock
@staticmethod
def get_model(_id, get_model):
model_instance = ModelManage.cache.get(_id)
if model_instance is None:
lock = ModelManage._get_lock(_id)
with lock:
model_instance = ModelManage.cache.get(_id)
if model_instance is None:
model_instance = get_model(_id)
ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8)
else:
if model_instance.is_cache_model():
ModelManage.cache.touch(_id, timeout=60 * 60 * 8)
else:
# Acquire the lock
lock.acquire()
try:
model_instance = ModelManage.cache.get(_id)
if model_instance is None or not model_instance.is_cache_model():
model_instance = get_model(_id)
ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8)
ModelManage.clear_timeout_cache()
return model_instance
ModelManage.cache.set(_id, model_instance, timeout=60 * 30)
return model_instance
# Renew the cache TTL
ModelManage.cache.touch(_id, timeout=60 * 30)
ModelManage.clear_timeout_cache()
return model_instance
finally:
# Release the lock
lock.release()
@staticmethod
def clear_timeout_cache():
if time.time() - ModelManage.up_clear_time > 60 * 60:
threading.Thread(target=lambda: ModelManage.cache.clear_timeout_data()).start()
ModelManage.up_clear_time = time.time()
if time.time() - ModelManage.up_clear_time > 60:
ModelManage.cache.clear_timeout_data()
@staticmethod
def delete_key(_id):
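The `main` side of this hunk replaces a single global lock with a lock per model id plus a double-checked cache read, so a slow model load for one id no longer blocks every other id; the LTS side keeps one lock and a shorter 30-minute TTL. A condensed sketch of the per-id pattern (`setdefault` stands in for the diff's re-check inside the guard lock):

```python
import threading

_locks_guard = threading.Lock()
_locks = {}
_cache = {}

def _get_lock(_id):
    lock = _locks.get(_id)
    if lock is None:
        with _locks_guard:
            lock = _locks.setdefault(_id, threading.Lock())
    return lock

def get_model(_id, loader):
    instance = _cache.get(_id)
    if instance is None:
        with _get_lock(_id):
            instance = _cache.get(_id)  # double-checked read under the per-id lock
            if instance is None:
                instance = _cache[_id] = loader(_id)
    return instance

print(get_model('m1', lambda i: f'model-{i}'))  # model-m1
```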

View File

@ -238,8 +238,11 @@ class ListenerManagement:
for key in params_dict:
_value_ = params_dict[key]
exec_sql = exec_sql.replace(key, str(_value_))
with lock:
lock.acquire()
try:
native_update(query_set, exec_sql)
finally:
lock.release()
@staticmethod
def embedding_by_document(document_id, embedding_model: Embeddings, state_list=None):
@ -269,6 +272,7 @@ class ListenerManagement:
ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
State.STARTED)
# Embed the document paragraph by paragraph
page_desc(QuerySet(Paragraph)
.annotate(

View File

@ -22,4 +22,3 @@ from .table_checkbox import *
from .radio_card_field import *
from .label import *
from .slider_field import *
from .switch_field import *

View File

@ -28,6 +28,6 @@ class SwitchField(BaseField):
@param props_info:
"""
super().__init__('SwitchInput', label, required, default_value, relation_show_field_dict,
super().__init__('Switch', label, required, default_value, relation_show_field_dict,
{},
TriggerType.OPTION_LIST, attrs, props_info)

View File

@ -112,7 +112,11 @@ def get_image_id_func():
title_font_list = [
[36, 100],
[30, 36]
[26, 36],
[24, 26],
[22, 24],
[18, 22],
[16, 18]
]
@ -126,7 +130,7 @@ def get_title_level(paragraph: Paragraph):
if len(paragraph.runs) == 1:
font_size = paragraph.runs[0].font.size
pt = font_size.pt
if pt >= 30:
if pt >= 16:
for _value, index in zip(title_font_list, range(len(title_font_list))):
if pt >= _value[0] and pt < _value[1]:
return index + 1
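`title_font_list` maps a run's font size in points to a heading level through half-open `[low, high)` ranges, with index 0 the largest heading; `main` only promotes text of 30 pt and above, while the LTS table reaches down to 16 pt. A sketch of the lookup over the LTS ranges as rendered above:

```python
title_font_list = [[36, 100], [30, 36], [26, 36], [24, 26], [22, 24], [18, 22], [16, 18]]

def get_title_level(pt):
    if pt >= 16:
        for (low, high), index in zip(title_font_list, range(len(title_font_list))):
            if low <= pt < high:
                return index + 1  # 1 = top-level heading
    return None  # body text

assert get_title_level(40) == 1
assert get_title_level(20) == 6
assert get_title_level(12) is None
```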

View File

@ -82,10 +82,7 @@ class XlsSplitHandle(BaseParseTableHandle):
for row in data:
# Replace newlines in each cell with <br> to preserve the original formatting
md_table += '| ' + ' | '.join(
[str(cell)
.replace('\r\n', '<br>')
.replace('\n', '<br>')
if cell else '' for cell in row]) + ' |\n'
[str(cell).replace('\n', '<br>') if cell else '' for cell in row]) + ' |\n'
md_tables += md_table + '\n\n'
return md_tables

View File

@ -19,24 +19,36 @@ class XlsxSplitHandle(BaseParseTableHandle):
def fill_merged_cells(self, sheet, image_dict):
data = []
# Use the first row as the header row
headers = []
for idx, cell in enumerate(sheet[1]):
if cell.value is None:
headers.append(' ' * (idx + 1))
else:
headers.append(cell.value)
# Iterate over the remaining rows, starting from the second
for row in sheet.iter_rows(values_only=False):
row_data = []
for row in sheet.iter_rows(min_row=2, values_only=False):
row_data = {}
for col_idx, cell in enumerate(row):
cell_value = cell.value
# If the cell is empty and lies inside a merged range, take the merged range's value
if cell_value is None:
for merged_range in sheet.merged_cells.ranges:
if cell.coordinate in merged_range:
cell_value = sheet[merged_range.min_row][merged_range.min_col - 1].value
break
image = image_dict.get(cell_value, None)
if image is not None:
cell_value = f'![](/api/image/{image.id})'
# Store the cell value in the dict, keyed by its header
row_data.insert(col_idx, cell_value)
row_data[headers[col_idx]] = cell_value
data.append(row_data)
for merged_range in sheet.merged_cells.ranges:
cell_value = data[merged_range.min_row - 1][merged_range.min_col - 1]
for row_index in range(merged_range.min_row, merged_range.max_row + 1):
for col_index in range(merged_range.min_col, merged_range.max_col + 1):
data[row_index - 1][col_index - 1] = cell_value
return data
def handle(self, file, get_buffer, save_image):
@ -53,13 +65,11 @@ class XlsxSplitHandle(BaseParseTableHandle):
paragraphs = []
ws = wb[sheetname]
data = self.fill_merged_cells(ws, image_dict)
if len(data) >= 2:
head_list = data[0]
for row_index in range(1, len(data)):
row_output = "; ".join(
[f"{head_list[col_index]}: {data[row_index][col_index]}" for col_index in
range(0, len(data[row_index]))])
paragraphs.append({'title': '', 'content': row_output})
for row in data:
row_output = "; ".join([f"{key}: {value}" for key, value in row.items()])
# print(row_output)
paragraphs.append({'title': '', 'content': row_output})
result.append({'name': sheetname, 'paragraphs': paragraphs})
@ -68,6 +78,7 @@ class XlsxSplitHandle(BaseParseTableHandle):
return [{'name': file.name, 'paragraphs': []}]
return result
def get_content(self, file, save_image):
try:
# Load the Excel workbook
@ -83,18 +94,18 @@ class XlsxSplitHandle(BaseParseTableHandle):
# If no sheet_name is specified, use the first worksheet
for sheetname in workbook.sheetnames:
sheet = workbook[sheetname] if sheetname else workbook.active
data = self.fill_merged_cells(sheet, image_dict)
if len(data) == 0:
rows = self.fill_merged_cells(sheet, image_dict)
if len(rows) == 0:
continue
# Extract the header and body rows
headers = [f"{value}" for value in data[0]]
headers = [f"{key}" for key, value in rows[0].items()]
# Build the Markdown table
md_table = '| ' + ' | '.join(headers) + ' |\n'
md_table += '| ' + ' | '.join(['---'] * len(headers)) + ' |\n'
for row_index in range(1, len(data)):
r = [f'{value}' for value in data[row_index]]
for row in rows:
r = [f'{value}' for key, value in row.items()]
md_table += '| ' + ' | '.join(
[str(cell).replace('\n', '<br>') if cell is not None else '' for cell in r]) + ' |\n'
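The LTS `fill_merged_cells` returns one dict per data row keyed by the header row and resolves a merged cell by reading the range's anchor value during iteration; `main` returns a plain 2-D list and back-fills every merged range in one pass afterwards. A standalone sketch of that back-fill step, using 1-based `(min_row, min_col, max_row, max_col)` tuples as openpyxl does:

```python
def backfill_merged(data, merged_ranges):
    for min_row, min_col, max_row, max_col in merged_ranges:
        anchor = data[min_row - 1][min_col - 1]  # top-left cell holds the value
        for r in range(min_row, max_row + 1):
            for c in range(min_col, max_col + 1):
                data[r - 1][c - 1] = anchor
    return data

rows = [['h1', 'h2'], ['a', None], [None, 'b']]
assert backfill_merged(rows, [(2, 1, 3, 1)]) == [['h1', 'h2'], ['a', None], ['a', 'b']]
```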

View File

@ -14,7 +14,7 @@ from common.handle.base_split_handle import BaseSplitHandle
def post_cell(cell_value):
return cell_value.replace('\r\n', '<br>').replace('\n', '<br>').replace('|', '&#124;')
return cell_value.replace('\n', '<br>').replace('|', '&#124;')
def row_to_md(row):
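`post_cell` makes a spreadsheet cell safe inside a single Markdown table row: newlines become `<br>` and literal pipes become `&#124;`; the `main` version additionally normalizes Windows `\r\n` first so no stray `\r` leaks into the output. For example:

```python
def post_cell(cell_value):
    return cell_value.replace('\r\n', '<br>').replace('\n', '<br>').replace('|', '&#124;')

assert post_cell('a\r\nb|c') == 'a<br>b&#124;c'
```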

View File

@ -24,13 +24,12 @@ class GunicornLocalModelService(BaseService):
os.environ.setdefault('SERVER_NAME', 'local_model')
log_format = '%(h)s %(t)s %(L)ss "%(r)s" %(s)s %(b)s '
bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
worker = CONFIG.get("LOCAL_MODEL_HOST_WORKER", 1)
cmd = [
'gunicorn', 'smartdoc.wsgi:application',
'-b', bind,
'-k', 'gthread',
'--threads', '200',
'-w', str(worker),
'-w', "1",
'--max-requests', '10240',
'--max-requests-jitter', '2048',
'--access-logformat', log_format,

View File

@ -11,7 +11,6 @@ import importlib
import io
import mimetypes
import pickle
import random
import re
import shutil
from functools import reduce
@ -298,14 +297,3 @@ def markdown_to_plain_text(md: str) -> str:
# Strip leading and trailing whitespace
text = text.strip()
return text
SAFE_CHAR_SET = (
[chr(i) for i in range(65, 91) if chr(i) not in {'I', 'O'}] + # uppercase A-H, J-N, P-Z
[chr(i) for i in range(97, 123) if chr(i) not in {'i', 'l', 'o'}] + # lowercase a-h, j-k, m-n, p-z
[str(i) for i in range(10) if str(i) not in {'0', '1', '7'}] # digits 2-6, 8-9
)
def get_random_chars(number=4):
return ''.join(random.choices(SAFE_CHAR_SET, k=number))

View File

@ -7,12 +7,13 @@
@desc:
"""
import os
import pickle
import subprocess
import sys
import uuid
from textwrap import dedent
from diskcache import Cache
from smartdoc.const import BASE_DIR
from smartdoc.const import PROJECT_DIR
@ -36,8 +37,6 @@ class FunctionExecutor:
old_mask = os.umask(0o077)
try:
os.makedirs(self.sandbox_path, 0o700, exist_ok=True)
os.makedirs(os.path.join(self.sandbox_path, 'execute'), 0o700, exist_ok=True)
os.makedirs(os.path.join(self.sandbox_path, 'result'), 0o700, exist_ok=True)
finally:
os.umask(old_mask)
@ -45,11 +44,10 @@ class FunctionExecutor:
_id = str(uuid.uuid1())
success = '{"code":200,"msg":"成功","data":exec_result}'
err = '{"code":500,"msg":str(e),"data":None}'
result_path = f'{self.sandbox_path}/result/{_id}.result'
path = r'' + self.sandbox_path + ''
_exec_code = f"""
try:
import os
import pickle
env = dict(os.environ)
for key in list(env.keys()):
if key in os.environ and (key.startswith('MAXKB') or key.startswith('POSTGRES') or key.startswith('PG')):
@ -62,11 +60,13 @@ try:
for local in locals_v:
globals_v[local] = locals_v[local]
exec_result=f(**keywords)
with open({result_path!a}, 'wb') as file:
file.write(pickle.dumps({success}))
from diskcache import Cache
cache = Cache({path!a})
cache.set({_id!a},{success})
except Exception as e:
with open({result_path!a}, 'wb') as file:
file.write(pickle.dumps({err}))
from diskcache import Cache
cache = Cache({path!a})
cache.set({_id!a},{err})
"""
if self.sandbox:
subprocess_result = self._exec_sandbox(_exec_code, _id)
@ -74,18 +74,18 @@ except Exception as e:
subprocess_result = self._exec(_exec_code)
if subprocess_result.returncode == 1:
raise Exception(subprocess_result.stderr)
with open(result_path, 'rb') as file:
result = pickle.loads(file.read())
os.remove(result_path)
cache = Cache(self.sandbox_path)
result = cache.get(_id)
cache.delete(_id)
if result.get('code') == 200:
return result.get('data')
raise Exception(result.get('msg'))
def _exec_sandbox(self, _code, _id):
exec_python_file = f'{self.sandbox_path}/execute/{_id}.py'
exec_python_file = f'{self.sandbox_path}/{_id}.py'
with open(exec_python_file, 'w') as file:
file.write(_code)
os.system(f"chown {self.user}:root {exec_python_file}")
os.system(f"chown {self.user}:{self.user} {exec_python_file}")
kwargs = {'cwd': BASE_DIR}
subprocess_result = subprocess.run(
['su', '-s', python_directory, '-c', "exec(open('" + exec_python_file + "').read())", self.user],
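The `main` side of this file swaps the diskcache handoff for a pickle file under a sandbox-private `result/` directory (created with mode 0o700 above): the child process pickles a result dict to `result/<id>.result`, and the parent reads it back once and deletes it. A sketch of the parent's side of that exchange:

```python
import os
import pickle

def read_sandbox_result(result_path):
    with open(result_path, 'rb') as file:
        result = pickle.loads(file.read())
    os.remove(result_path)  # one-shot handoff: consume and delete
    if result.get('code') == 200:
        return result.get('data')
    raise Exception(result.get('msg'))
```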

View File

@ -40,12 +40,15 @@ def generate():
def get_key_pair():
rsa_value = rsa_cache.get(cache_key)
if rsa_value is None:
with lock:
rsa_value = rsa_cache.get(cache_key)
if rsa_value is not None:
return rsa_value
lock.acquire()
rsa_value = rsa_cache.get(cache_key)
if rsa_value is not None:
return rsa_value
try:
rsa_value = get_key_pair_by_sql()
rsa_cache.set(cache_key, rsa_value)
finally:
lock.release()
return rsa_value

View File

@ -23,8 +23,6 @@ from django.db import transaction, models
from django.db.models import QuerySet, Count
from django.db.models.functions import Substr, Reverse
from django.http import HttpResponse
from django.utils.translation import get_language
from django.utils.translation import gettext_lazy as _, gettext, to_locale
from drf_yasg import openapi
from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
from rest_framework import serializers
@ -66,6 +64,8 @@ from embedding.task.embedding import embedding_by_document, delete_embedding_by_
embedding_by_document_list
from setting.models import Model
from smartdoc.conf import PROJECT_DIR
from django.utils.translation import gettext_lazy as _, gettext, to_locale
from django.utils.translation import get_language
parse_qa_handle_list = [XlsParseQAHandle(), CsvParseQAHandle(), XlsxParseQAHandle(), ZipParseQAHandle()]
parse_table_handle_list = [CsvSplitTableHandle(), XlsSplitTableHandle(), XlsxSplitTableHandle()]
@ -661,8 +661,6 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
cell = worksheet.cell(row=row_idx + 1, column=col_idx + 1)
if isinstance(col, str):
col = re.sub(ILLEGAL_CHARACTERS_RE, '', col)
if col.startswith(('=', '+', '-', '@')):
col = '\ufeff' + col
cell.value = col
# Create an HttpResponse object and return the Excel file
return workbook
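
The guard removed here (present on main only) defends against spreadsheet formula injection: Excel treats a cell beginning with '=', '+', '-' or '@' as a formula, and prefixing a zero-width BOM renders it inert text. The check in isolation:

def neutralize_formula(value: str) -> str:
    # Excel executes cells that start with these characters as formulas.
    if value.startswith(('=', '+', '-', '@')):
        return '\ufeff' + value    # zero-width prefix keeps the cell as plain text
    return value

assert neutralize_formula('=HYPERLINK("http://example.com")').startswith('\ufeff')
assert neutralize_formula('plain text') == 'plain text'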

View File

@ -28,9 +28,6 @@ mime_types = {"html": "text/html", "htm": "text/html", "shtml": "text/html", "cs
"woff2": "font/woff2", "jar": "application/java-archive", "war": "application/java-archive",
"ear": "application/java-archive", "json": "application/json", "hqx": "application/mac-binhex40",
"doc": "application/msword", "pdf": "application/pdf", "ps": "application/postscript",
"docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
"eps": "application/postscript", "ai": "application/postscript", "rtf": "application/rtf",
"m3u8": "application/vnd.apple.mpegurl", "kml": "application/vnd.google-earth.kml+xml",
"kmz": "application/vnd.google-earth.kmz", "xls": "application/vnd.ms-excel",
@ -90,4 +87,4 @@ class FileSerializer(serializers.Serializer):
'Content-Disposition': 'attachment; filename="{}"'.format(
file.file_name)})
return HttpResponse(file.get_byte(), status=200,
headers={'Content-Type': mime_types.get(file_type, 'text/plain')})
headers={'Content-Type': mime_types.get(file.file_name.split(".")[-1], 'text/plain')})
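
The LTS side derives the Content-Type key from the file name at response time instead of a precomputed file_type variable. The lookup in isolation, reusing the mime_types mapping above (excerpted):

mime_types = {'pdf': 'application/pdf', 'json': 'application/json'}  # excerpt

def content_type_for(file_name: str) -> str:
    ext = file_name.split('.')[-1]            # text after the last dot
    return mime_types.get(ext, 'text/plain')  # unknown types fall back to text/plain

assert content_type_for('report.pdf') == 'application/pdf'
assert content_type_for('README') == 'text/plain'  # no dot: whole name is the key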

View File

@ -2,7 +2,6 @@ UPDATE "document"
SET "char_length" = ( SELECT CASE WHEN
"sum" ( "char_length" ( "content" ) ) IS NULL THEN
0 ELSE "sum" ( "char_length" ( "content" ) )
END FROM paragraph WHERE "document_id" = %s ),
"update_time" = CURRENT_TIMESTAMP
END FROM paragraph WHERE "document_id" = %s )
WHERE
"id" = %s

View File

@ -181,7 +181,6 @@ class Dataset(APIView):
@swagger_auto_schema(operation_summary=_('Generate related'), operation_id=_('Generate related'),
manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
request_body=GenerateRelatedSerializer.get_request_body_api(),
responses=result.get_default_response(),
tags=[_('Knowledge Base')]
)
@log(menu='document', operate="Generate related documents",

View File

@ -195,53 +195,6 @@ class FunctionLibApi(ApiMixin):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
required=['id', 'name', 'code', 'input_field_list', 'permission_type'],
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')),
'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
description=_('function name')),
'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
description=_('function description')),
'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
description=_('function content')),
'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
description=_('permission')),
'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
description=_('Is active')),
'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
description=_('Input variable list'),
items=openapi.Schema(type=openapi.TYPE_OBJECT,
required=['name', 'is_required', 'source'],
properties={
'name': openapi.Schema(
type=openapi.TYPE_STRING,
title=_('variable name'),
description=_('variable name')),
'is_required': openapi.Schema(
type=openapi.TYPE_BOOLEAN,
title=_('required'),
description=_('required')),
'type': openapi.Schema(
type=openapi.TYPE_STRING,
title=_('type'),
description=_(
'Field type string|int|dict|array|float')
),
'source': openapi.Schema(
type=openapi.TYPE_STRING,
title=_('source'),
description=_(
'The source only supports custom|reference')),
}))
}
)
class Export(ApiMixin):
@staticmethod
def get_request_params_api():
@ -261,4 +214,4 @@ class FunctionLibApi(ApiMixin):
type=openapi.TYPE_FILE,
required=True,
description=_('Upload image files'))
]
]

View File

@ -44,7 +44,6 @@ class FunctionLibView(APIView):
@swagger_auto_schema(operation_summary=_('Create function'),
operation_id=_('Create function'),
request_body=FunctionLibApi.Create.get_request_body_api(),
responses=result.get_api_response(FunctionLibApi.Create.get_response_body_api()),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
@log(menu='Function', operate="Create function",
@ -59,7 +58,6 @@ class FunctionLibView(APIView):
@swagger_auto_schema(operation_summary=_('Debug function'),
operation_id=_('Debug function'),
request_body=FunctionLibApi.Debug.get_request_body_api(),
responses=result.get_default_response(),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):
@ -74,7 +72,6 @@ class FunctionLibView(APIView):
@swagger_auto_schema(operation_summary=_('Update function'),
operation_id=_('Update function'),
request_body=FunctionLibApi.Edit.get_request_body_api(),
responses=result.get_api_response(FunctionLibApi.Edit.get_request_body_api()),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
@log(menu='Function', operate="Update function",
@ -87,7 +84,6 @@ class FunctionLibView(APIView):
@action(methods=['DELETE'], detail=False)
@swagger_auto_schema(operation_summary=_('Delete function'),
operation_id=_('Delete function'),
responses=result.get_default_response(),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
@log(menu='Function', operate="Delete function",

View File

@ -26,7 +26,6 @@ class PyLintView(APIView):
@swagger_auto_schema(operation_summary=_('Check code'),
operation_id=_('Check code'),
request_body=PyLintApi.get_request_body_api(),
responses=result.get_api_response(PyLintApi.get_request_body_api()),
tags=[_('Function')])
@has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
def post(self, request: Request):

View File

@ -7490,13 +7490,4 @@ msgid "Field: {name} No value set"
msgstr ""
msgid "Generate related"
msgstr ""
msgid "Obtain graphical captcha"
msgstr ""
msgid "Captcha code error or expiration"
msgstr ""
msgid "captcha"
msgstr ""

View File

@ -4536,7 +4536,7 @@ msgstr "修改知识库信息"
#: community/apps/dataset/views/document.py:463
#: community/apps/dataset/views/document.py:464
msgid "Get the knowledge base paginated list"
msgstr "获取知识库文档分页列表"
msgstr "获取知识库分页列表"
#: community/apps/dataset/views/document.py:31
#: community/apps/dataset/views/document.py:32
@ -7653,13 +7653,4 @@ msgid "Field: {name} No value set"
msgstr "字段: {name} 未设置值"
msgid "Generate related"
msgstr "生成问题"
msgid "Obtain graphical captcha"
msgstr "获取图形验证码"
msgid "Captcha code error or expiration"
msgstr "验证码错误或过期"
msgid "captcha"
msgstr "验证码"

View File

@ -4545,7 +4545,7 @@ msgstr "修改知識庫信息"
#: community/apps/dataset/views/document.py:463
#: community/apps/dataset/views/document.py:464
msgid "Get the knowledge base paginated list"
msgstr "獲取知識庫文档分頁列表"
msgstr "獲取知識庫分頁列表"
#: community/apps/dataset/views/document.py:31
#: community/apps/dataset/views/document.py:32
@ -7663,13 +7663,4 @@ msgid "Field: {name} No value set"
msgstr "欄位: {name} 未設定值"
msgid "Generate related"
msgstr "生成問題"
msgid "Obtain graphical captcha"
msgstr "獲取圖形驗證碼"
msgid "Captcha code error or expiration"
msgstr "驗證碼錯誤或過期"
msgid "captcha"
msgstr "驗證碼"

View File

@ -106,10 +106,7 @@ class MaxKBBaseModel(ABC):
optional_params = {}
for key, value in model_kwargs.items():
if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
if key == 'extra_body' and isinstance(value, dict):
optional_params = {**optional_params, **value}
else:
optional_params[key] = value
optional_params[key] = value
return optional_params
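
A worked example of the behavioral difference, with illustrative kwargs: main flattens a nested extra_body dict into the top-level optional params, while the LTS branch keeps it nested.

def filter_optional_params_main(model_kwargs):
    optional_params = {}
    for key, value in model_kwargs.items():
        if key in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
            continue
        if key == 'extra_body' and isinstance(value, dict):
            optional_params.update(value)   # main: merge nested extra_body upward
        else:
            optional_params[key] = value
    return optional_params

kwargs = {'model_id': 'x', 'temperature': 0.7,
          'extra_body': {'enable_thinking': False}}   # illustrative values
assert filter_optional_params_main(kwargs) == {
    'temperature': 0.7, 'enable_thinking': False}
# The LTS version would instead return
# {'temperature': 0.7, 'extra_body': {'enable_thinking': False}}.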

View File

@ -19,8 +19,6 @@ from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import
from setting.models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider
from setting.models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider
from setting.models_provider.impl.qwen_model_provider.qwen_model_provider import QwenModelProvider
from setting.models_provider.impl.regolo_model_provider.regolo_model_provider import \
RegoloModelProvider
from setting.models_provider.impl.siliconCloud_model_provider.siliconCloud_model_provider import \
SiliconCloudModelProvider
from setting.models_provider.impl.tencent_cloud_model_provider.tencent_cloud_model_provider import \
@ -57,4 +55,3 @@ class ModelProvideConstants(Enum):
aliyun_bai_lian_model_provider = AliyunBaiLianModelProvider()
model_anthropic_provider = AnthropicModelProvider()
model_siliconCloud_provider = SiliconCloudModelProvider()
model_regolo_provider = RegoloModelProvider()

View File

@ -51,23 +51,6 @@ model_info_list = [ModelInfo('gte-rerank',
_("Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data."),
ModelTypeConst.EMBEDDING, aliyun_bai_lian_embedding_model_credential,
AliyunBaiLianEmbedding),
ModelInfo('qwen3-0.6b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-1.7b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-4b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-8b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-14b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-32b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-30b-a3b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen3-235b-a22b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
BaiLianChatModel),
ModelInfo('qwen-plus', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,

View File

@ -30,29 +30,6 @@ class BaiLianLLMModelParams(BaseForm):
precision=0)
class BaiLianLLMStreamModelParams(BaseForm):
temperature = forms.SliderField(TooltipLabel(_('Temperature'),
_('Higher values make the output more random, while lower values make it more focused and deterministic')),
required=True, default_value=0.7,
_min=0.1,
_max=1.0,
_step=0.01,
precision=2)
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=800,
_min=1,
_max=100000,
_step=1,
precision=0)
stream = forms.SwitchField(label=TooltipLabel(_('Is the answer in streaming mode'),
_('Is the answer in streaming mode')),
required=True, default_value=True)
class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
@ -70,11 +47,7 @@ class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
if model_params.get('stream'):
for res in model.stream([HumanMessage(content=gettext('Hello'))]):
pass
else:
model.invoke([HumanMessage(content=gettext('Hello'))])
model.invoke([HumanMessage(content=gettext('Hello'))])
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
@ -95,6 +68,4 @@ class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):
api_key = forms.PasswordInputField('API Key', required=True)
def get_model_params_setting_form(self, model_name):
if 'qwen3' in model_name:
return BaiLianLLMStreamModelParams()
return BaiLianLLMModelParams()

View File

@ -15,8 +15,9 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)
return chat_tong_yi
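
This is the recurring pattern across the provider files in this compare: main passes provider-specific options through ChatOpenAI's extra_body field, which is forwarded verbatim in the request payload, while the LTS branch splats the same dict into constructor kwargs, where every key must match a known ChatOpenAI field. A sketch of the two call shapes, with placeholder credentials:

from langchain_openai import ChatOpenAI

optional_params = {'temperature': 0.7, 'max_tokens': 800}

# main: provider-specific keys ride along inside the HTTP request body
chat = ChatOpenAI(model='qwen-vl-max', api_key='sk-placeholder',
                  base_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
                  extra_body=optional_params)

# v1.10.4-lts: the same dict becomes constructor keyword arguments
chat = ChatOpenAI(model='qwen-vl-max', api_key='sk-placeholder',
                  base_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
                  **optional_params)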

View File

@ -20,5 +20,5 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params
**optional_params
)

View File

@ -1,12 +1,10 @@
import os
import re
from typing import Dict, List
from typing import Dict
from botocore.config import Config
from langchain_community.chat_models import BedrockChat
from langchain_core.messages import BaseMessage, get_buffer_string
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -74,20 +72,6 @@ class BedrockModel(MaxKBBaseModel, BedrockChat):
config=config
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
try:
return super().get_num_tokens_from_messages(messages)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
def get_num_tokens(self, text: str) -> int:
try:
return super().get_num_tokens(text)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return len(tokenizer.encode(text))
def _update_aws_credentials(profile_name, access_key_id, secret_access_key):
credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials")

View File

@ -1,16 +1,15 @@
# coding=utf-8
from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping
import warnings
from typing import List, Dict, Optional, Any, Iterator, cast, Type, Union
import openai
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, HumanMessageChunk, AIMessageChunk, \
SystemMessageChunk, FunctionMessageChunk, ChatMessageChunk
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.tool import tool_call_chunk, ToolMessageChunk
from langchain_core.outputs import ChatGenerationChunk
from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
from langchain_core.runnables import RunnableConfig, ensure_config
from langchain_core.tools import BaseTool
from langchain_core.utils.pydantic import is_basemodel_subclass
from langchain_openai import ChatOpenAI
from langchain_openai.chat_models.base import _create_usage_metadata
from common.config.tokenizer_manage_config import TokenizerManage
@ -20,65 +19,6 @@ def custom_get_token_ids(text: str):
return tokenizer.encode(text)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
id_ = _dict.get("id")
role = cast(str, _dict.get("role"))
content = cast(str, _dict.get("content") or "")
additional_kwargs: dict = {}
if 'reasoning_content' in _dict:
additional_kwargs['reasoning_content'] = _dict.get('reasoning_content')
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
tool_call_chunks = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
try:
tool_call_chunks = [
tool_call_chunk(
name=rtc["function"].get("name"),
args=rtc["function"].get("arguments"),
id=rtc.get("id"),
index=rtc["index"],
)
for rtc in raw_tool_calls
]
except KeyError:
pass
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content, id=id_)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
id=id_,
tool_call_chunks=tool_call_chunks, # type: ignore[arg-type]
)
elif role in ("system", "developer") or default_class == SystemMessageChunk:
if role == "developer":
additional_kwargs = {"__openai_role__": "developer"}
else:
additional_kwargs = {}
return SystemMessageChunk(
content=content, id=id_, additional_kwargs=additional_kwargs
)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(
content=content, tool_call_id=_dict["tool_call_id"], id=id_
)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role, id=id_)
else:
return default_class(content=content, id=id_) # type: ignore
class BaseChatOpenAI(ChatOpenAI):
usage_metadata: dict = {}
custom_get_token_ids = custom_get_token_ids
@ -86,13 +26,7 @@ class BaseChatOpenAI(ChatOpenAI):
def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
return self.usage_metadata
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Optional[
Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
] = None,
) -> int:
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
try:
return super().get_num_tokens_from_messages(messages)
@ -110,77 +44,114 @@ class BaseChatOpenAI(ChatOpenAI):
return len(tokenizer.encode(text))
return self.get_last_generation_info().get('output_tokens', 0)
def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
kwargs['stream_usage'] = True
for chunk in super()._stream(*args, **kwargs):
if chunk.message.usage_metadata is not None:
self.usage_metadata = chunk.message.usage_metadata
yield chunk
def _convert_chunk_to_generation_chunk(
def _stream(
self,
chunk: dict,
default_chunk_class: type,
base_generation_info: Optional[dict],
) -> Optional[ChatGenerationChunk]:
if chunk.get("type") == "content.delta": # from beta.chat.completions.stream
return None
token_usage = chunk.get("usage")
choices = (
chunk.get("choices", [])
# from beta.chat.completions.stream
or chunk.get("chunk", {}).get("choices", [])
)
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
kwargs["stream"] = True
kwargs["stream_options"] = {"include_usage": True}
"""Set default stream_options."""
stream_usage = self._should_stream_usage(kwargs.get('stream_usage'), **kwargs)
# Note: stream_options is not a valid parameter for Azure OpenAI.
# To support users proxying Azure through ChatOpenAI, here we only specify
# stream_options if include_usage is set to True.
# See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
# for release notes.
if stream_usage:
kwargs["stream_options"] = {"include_usage": stream_usage}
usage_metadata: Optional[UsageMetadata] = (
_create_usage_metadata(token_usage) if token_usage and token_usage.get("prompt_tokens") else None
)
if len(choices) == 0:
# logprobs is implicitly None
generation_chunk = ChatGenerationChunk(
message=default_chunk_class(content="", usage_metadata=usage_metadata)
payload = self._get_request_payload(messages, stop=stop, **kwargs)
default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
base_generation_info = {}
if "response_format" in payload and is_basemodel_subclass(
payload["response_format"]
):
# TODO: Add support for streaming with Pydantic response_format.
warnings.warn("Streaming with Pydantic response_format not yet supported.")
chat_result = self._generate(
messages, stop, run_manager=run_manager, **kwargs
)
return generation_chunk
msg = chat_result.generations[0].message
yield ChatGenerationChunk(
message=AIMessageChunk(
**msg.dict(exclude={"type", "additional_kwargs"}),
# preserve the "parsed" Pydantic object without converting to dict
additional_kwargs=msg.additional_kwargs,
),
generation_info=chat_result.generations[0].generation_info,
)
return
if self.include_response_headers:
raw_response = self.client.with_raw_response.create(**payload)
response = raw_response.parse()
base_generation_info = {"headers": dict(raw_response.headers)}
else:
response = self.client.create(**payload)
with response:
is_first_chunk = True
for chunk in response:
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
choice = choices[0]
if choice["delta"] is None:
return None
generation_chunk = super()._convert_chunk_to_generation_chunk(
chunk,
default_chunk_class,
base_generation_info if is_first_chunk else {},
)
if generation_chunk is None:
continue
message_chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
generation_info = {**base_generation_info} if base_generation_info else {}
# custom code
if len(chunk['choices']) > 0 and 'reasoning_content' in chunk['choices'][0]['delta']:
generation_chunk.message.additional_kwargs["reasoning_content"] = chunk['choices'][0]['delta'][
'reasoning_content']
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
if model_name := chunk.get("model"):
generation_info["model_name"] = model_name
if system_fingerprint := chunk.get("system_fingerprint"):
generation_info["system_fingerprint"] = system_fingerprint
default_chunk_class = generation_chunk.message.__class__
logprobs = (generation_chunk.generation_info or {}).get("logprobs")
if run_manager:
run_manager.on_llm_new_token(
generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
)
is_first_chunk = False
# custom code
if generation_chunk.message.usage_metadata is not None:
self.usage_metadata = generation_chunk.message.usage_metadata
yield generation_chunk
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
if usage_metadata and isinstance(message_chunk, AIMessageChunk):
message_chunk.usage_metadata = usage_metadata
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
return generation_chunk
def _create_chat_result(self,
response: Union[dict, openai.BaseModel],
generation_info: Optional[Dict] = None):
result = super()._create_chat_result(response, generation_info)
try:
reasoning_content = ''
reasoning_content_enable = False
for res in response.choices:
if 'reasoning_content' in res.message.model_extra:
reasoning_content_enable = True
_reasoning_content = res.message.model_extra.get('reasoning_content')
if _reasoning_content is not None:
reasoning_content += _reasoning_content
if reasoning_content_enable:
result.llm_output['reasoning_content'] = reasoning_content
except Exception as e:
pass
return result
def invoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[list[str]] = None,
stop: Optional[List[str]] = None,
**kwargs: Any,
) -> BaseMessage:
config = ensure_config(config)
chat_result = cast(
"ChatGeneration",
ChatGeneration,
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
@ -191,9 +162,7 @@ class BaseChatOpenAI(ChatOpenAI):
run_id=config.pop("run_id", None),
**kwargs,
).generations[0][0],
).message
self.usage_metadata = chat_result.response_metadata[
'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
return chat_result
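
The main-branch _stream (top of this hunk) simply forces stream_usage and records usage_metadata from chunks as they pass through, whereas the LTS branch re-implements the whole streaming loop. A minimal sketch of the delegating style, assuming a ChatOpenAI subclass:

from typing import Any, Iterator

from langchain_core.outputs import ChatGenerationChunk
from langchain_openai import ChatOpenAI

class UsageTrackingChat(ChatOpenAI):
    usage_metadata: dict = {}

    def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
        kwargs['stream_usage'] = True             # request a final usage chunk
        for chunk in super()._stream(*args, **kwargs):
            if chunk.message.usage_metadata is not None:
                self.usage_metadata = chunk.message.usage_metadata
            yield chunk                           # pass every chunk through unchanged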

View File

@ -26,6 +26,6 @@ class DeepSeekChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base='https://api.deepseek.com',
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params
**optional_params
)
return deepseek_chat_open_ai

View File

@ -13,7 +13,7 @@ from google.ai.generativelanguage_v1beta.types import (
Tool as GoogleTool,
)
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.messages import BaseMessage
from langchain_core.outputs import ChatGenerationChunk
from langchain_google_genai import ChatGoogleGenerativeAI
from langchain_google_genai._function_utils import _ToolConfigDict, _ToolDict
@ -22,8 +22,6 @@ from langchain_google_genai.chat_models import _chat_with_retry, _response_to_re
from langchain_google_genai._common import (
SafetySettingDict,
)
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -48,18 +46,10 @@ class GeminiChatModel(MaxKBBaseModel, ChatGoogleGenerativeAI):
return self.__dict__.get('_last_generation_info')
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
try:
return self.get_last_generation_info().get('input_tokens', 0)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
return self.get_last_generation_info().get('input_tokens', 0)
def get_num_tokens(self, text: str) -> int:
try:
return self.get_last_generation_info().get('output_tokens', 0)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return len(tokenizer.encode(text))
return self.get_last_generation_info().get('output_tokens', 0)
def _stream(
self,

View File

@ -21,10 +21,11 @@ class KimiChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
kimi_chat_open_ai = KimiChatModel(
openai_api_base=model_credential['api_base'],
openai_api_key=model_credential['api_key'],
model_name=model_name,
extra_body=optional_params,
**optional_params
)
return kimi_chat_open_ai

View File

@ -25,7 +25,7 @@ class OllamaLLMModelParams(BaseForm):
_step=0.01,
precision=2)
num_predict = forms.SliderField(
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=1024,

View File

@ -28,5 +28,5 @@ class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)

View File

@ -16,5 +16,5 @@ class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)

View File

@ -9,6 +9,7 @@
from typing import List, Dict
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -34,9 +35,9 @@ class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):
streaming = False
azure_chat_open_ai = OpenAIChatModel(
model=model_name,
base_url=model_credential.get('api_base'),
api_key=model_credential.get('api_key'),
extra_body=optional_params,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params,
streaming=streaming,
custom_get_token_ids=custom_get_token_ids
)

View File

@ -18,8 +18,9 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)
return chat_tong_yi

View File

@ -26,6 +26,6 @@ class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)
return chat_tong_yi

View File

@ -1,8 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py.py
@date: 2024/3/28 16:25
@desc:
"""

View File

@ -1,52 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: embedding.py
@date: 2024/7/12 16:45
@desc:
"""
import traceback
from typing import Dict
from django.utils.translation import gettext as _
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
class RegoloEmbeddingCredential(BaseForm, BaseModelCredential):
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=True):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
_('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential)
model.embed_query(_('Hello'))
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
_('Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
api_key = forms.PasswordInputField('API Key', required=True)

View File

@ -1,74 +0,0 @@
# coding=utf-8
import base64
import os
import traceback
from typing import Dict
from langchain_core.messages import HumanMessage
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
from django.utils.translation import gettext_lazy as _, gettext
class RegoloImageModelParams(BaseForm):
temperature = forms.SliderField(TooltipLabel(_('Temperature'),
_('Higher values make the output more random, while lower values make it more focused and deterministic')),
required=True, default_value=0.7,
_min=0.1,
_max=1.0,
_step=0.01,
precision=2)
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=800,
_min=1,
_max=100000,
_step=1,
precision=0)
class RegoloImageModelCredential(BaseForm, BaseModelCredential):
api_base = forms.TextInputField('API URL', required=True)
api_key = forms.PasswordInputField('API Key', required=True)
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=False):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
gettext('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
gettext(
'Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
def get_model_params_setting_form(self, model_name):
return RegoloImageModelParams()

View File

@ -1,78 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: llm.py
@date: 2024/7/11 18:32
@desc:
"""
import traceback
from typing import Dict
from django.utils.translation import gettext_lazy as _, gettext
from langchain_core.messages import HumanMessage
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
class RegoloLLMModelParams(BaseForm):
temperature = forms.SliderField(TooltipLabel(_('Temperature'),
_('Higher values make the output more random, while lower values make it more focused and deterministic')),
required=True, default_value=0.7,
_min=0.1,
_max=1.0,
_step=0.01,
precision=2)
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=800,
_min=1,
_max=100000,
_step=1,
precision=0)
class RegoloLLMModelCredential(BaseForm, BaseModelCredential):
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=False):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
gettext('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
model.invoke([HumanMessage(content=gettext('Hello'))])
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
gettext(
'Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
api_key = forms.PasswordInputField('API Key', required=True)
def get_model_params_setting_form(self, model_name):
return RegoloLLMModelParams()

View File

@ -1,89 +0,0 @@
# coding=utf-8
import traceback
from typing import Dict
from django.utils.translation import gettext_lazy as _, gettext
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
class RegoloTTIModelParams(BaseForm):
size = forms.SingleSelect(
TooltipLabel(_('Image size'),
_('The image generation endpoint allows you to create raw images based on text prompts. ')),
required=True,
default_value='1024x1024',
option_list=[
{'value': '1024x1024', 'label': '1024x1024'},
{'value': '1024x1792', 'label': '1024x1792'},
{'value': '1792x1024', 'label': '1792x1024'},
],
text_field='label',
value_field='value'
)
quality = forms.SingleSelect(
TooltipLabel(_('Picture quality'), _('''
By default, images are produced in standard quality.
''')),
required=True,
default_value='standard',
option_list=[
{'value': 'standard', 'label': 'standard'},
{'value': 'hd', 'label': 'hd'},
],
text_field='label',
value_field='value'
)
n = forms.SliderField(
TooltipLabel(_('Number of pictures'),
_('1 as default')),
required=True, default_value=1,
_min=1,
_max=10,
_step=1,
precision=0)
class RegoloTextToImageModelCredential(BaseForm, BaseModelCredential):
api_key = forms.PasswordInputField('API Key', required=True)
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=False):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
gettext('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
gettext(
'Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
def get_model_params_setting_form(self, model_name):
return RegoloTTIModelParams()

View File

@ -1,64 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
id="Livello_2"
data-name="Livello 2"
viewBox="0 0 104.4 104.38"
version="1.1"
sodipodi:docname="Regolo_logo_positive.svg"
width="100%" height="100%"
inkscape:version="1.4 (e7c3feb100, 2024-10-09)"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns="http://www.w3.org/2000/svg"
xmlns:svg="http://www.w3.org/2000/svg">
<sodipodi:namedview
id="namedview13"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:showpageshadow="2"
inkscape:pageopacity="0.0"
inkscape:pagecheckerboard="0"
inkscape:deskcolor="#d1d1d1"
inkscape:zoom="2.1335227"
inkscape:cx="119.05193"
inkscape:cy="48.511318"
inkscape:window-width="1920"
inkscape:window-height="1025"
inkscape:window-x="0"
inkscape:window-y="0"
inkscape:window-maximized="1"
inkscape:current-layer="g13" />
<defs
id="defs1">
<style
id="style1">
.cls-1 {
fill: #303030;
}
.cls-2 {
fill: #59e389;
}
</style>
</defs>
<g
id="Grafica"
transform="translate(0,-40.87)">
<g
id="g13">
<path
class="cls-1"
d="m 104.39,105.96 v 36.18 c 0,0.32 -0.05,0.62 -0.14,0.91 -0.39,1.27 -1.58,2.2 -2.99,2.2 H 65.08 c -1.73,0 -3.13,-1.41 -3.13,-3.13 V 113.4 c 0,-0.15 0,-0.29 0,-0.44 v -7 c 0,-1.73 1.4,-3.13 3.13,-3.13 h 36.19 c 1.5,0 2.77,1.07 3.06,2.5 0.05,0.21 0.07,0.41 0.07,0.63 z"
id="path1" />
<path
class="cls-1"
d="m 104.39,105.96 v 36.18 c 0,0.32 -0.05,0.62 -0.14,0.91 -0.39,1.27 -1.58,2.2 -2.99,2.2 H 65.08 c -1.73,0 -3.13,-1.41 -3.13,-3.13 V 113.4 c 0,-0.15 0,-0.29 0,-0.44 v -7 c 0,-1.73 1.4,-3.13 3.13,-3.13 h 36.19 c 1.5,0 2.77,1.07 3.06,2.5 0.05,0.21 0.07,0.41 0.07,0.63 z"
id="path2" />
<path
class="cls-2"
d="M 101.27,40.88 H 65.09 c -1.73,0 -3.13,1.4 -3.13,3.13 v 28.71 c 0,4.71 -1.88,9.23 -5.2,12.56 L 44.42,97.61 c -3.32,3.33 -7.85,5.2 -12.55,5.2 H 18.98 c -2.21,0 -3.99,-1.79 -3.99,-3.99 V 87.29 c 0,-2.21 1.79,-3.99 3.99,-3.99 h 20.34 c 1.41,0 2.59,-0.93 2.99,-2.2 0.09,-0.29 0.14,-0.59 0.14,-0.91 V 44 c 0,-0.22 -0.02,-0.42 -0.07,-0.63 -0.29,-1.43 -1.56,-2.5 -3.06,-2.5 H 3.13 C 1.4,40.87 0,42.27 0,44 v 7 c 0,0.15 0,0.29 0,0.44 v 28.72 c 0,1.72 1.41,3.13 3.13,3.13 h 3.16 c 2.21,0 3.99,1.79 3.99,3.99 v 11.53 c 0,2.21 -1.79,3.99 -3.99,3.99 H 3.15 c -1.73,0 -3.13,1.4 -3.13,3.13 v 36.19 c 0,1.72 1.41,3.13 3.13,3.13 h 36.19 c 1.73,0 3.13,-1.41 3.13,-3.13 V 113.4 c 0,-4.7 1.87,-9.23 5.2,-12.55 L 60,88.51 c 3.33,-3.32 7.85,-5.2 12.56,-5.2 h 28.71 c 1.73,0 3.13,-1.4 3.13,-3.13 V 44 c 0,-1.73 -1.4,-3.13 -3.13,-3.13 z"
id="path3" />
</g>
</g>
</svg>


View File

@ -1,23 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: embedding.py
@date: 2024/7/12 17:44
@desc:
"""
from typing import Dict
from langchain_community.embeddings import OpenAIEmbeddings
from setting.models_provider.base_model_provider import MaxKBBaseModel
class RegoloEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings):
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
return RegoloEmbeddingModel(
api_key=model_credential.get('api_key'),
model=model_name,
openai_api_base="https://api.regolo.ai/v1",
)

View File

@ -1,19 +0,0 @@
from typing import Dict
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class RegoloImage(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
return RegoloImage(
model_name=model_name,
openai_api_base="https://api.regolo.ai/v1",
openai_api_key=model_credential.get('api_key'),
streaming=True,
stream_usage=True,
extra_body=optional_params
)

View File

@ -1,38 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: llm.py
@date: 2024/4/18 15:28
@desc:
"""
from typing import List, Dict
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI
def custom_get_token_ids(text: str):
tokenizer = TokenizerManage.get_tokenizer()
return tokenizer.encode(text)
class RegoloChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
return RegoloChatModel(
model=model_name,
openai_api_base="https://api.regolo.ai/v1",
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params
)

View File

@ -1,58 +0,0 @@
from typing import Dict
from openai import OpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_tti import BaseTextToImage
def custom_get_token_ids(text: str):
tokenizer = TokenizerManage.get_tokenizer()
return tokenizer.encode(text)
class RegoloTextToImage(MaxKBBaseModel, BaseTextToImage):
api_base: str
api_key: str
model: str
params: dict
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.api_key = kwargs.get('api_key')
self.api_base = "https://api.regolo.ai/v1"
self.model = kwargs.get('model')
self.params = kwargs.get('params')
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
for key, value in model_kwargs.items():
if key not in ['model_id', 'use_local', 'streaming']:
optional_params['params'][key] = value
return RegoloTextToImage(
model=model_name,
api_base="https://api.regolo.ai/v1",
api_key=model_credential.get('api_key'),
**optional_params,
)
def is_cache_model(self):
return False
def check_auth(self):
chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
response_list = chat.models.with_raw_response.list()
# self.generate_image('Generate a picture of a kitten')
def generate_image(self, prompt: str, negative_prompt: str = None):
chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
res = chat.images.generate(model=self.model, prompt=prompt, **self.params)
file_urls = []
for content in res.data:
url = content.url
file_urls.append(url)
return file_urls

View File

@ -1,89 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: openai_model_provider.py
@date: 2024/3/28 16:26
@desc:
"""
import os
from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
ModelTypeConst, ModelInfoManage
from setting.models_provider.impl.regolo_model_provider.credential.embedding import \
RegoloEmbeddingCredential
from setting.models_provider.impl.regolo_model_provider.credential.llm import RegoloLLMModelCredential
from setting.models_provider.impl.regolo_model_provider.credential.tti import \
RegoloTextToImageModelCredential
from setting.models_provider.impl.regolo_model_provider.model.embedding import RegoloEmbeddingModel
from setting.models_provider.impl.regolo_model_provider.model.llm import RegoloChatModel
from setting.models_provider.impl.regolo_model_provider.model.tti import RegoloTextToImage
from smartdoc.conf import PROJECT_DIR
from django.utils.translation import gettext as _
openai_llm_model_credential = RegoloLLMModelCredential()
openai_tti_model_credential = RegoloTextToImageModelCredential()
model_info_list = [
ModelInfo('Phi-4', '', ModelTypeConst.LLM,
openai_llm_model_credential, RegoloChatModel
),
ModelInfo('DeepSeek-R1-Distill-Qwen-32B', '', ModelTypeConst.LLM,
openai_llm_model_credential,
RegoloChatModel),
ModelInfo('maestrale-chat-v0.4-beta', '',
ModelTypeConst.LLM, openai_llm_model_credential,
RegoloChatModel),
ModelInfo('Llama-3.3-70B-Instruct',
'',
ModelTypeConst.LLM, openai_llm_model_credential,
RegoloChatModel),
ModelInfo('Llama-3.1-8B-Instruct',
'',
ModelTypeConst.LLM, openai_llm_model_credential,
RegoloChatModel),
ModelInfo('DeepSeek-Coder-6.7B-Instruct', '',
ModelTypeConst.LLM, openai_llm_model_credential,
RegoloChatModel)
]
open_ai_embedding_credential = RegoloEmbeddingCredential()
model_info_embedding_list = [
ModelInfo('gte-Qwen2', '',
ModelTypeConst.EMBEDDING, open_ai_embedding_credential,
RegoloEmbeddingModel),
]
model_info_tti_list = [
ModelInfo('FLUX.1-dev', '',
ModelTypeConst.TTI, openai_tti_model_credential,
RegoloTextToImage),
ModelInfo('sdxl-turbo', '',
ModelTypeConst.TTI, openai_tti_model_credential,
RegoloTextToImage),
]
model_info_manage = (
ModelInfoManage.builder()
.append_model_info_list(model_info_list)
.append_default_model_info(
ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM,
openai_llm_model_credential, RegoloChatModel
))
.append_model_info_list(model_info_embedding_list)
.append_default_model_info(model_info_embedding_list[0])
.append_model_info_list(model_info_tti_list)
.append_default_model_info(model_info_tti_list[0])
.build()
)
class RegoloModelProvider(IModelProvider):
def get_model_info_manage(self):
return model_info_manage
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_regolo_provider', name='Regolo', icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'regolo_model_provider',
'icon',
'regolo_icon_svg')))

View File

@ -16,5 +16,5 @@ class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)

View File

@ -34,5 +34,5 @@ class SiliconCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params
**optional_params
)

View File

@ -33,7 +33,21 @@ class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params,
**optional_params,
custom_get_token_ids=custom_get_token_ids
)
return azure_chat_open_ai
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
try:
return super().get_num_tokens_from_messages(messages)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
def get_num_tokens(self, text: str) -> int:
try:
return super().get_num_tokens(text)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return len(tokenizer.encode(text))
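
Several providers in this compare (Bedrock on main, Tencent Cloud on the LTS branch, vLLM, Xinference) share this fallback idiom: ask the provider for an exact token count and fall back to a local tokenizer if that raises. The idiom in isolation, assuming any tokenizer object with an encode() method:

from langchain_core.messages import BaseMessage, get_buffer_string

def num_tokens_with_fallback(model, tokenizer, messages: list) -> int:
    try:
        return model.get_num_tokens_from_messages(messages)  # provider-side count
    except Exception:
        # Offline estimate: flatten each message to text and encode locally.
        return sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages)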

View File

@ -16,5 +16,5 @@ class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)

View File

@ -19,7 +19,7 @@ class VllmImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)
def is_cache_model(self):

View File

@ -1,10 +1,9 @@
# coding=utf-8
from typing import Dict, Optional, Sequence, Union, Any, Callable
from typing import Dict, List
from urllib.parse import urlparse, ParseResult
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.tools import BaseTool
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -32,19 +31,13 @@ class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params,
streaming=True,
stream_usage=True,
extra_body=optional_params
)
return vllm_chat_open_ai
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Optional[
Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
] = None,
) -> int:
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])

View File

@ -16,5 +16,5 @@ class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)

View File

@ -17,5 +17,5 @@ class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params
**optional_params
)

View File

@ -27,7 +27,7 @@ class WenxinLLMModelParams(BaseForm):
_step=0.01,
precision=2)
max_output_tokens = forms.SliderField(
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=1024,

View File

@ -19,7 +19,7 @@ class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:

View File

@ -34,7 +34,7 @@ class XinferenceChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=base_url,
openai_api_key=model_credential.get('api_key'),
extra_body=optional_params
**optional_params
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:

View File

@ -22,9 +22,6 @@ class XInferenceReranker(MaxKBBaseModel, BaseDocumentCompressor):
"""UID of the launched model"""
api_key: Optional[str]
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
return XInferenceReranker(server_url=model_credential.get('server_url'), model_uid=model_name,

View File

@ -16,5 +16,5 @@ class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
extra_body=optional_params
**optional_params,
)

View File

@ -171,24 +171,6 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer):
}
)
@staticmethod
def get_response_body_api():
return openapi.Schema(
type=openapi.TYPE_OBJECT,
properties={
'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
'username': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')),
'email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Email'), description=_('Email')),
'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')),
'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'),
description=_('Is active')),
'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')),
'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('member type'),
description=_('member type manage|member')),
}
)
@transaction.atomic
def batch_add_member(self, user_id_list: List[str], with_valid=True):
"""

View File

@ -38,7 +38,6 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Add member'),
operation_id=_('Add member'),
request_body=TeamMemberSerializer().get_request_body_api(),
responses=result.get_default_response(),
tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_CREATE)
@log(menu='Team', operate='Add member',
@ -54,7 +53,6 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Add members in batches'),
operation_id=_('Add members in batches'),
request_body=TeamMemberSerializer.get_bach_request_body_api(),
responses=result.get_api_array_response(TeamMemberSerializer.get_response_body_api()),
tags=[_('Team')])
@has_permissions(PermissionConstants.TEAM_CREATE)
@log(menu='Team', operate='Add members in batches',
@ -80,7 +78,6 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Update team member permissions'),
operation_id=_('Update team member permissions'),
request_body=UpdateTeamMemberPermissionSerializer().get_request_body_api(),
responses=result.get_default_response(),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
tags=[_('Team')]
)
@ -96,7 +93,6 @@ class TeamMember(APIView):
@swagger_auto_schema(operation_summary=_('Remove member'),
operation_id=_('Remove member'),
manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
responses=result.get_default_response(),
tags=[_('Team')]
)
@has_permissions(PermissionConstants.TEAM_DELETE)

View File

@ -31,8 +31,7 @@ class Model(APIView):
@action(methods=['POST'], detail=False)
@swagger_auto_schema(operation_summary=_('Create model'),
operation_id=_('Create model'),
request_body=ModelCreateApi.get_request_body_api(),
manual_parameters=result.get_api_response(ModelCreateApi.get_request_body_api())
request_body=ModelCreateApi.get_request_body_api()
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
@log(menu='model', operate='Create model',
@ -46,8 +45,7 @@ class Model(APIView):
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary=_('Download model, trial only with Ollama platform'),
operation_id=_('Download model, trial only with Ollama platform'),
request_body=ModelCreateApi.get_request_body_api(),
responses=result.get_api_response(ModelCreateApi.get_request_body_api())
request_body=ModelCreateApi.get_request_body_api()
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
def put(self, request: Request):
@ -125,8 +123,7 @@ class Model(APIView):
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary=_('Update model'),
operation_id=_('Update model'),
request_body=ModelEditApi.get_request_body_api(),
responses=result.get_api_response(ModelEditApi.get_request_body_api())
request_body=ModelEditApi.get_request_body_api()
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_CREATE)
@log(menu='model', operate='Update model',
@ -169,8 +166,7 @@ class Provide(APIView):
@swagger_auto_schema(operation_summary=_('Call the supplier function to obtain form data'),
operation_id=_('Call the supplier function to obtain form data'),
manual_parameters=ProvideApi.get_request_params_api(),
request_body=ProvideApi.get_request_body_api(),
responses=result.get_api_response(ProvideApi.get_request_body_api())
request_body=ProvideApi.get_request_body_api()
, tags=[_('model')])
@has_permissions(PermissionConstants.MODEL_READ)
@log(menu='model', operate='Call the supplier function to obtain form data')

View File

@ -93,8 +93,7 @@ class Config(dict):
'SANDBOX': False,
'LOCAL_MODEL_HOST': '127.0.0.1',
'LOCAL_MODEL_PORT': '11636',
'LOCAL_MODEL_PROTOCOL': "http",
'LOCAL_MODEL_HOST_WORKER': 1
'LOCAL_MODEL_PROTOCOL': "http"
}
@@ -114,8 +113,7 @@ class Config(dict):
"ENGINE": self.get('DB_ENGINE'),
"POOL_OPTIONS": {
"POOL_SIZE": 20,
"MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW')),
'RECYCLE': 30 * 60
"MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW'))
}
}
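
Note: main carries two defaults that v1.10.4-lts lacks: a
LOCAL_MODEL_HOST_WORKER count and a RECYCLE interval in the database
POOL_OPTIONS. The POOL_OPTIONS shape matches django-db-connection-pool,
where RECYCLE closes and reopens pooled connections older than the given
number of seconds. A sketch under that assumption; the engine and database
name are illustrative:

DATABASES = {
    'default': {
        'ENGINE': 'dj_db_conn_pool.backends.postgresql',
        'NAME': 'application_db',
        'POOL_OPTIONS': {
            'POOL_SIZE': 20,      # persistent connections kept open
            'MAX_OVERFLOW': 80,   # extra connections allowed under load
            'RECYCLE': 30 * 60,   # replace connections after 30 minutes
        },
    },
}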

View File

@@ -126,10 +126,6 @@ CACHES = {
"token_cache": {
'BACKEND': 'common.cache.file_cache.FileCache',
'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "token_cache") # 文件夹路径
},
'captcha_cache': {
'BACKEND': 'common.cache.file_cache.FileCache',
'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "captcha_cache") # 文件夹路径
}
}
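
Note: this hunk removes the captcha_cache entry that main defines next to
token_cache, both backed by the project's FileCache. Named caches are read
back through django.core.cache.caches, which is exactly how the serializer
diff below consumes them. A minimal sketch; the key and value are
illustrative:

from django.core import cache

captcha_cache = cache.caches['captcha_cache']
captcha_cache.set('LOGIN:ab3d', 'Ab3D', timeout=5 * 60)  # expires in 5 minutes
assert captcha_cache.get('LOGIN:ab3d') == 'Ab3D'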

View File

@@ -6,22 +6,18 @@
@date:2023/9/5 16:32
@desc:
"""
import base64
import datetime
import os
import random
import re
import uuid
from captcha.image import ImageCaptcha
from django.conf import settings
from django.core import validators, signing, cache
from django.core.mail import send_mail
from django.core.mail.backends.smtp import EmailBackend
from django.db import transaction
from django.db.models import Q, QuerySet, Prefetch
from django.utils.translation import get_language
from django.utils.translation import gettext_lazy as _, to_locale
from drf_yasg import openapi
from rest_framework import serializers
@@ -34,7 +30,7 @@ from common.exception.app_exception import AppApiException
from common.mixins.api_mixin import ApiMixin
from common.models.db_model_manage import DBModelManage
from common.response.result import get_api_response
from common.util.common import valid_license, get_random_chars
from common.util.common import valid_license
from common.util.field_message import ErrMessage
from common.util.lock import lock
from dataset.models import DataSet, Document, Paragraph, Problem, ProblemParagraphMapping
@@ -43,29 +39,9 @@ from function_lib.models.function import FunctionLib
from setting.models import Team, SystemSetting, SettingType, Model, TeamMember, TeamMemberPermission
from smartdoc.conf import PROJECT_DIR
from users.models.user import User, password_encrypt, get_user_dynamics_permission
from django.utils.translation import gettext_lazy as _, gettext, to_locale
from django.utils.translation import get_language
user_cache = cache.caches['user_cache']
captcha_cache = cache.caches['captcha_cache']
class CaptchaSerializer(ApiMixin, serializers.Serializer):
@staticmethod
def get_response_body_api():
return get_api_response(openapi.Schema(
type=openapi.TYPE_STRING,
title="captcha",
default="xxxx",
description="captcha"
))
@staticmethod
def generate():
chars = get_random_chars()
image = ImageCaptcha()
data = image.generate(chars)
captcha = base64.b64encode(data.getbuffer())
captcha_cache.set(f"LOGIN:{chars.lower()}", chars, timeout=5 * 60)
return 'data:image/png;base64,' + captcha.decode()
class SystemSerializer(ApiMixin, serializers.Serializer):
@@ -95,8 +71,6 @@ class LoginSerializer(ApiMixin, serializers.Serializer):
password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")))
captcha = serializers.CharField(required=True, error_messages=ErrMessage.char(_("captcha")))
def is_valid(self, *, raise_exception=False):
"""
Validate parameters
@@ -104,10 +78,6 @@ class LoginSerializer(ApiMixin, serializers.Serializer):
:return: User information
"""
super().is_valid(raise_exception=True)
captcha = self.data.get('captcha')
captcha_value = captcha_cache.get(f"LOGIN:{captcha.lower()}")
if captcha_value is None:
raise AppApiException(1005, _("Captcha code error or expiration"))
username = self.data.get("username")
password = password_encrypt(self.data.get("password"))
user = QuerySet(User).filter(Q(username=username,
@@ -139,8 +109,7 @@ class LoginSerializer(ApiMixin, serializers.Serializer):
required=['username', 'password'],
properties={
'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")),
'captcha': openapi.Schema(type=openapi.TYPE_STRING, title=_("captcha"), description=_("captcha"))
'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password"))
}
)
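
Note: these hunks strip the whole captcha path from main: the imports,
CaptchaSerializer.generate() (which renders random characters to a base64
PNG data URI and caches them for five minutes), and the captcha field plus
cache check in LoginSerializer.is_valid. A condensed, self-contained
sketch of that round trip, assuming the captcha package; get_random_chars
below is a hypothetical stand-in for the project helper of the same name:

import base64
import random
import string

from captcha.image import ImageCaptcha

def get_random_chars(n: int = 4) -> str:
    # hypothetical stand-in for common.util.common.get_random_chars
    return ''.join(random.choices(string.ascii_letters + string.digits, k=n))

def generate_captcha(cache_store) -> str:
    chars = get_random_chars()
    data = ImageCaptcha().generate(chars)  # BytesIO holding a PNG
    cache_store.set(f'LOGIN:{chars.lower()}', chars, timeout=5 * 60)
    return 'data:image/png;base64,' + base64.b64encode(data.getbuffer()).decode()

def check_captcha(cache_store, submitted: str) -> bool:
    # mirrors the LoginSerializer.is_valid check removed above
    return cache_store.get(f'LOGIN:{submitted.lower()}') is not None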

View File

@@ -6,7 +6,6 @@ app_name = "user"
urlpatterns = [
path('profile', views.Profile.as_view()),
path('user', views.User.as_view(), name="profile"),
path('user/captcha', views.CaptchaView.as_view(), name='captcha'),
path('user/language', views.SwitchUserLanguageView.as_view(), name='language'),
path('user/list', views.User.Query.as_view()),
path('user/login', views.Login.as_view(), name='login'),

View File

@@ -26,7 +26,7 @@ from smartdoc.settings import JWT_AUTH
from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \
RePasswordSerializer, \
SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer, \
SwitchLanguageSerializer, CaptchaSerializer
SwitchLanguageSerializer
from users.views.common import get_user_operation_object, get_re_password_details
user_cache = cache.caches['user_cache']
@@ -84,7 +84,7 @@ class SwitchUserLanguageView(APIView):
description=_("language")),
}
),
responses=result.get_default_response(),
responses=RePasswordSerializer().get_response_body_api(),
tags=[_("User management")])
@log(menu='User management', operate='Switch Language',
get_operation_object=lambda r, k: {'name': r.user.username})
@@ -111,7 +111,7 @@ class ResetCurrentUserPasswordView(APIView):
description=_("Password"))
}
),
responses=result.get_default_response(),
responses=RePasswordSerializer().get_response_body_api(),
tags=[_("User management")])
@log(menu='User management', operate='Modify current user password',
get_operation_object=lambda r, k: {'name': r.user.username},
@@ -170,18 +170,6 @@ def _get_details(request):
}
class CaptchaView(APIView):
@action(methods=['GET'], detail=False)
@swagger_auto_schema(operation_summary=_("Obtain graphical captcha"),
operation_id=_("Obtain graphical captcha"),
responses=CaptchaSerializer().get_response_body_api(),
security=[],
tags=[_("User management")])
def get(self, request: Request):
return result.success(CaptchaSerializer().generate())
class Login(APIView):
@action(methods=['POST'], detail=False)
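
Note: on main, CaptchaView serves the data URI over an unauthenticated GET
(security=[]), and the two responses= swaps above mean main documents a
plain default envelope where v1.10.4-lts reuses RePasswordSerializer's
response body even for the language-switch endpoint. A client-side sketch;
the host, API prefix, and envelope key are illustrative, not confirmed by
this diff:

import requests

resp = requests.get('http://localhost:8080/api/user/captcha', timeout=10)
resp.raise_for_status()
data_uri = resp.json()['data']  # e.g. 'data:image/png;base64,...'
print(data_uri[:40])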

Some files were not shown because too many files have changed in this diff.