Mirror of https://github.com/1Panel-dev/MaxKB.git (synced 2025-12-26 01:33:05 +00:00)

## Compare commits

156 commits are included in this comparison. Only the abbreviated SHA1s were captured in this view; author and date columns are empty:
| SHA1 |
|---|
| 847755b1c2 |
| b57455d0ee |
| 2a257edff9 |
| d47699331c |
| 90c64d77dd |
| e1ada3ffe2 |
| b62c79fda6 |
| 3d9e7dd4b1 |
| 8ff15865a7 |
| 48899d55d1 |
| 1cc4107bfe |
| b13cd03706 |
| 69f024492b |
| a9c46cd7e0 |
| a9e9f5b085 |
| e12b1fe14e |
| 7ce66a7bf3 |
| decd3395db |
| 9d7a383348 |
| 8f7d91798b |
| 81a3af2c8b |
| 2ec0d22b14 |
| 27a77dc657 |
| 187e9c1e4e |
| e5bab10824 |
| 347f4a0b03 |
| 289ebf42a6 |
| 71fdce08d7 |
| adc5af9cef |
| ce2ab322f6 |
| a7e31b94c7 |
| 8498687794 |
| 190ca3e198 |
| c1ddec1a61 |
| 1ba8077e95 |
| 9a42bd2302 |
| 35b662a52d |
| a4faf52261 |
| a30316d87a |
| a4d10cbe3b |
| ceccf9f1fa |
| 3964db20dc |
| 57ada0708f |
| a1a92a833a |
| 5e0d8048f9 |
| 8903b35aec |
| fa4f7e99fd |
| b0630b3ddd |
| b2bf69740c |
| 8cf66b9eca |
| 1db8577ca6 |
| 949e4dea9e |
| c12988bc8a |
| 2728453f6c |
| ccf43bbcd9 |
| 8d8de53e38 |
| 7faf556771 |
| 76ec8ad6f6 |
| 0cf05a76a0 |
| 357edbfbe0 |
| 0609a9afc8 |
| 96e59a018f |
| 79b2de8893 |
| 0c7cca035e |
| 00a3e5ddc3 |
| a6533c0db7 |
| 704077d066 |
| 59ee0c1270 |
| b37cc3ba1c |
| dfe6d0a91b |
| 5813eedd4f |
| 363150380d |
| 47f9c04664 |
| 17cd88edda |
| d33b620dc8 |
| b95cb20704 |
| 4ff1944b60 |
| 1c6b0f8a86 |
| d85801fe58 |
| e79e7d505d |
| 33b1cd65b0 |
| 8d503c8bf8 |
| df7f922013 |
| a0541203e4 |
| fb64731cd8 |
| d960a18711 |
| ee35cc21e9 |
| e4a60daa17 |
| 7bcb770ee5 |
| b5b09dc8b4 |
| b1f6092620 |
| c8441cfd73 |
| 7e4b147576 |
| 9cd082089a |
| d4541e23f9 |
| 5e7e91cced |
| d9787bb548 |
| 131b5b3bbe |
| e58f95832b |
| f24337d5f3 |
| d6f1d25b59 |
| 0ec198fa43 |
| 77e96624ee |
| 5e02809db2 |
| 6fe001fcf8 |
| f646102262 |
| b97f4e16ba |
| 0c14306889 |
| f1d043f67b |
| c39a6e81d7 |
| 9c56c7e198 |
| 6484fef8ea |
| 8a194481ac |
| 072b817792 |
| d32f7d36a6 |
| 54c9d4e725 |
| d2637c3de2 |
| 2550324003 |
| bf52dd8174 |
| 2ecec57d2f |
| b5fda0e020 |
| 45a60cd9a7 |
| 2b82675853 |
| 1d3bf1ca73 |
| 04cb9c96fe |
| 45d8ac2eee |
| eb60331c88 |
| 8fc9b0a22d |
| ec5fd9e343 |
| 1e49939c38 |
| 5a7b23aa00 |
| 791505b7b8 |
| da10649adb |
| a025e3960d |
| 98ed348de9 |
| b9dcc36b31 |
| 7a3d3844ae |
| 6ea2cf149a |
| c30677d8b0 |
| a1a2fb5628 |
| f9cb0e24d6 |
| 2dc42183cb |
| c781c11d26 |
| e178cfe5c0 |
| 3b24373cd0 |
| 125ed8aa7a |
| 0b60a03e5d |
| bb3f17ebfe |
| 1695710cbe |
| ebe8506c67 |
| 3c3bd9884f |
| f188383fea |
| bbab359813 |
| 8381ca5287 |
| f5282bf1e7 |
| c0ffc0aaf5 |
**New file — a Dependabot configuration (the file path was not captured in this view; presumably `.github/dependabot.yml`): weekly grouped pip updates against the `v2` branch, with a commented-out `pymupdf` ignore rule left in place.** YAML indentation below is reconstructed from the key nesting.

```diff
@@ -0,0 +1,17 @@
+version: 2
+updates:
+  - package-ecosystem: "pip"
+    directory: "/"
+    schedule:
+      interval: "weekly"
+      timezone: "Asia/Shanghai"
+      day: "friday"
+    target-branch: "v2"
+    groups:
+      python-dependencies:
+        patterns:
+          - "*"
+    # ignore:
+    #   - dependency-name: "pymupdf"
+    #     versions: ["*"]
+
```
**GitHub Actions Docker build workflow (file path not captured in this view).** The changes: the default image tag moves from `v1.10.3-dev` to `v1.10.7-dev`; both jobs move from `ubuntu-22.04` to `ubuntu-latest`; the extra "Set Swap Space" step and the pinned QEMU/BuildKit configuration are dropped; when "publish latest" is chosen, a major-version alias (`${TAG_NAME%%.*}`) replaces the former `latest` tag; the build commit is computed with `git rev-parse --short HEAD` instead of `${GITHUB_SHA::8}`; and the page cache is flushed before each build. The Chinese description of `dockerImageTagWithLatest` reads: "whether to publish the latest tag (select for official releases; never select for test builds)". YAML indentation below is reconstructed.

```diff
@@ -7,7 +7,7 @@ on:
     inputs:
       dockerImageTag:
         description: 'Image Tag'
-        default: 'v1.10.3-dev'
+        default: 'v1.10.7-dev'
         required: true
       dockerImageTagWithLatest:
         description: '是否发布latest tag(正式发版时选择,测试版本切勿选择)'
@@ -36,7 +36,7 @@ on:
 jobs:
   build-and-push-to-fit2cloud-registry:
     if: ${{ contains(github.event.inputs.registry, 'fit2cloud') }}
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - name: Check Disk Space
        run: df -h
@@ -52,10 +52,6 @@ jobs:
           swap-storage: true
       - name: Check Disk Space
         run: df -h
-      - name: Set Swap Space
-        uses: pierotofy/set-swap-space@master
-        with:
-          swap-size-gb: 8
       - name: Checkout
         uses: actions/checkout@v4
         with:
@@ -68,24 +64,17 @@ jobs:
           TAG_NAME=${{ github.event.inputs.dockerImageTag }}
           TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
           if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
-            DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
+            DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
           else
             DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
           fi
           echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
-            --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
+            --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
           ${DOCKER_IMAGE_TAGS} .
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
-        with:
-          # Until https://github.com/tonistiigi/binfmt/issues/215
-          image: tonistiigi/binfmt:qemu-v7.0.0-28
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
-        with:
-          buildkitd-config-inline: |
-            [worker.oci]
-            max-parallelism = 1
       - name: Login to GitHub Container Registry
         uses: docker/login-action@v3
         with:
@@ -100,11 +89,12 @@ jobs:
           password: ${{ secrets.FIT2CLOUD_REGISTRY_PASSWORD }}
       - name: Docker Buildx (build-and-push)
         run: |
+          sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
           docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
 
   build-and-push-to-dockerhub:
     if: ${{ contains(github.event.inputs.registry, 'dockerhub') }}
-    runs-on: ubuntu-22.04
+    runs-on: ubuntu-latest
     steps:
       - name: Check Disk Space
         run: df -h
@@ -120,10 +110,6 @@ jobs:
           swap-storage: true
       - name: Check Disk Space
         run: df -h
-      - name: Set Swap Space
-        uses: pierotofy/set-swap-space@master
-        with:
-          swap-size-gb: 8
       - name: Checkout
         uses: actions/checkout@v4
         with:
@@ -136,24 +122,17 @@ jobs:
           TAG_NAME=${{ github.event.inputs.dockerImageTag }}
           TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
           if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
-            DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
+            DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
           else
             DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
           fi
           echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
-            --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
+            --build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
           ${DOCKER_IMAGE_TAGS} .
       - name: Set up QEMU
         uses: docker/setup-qemu-action@v3
-        with:
-          # Until https://github.com/tonistiigi/binfmt/issues/215
-          image: tonistiigi/binfmt:qemu-v7.0.0-28
       - name: Set up Docker Buildx
         uses: docker/setup-buildx-action@v3
-        with:
-          buildkitd-config-inline: |
-            [worker.oci]
-            max-parallelism = 1
       - name: Login to GitHub Container Registry
         uses: docker/login-action@v3
         with:
@@ -167,4 +146,5 @@ jobs:
           password: ${{ secrets.DOCKERHUB_TOKEN }}
       - name: Docker Buildx (build-and-push)
         run: |
+          sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
           docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
```
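The `${TAG_NAME%%.*}` substitution above is what turns "publish latest" into a major-version alias. A small illustrative sketch of the same suffix-stripping in Python (mine, not repository code):

```python
# bash's ${TAG_NAME%%.*} deletes the longest suffix matching the glob ".*",
# i.e. everything from the first dot onward.
def major_tag(tag_name: str) -> str:
    return tag_name.split('.', 1)[0]

assert major_tag('v1.10.7-dev') == 'v1'  # pushed alongside the full tag
assert major_tag('v2.0.1') == 'v2'
```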
**README.md** (11 lines changed): the tagline moves from "Ready-to-use AI Chatbot" to "Open-source platform for building enterprise-grade agents" (plus a Chinese tagline with the same meaning); "Max Knowledge Base" is rebranded to "Max Knowledge Brain"; the RAG bullet drops its redundant "and RAG (Retrieval-Augmented Generation)" clause; "Flexible Orchestration" becomes "Agentic Workflow"; and the positioning paragraph in the feature comparison is removed.

```diff
@@ -1,5 +1,6 @@
 <p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
-<h3 align="center">Ready-to-use AI Chatbot</h3>
+<h3 align="center">Open-source platform for building enterprise-grade agents</h3>
+<h3 align="center">强大易用的企业级智能体平台</h3>
 <p align="center"><a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a></p>
 <p align="center">
   <a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
@@ -10,10 +11,10 @@
 </p>
 <hr/>
 
-MaxKB = Max Knowledge Base, it is a ready-to-use AI chatbot that integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
+MaxKB = Max Knowledge Brain, it is an open-source platform for building enterprise-grade agents. MaxKB integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
 
-- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting, vectorization, and RAG (Retrieval-Augmented Generation). This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
-- **Flexible Orchestration**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
+- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting, vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
+- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
 - **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
 - **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
 - **Multi Modal**: Native support for input and output text, image, audio and video.
@@ -55,8 +56,6 @@ Access MaxKB web interface at `http://your_server_ip:8080` with default admin credentials:
 
 ## Feature Comparison
 
-MaxKB is positioned as a ready-to-use RAG (Retrieval-Augmented Generation) intelligent Q&A application, rather than a middleware platform for building large model applications. The following table is merely a comparison from a functional perspective.
-
 <table style="width: 100%;">
   <tr>
     <th align="center">Feature</th>
```

**README_CN.md** (20 lines changed): the Chinese README receives the same repositioning. The old taglines ("a knowledge-base Q&A system built on LLMs and RAG" / "Ready-to-use, flexible RAG Chatbot") collapse into the new one ("a powerful, easy-to-use enterprise-grade agent platform"); the intro paragraph is rewritten accordingly; the "开箱即用" (ready to use) bullet is replaced by "RAG 检索增强生成" (RAG retrieval-augmented generation) and "模型中立" (model-neutral) moves to the end of the list; the license badge gains the blue color parameter. Duplicated badge rows in the scraped view were viewer residue and are shown once below.
```diff
@@ -1,25 +1,25 @@
 <p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
-<h3 align="center">基于大模型和 RAG 的知识库问答系统</h3>
-<h4 align="center">Ready-to-use, flexible RAG Chatbot</h4>
+<h3 align="center">强大易用的企业级智能体平台</h3>
 <p align="center">
   <a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: auto;" /></a>
   <a href="https://market.aliyun.com/products/53690006/cmjj00067609.html?userCode=kmemb8jp" target="_blank"><img src="https://img.alicdn.com/imgextra/i2/O1CN01H5JIwY1rZ0OobDjnJ_!!6000000005644-2-tps-1000-216.png" alt="1Panel-dev%2FMaxKB | Aliyun" style="width: 250px; height: auto;" /></a>
 </p>
 <p align="center">
   <a href="README_EN.md"><img src="https://img.shields.io/badge/English_README-blue" alt="English README"></a>
-  <a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb" alt="License: GPL v3"></a>
+  <a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
   <a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
   <a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?style=flat-square" alt="Stars"></a>
   <a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
   <a href="https://gitee.com/fit2cloud-feizhiyun/MaxKB"><img src="https://gitee.com/fit2cloud-feizhiyun/MaxKB/badge/star.svg?theme=gvp" alt="Gitee Stars"></a>
   <a href="https://gitcode.com/feizhiyun/MaxKB"><img src="https://gitcode.com/feizhiyun/MaxKB/star/badge.svg" alt="GitCode Stars"></a>
 </p>
 <hr/>
 
-MaxKB = Max Knowledge Base,是一款开箱即用的 RAG Chatbot,具备强大的工作流和 MCP 工具调用能力。它支持对接各种主流大语言模型(LLMs),广泛应用于智能客服、企业内部知识库、学术研究与教育等场景。
+MaxKB = Max Knowledge Brain,是一款强大易用的企业级智能体平台,支持 RAG 检索增强生成、工作流编排、MCP 工具调用能力。MaxKB 支持对接各种主流大语言模型,广泛应用于智能客服、企业内部知识库问答、员工助手、学术研究与教育等场景。
 
-- **开箱即用**:支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化和 RAG(检索增强生成),有效减少大模型幻觉,智能问答交互体验好;
-- **模型中立**:支持对接各种大模型,包括本地私有大模型(DeepSeek R1 / Llama 3 / Qwen 2 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等)和国外公共大模型(OpenAI / Claude / Gemini 等);
+- **RAG 检索增强生成**:高效搭建本地 AI 知识库,支持直接上传文档 / 自动爬取在线文档,支持文本自动拆分、向量化,有效减少大模型幻觉,提升问答效果;
 - **灵活编排**:内置强大的工作流引擎、函数库和 MCP 工具调用能力,支持编排 AI 工作过程,满足复杂业务场景下的需求;
-- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度。
+- **无缝嵌入**:支持零编码快速嵌入到第三方业务系统,让已有系统快速拥有智能问答能力,提高用户满意度;
+- **模型中立**:支持对接各种大模型,包括本地私有大模型(DeepSeek R1 / Llama 3 / Qwen 2 等)、国内公共大模型(通义千问 / 腾讯混元 / 字节豆包 / 百度千帆 / 智谱 AI / Kimi 等)和国外公共大模型(OpenAI / Claude / Gemini 等)。
 
 MaxKB 三分钟视频介绍:https://www.bilibili.com/video/BV18JypYeEkj/
```
**LLM chat step node (`BaseChatNode`) and its MCP streaming helpers (file path not captured in this view).** Blank lines are normalized to PEP 8 spacing; `answer_text` is now only populated when the node is flagged as a result node (`is_result`), so intermediate nodes stop leaking their output into the chat answer; and MCP tool use is skipped when the server config contains `"stdio"` (the serializer change further below rejects such configs outright).

```diff
@@ -40,6 +40,7 @@ tool_message_template = """
 
 """
 
+
 def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
                    reasoning_content: str):
     chat_model = node_variable.get('chat_model')
@@ -102,7 +103,6 @@ def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INo
     _write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
 
 
-
 async def _yield_mcp_response(chat_model, message_list, mcp_servers):
     async with MultiServerMCPClient(json.loads(mcp_servers)) as client:
         agent = create_react_agent(chat_model, client.get_tools())
@@ -115,6 +115,7 @@ async def _yield_mcp_response(chat_model, message_list, mcp_servers):
         if isinstance(chunk[0], AIMessageChunk):
             yield chunk[0]
 
+
 def mcp_response_generator(chat_model, message_list, mcp_servers):
     loop = asyncio.new_event_loop()
     try:
@@ -130,6 +131,7 @@ def mcp_response_generator(chat_model, message_list, mcp_servers):
     finally:
         loop.close()
 
+
 async def anext_async(agen):
     return await agen.__anext__()
 
@@ -186,7 +188,8 @@ class BaseChatNode(IChatNode):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
         self.context['reasoning_content'] = details.get('reasoning_content')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
                 model_params_setting=None,
@@ -216,7 +219,7 @@ class BaseChatNode(IChatNode):
         message_list = self.generate_message_list(system, prompt, history_message)
         self.context['message_list'] = message_list
 
-        if mcp_enable and mcp_servers is not None:
+        if mcp_enable and mcp_servers is not None and '"stdio"' not in mcp_servers:
             r = mcp_response_generator(chat_model, message_list, mcp_servers)
             return NodeResult(
                 {'result': r, 'chat_model': chat_model, 'message_list': message_list,
```
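The `mcp_response_generator`/`anext_async` pair exists to stream an async LangChain agent from synchronous workflow code. A minimal self-contained sketch of that bridge (illustrative only; names and details differ from the MaxKB source):

```python
import asyncio

async def _aiter_example():
    # Stand-in for the MCP agent's async stream.
    for i in range(3):
        yield i

def sync_consume(agen):
    # Drive a private event loop one step per item, mirroring the pattern
    # in mcp_response_generator: new loop, pull items, close in finally.
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.close()

print(list(sync_consume(_aiter_example())))  # [0, 1, 2]
```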
**Sub-application step node (`BaseApplicationNode`): the same `is_result` gating for `answer_text`, and the auto-created `Chat` row now also records the `client_id`.**

```diff
@@ -168,7 +168,8 @@ class BaseApplicationNode(IApplicationNode):
         self.context['question'] = details.get('question')
         self.context['type'] = details.get('type')
         self.context['reasoning_content'] = details.get('reasoning_content')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
                 app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
@@ -178,7 +179,8 @@ class BaseApplicationNode(IApplicationNode):
         current_chat_id = string_to_uuid(chat_id + application_id)
         Chat.objects.get_or_create(id=current_chat_id, defaults={
             'application_id': application_id,
-            'abstract': message[0:1024]
+            'abstract': message[0:1024],
+            'client_id': client_id,
         })
         if app_document_list is None:
             app_document_list = []
```
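The `is_result` change recurs in nearly every step node in this changeset. A condensed sketch of the before/after behaviour (a hypothetical `NodeSketch` class for illustration, not MaxKB code):

```python
class NodeSketch:
    def __init__(self, node_params):
        self.node_params = node_params
        self.context = {}
        self.answer_text = None

    def save_context(self, details):
        self.context['answer'] = details.get('answer')
        # Previously: self.answer_text = details.get('answer') unconditionally.
        # Now only nodes flagged as the final result contribute to the reply.
        if self.node_params.get('is_result', False):
            self.answer_text = details.get('answer')

node = NodeSketch({'is_result': False})
node.save_context({'answer': 'intermediate output'})
assert node.answer_text is None          # intermediate nodes stay silent
assert node.context['answer'] == 'intermediate output'
```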
**Direct-reply node (`BaseReplyNode`): the same `is_result` gating.**

```diff
@@ -15,7 +15,9 @@ from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
 class BaseReplyNode(IReplyNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
         if reply_type == 'referencing':
             result = self.get_reference_content(fields)
```
**Form node (`BaseFormNode`): `is_result` gating plus a space after the comma in the `<form_rander>` payload (the `form_rander` spelling is the project's own identifier and is kept as-is).**

```diff
@@ -38,7 +38,8 @@ class BaseFormNode(IFormNode):
         self.context['start_time'] = details.get('start_time')
         self.context['form_data'] = form_data
         self.context['is_submit'] = details.get('is_submit')
-        self.answer_text = details.get('result')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('result')
         if form_data is not None:
             for key in form_data:
                 self.context[key] = form_data[key]
@@ -70,7 +71,7 @@ class BaseFormNode(IFormNode):
                         "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
                         'form_data': self.context.get('form_data', {}),
                         "is_submit": self.context.get("is_submit", False)}
-        form = f'<form_rander>{json.dumps(form_setting,ensure_ascii=False)}</form_rander>'
+        form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
         context = self.workflow_manage.get_workflow_content()
         form_content_format = self.workflow_manage.reset_prompt(form_content_format)
         prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
@@ -85,7 +86,7 @@ class BaseFormNode(IFormNode):
                         "chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
                         'form_data': self.context.get('form_data', {}),
                         "is_submit": self.context.get("is_submit", False)}
-        form = f'<form_rander>{json.dumps(form_setting,ensure_ascii=False)}</form_rander>'
+        form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
         context = self.workflow_manage.get_workflow_content()
         form_content_format = self.workflow_manage.reset_prompt(form_content_format)
         prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
```
**Function-library node: optional inputs that arrive as empty strings are now treated like missing values in `convert_value`, and `answer_text` is gated on `is_result`.**

```diff
@@ -65,7 +65,7 @@ def valid_reference_value(_type, value, name):
 
 
 def convert_value(name: str, value, _type, is_required, source, node):
-    if not is_required and value is None:
+    if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
         return None
     if not is_required and source == 'reference' and (value is None or len(value) == 0):
         return None
@@ -113,7 +113,8 @@ def valid_function(function_lib, user_id):
 class BaseFunctionLibNodeNode(IFunctionLibNode):
     def save_context(self, details, workflow_manage):
         self.context['result'] = details.get('result')
-        self.answer_text = str(details.get('result'))
+        if self.node_params.get('is_result'):
+            self.answer_text = str(details.get('result'))
 
     def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
         function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
```
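The relaxed guard treats empty strings like missing values for optional inputs. The predicate in isolation (a standalone sketch, not repository code):

```python
def is_effectively_empty(value) -> bool:
    # Mirrors the new convert_value condition for optional parameters.
    return value is None or (isinstance(value, str) and len(value) == 0)

assert is_effectively_empty(None)
assert is_effectively_empty('')
assert not is_effectively_empty('0')  # a real value, even if falsy-looking
```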
**Inline-function node: the same `convert_value` and `save_context` changes.**

```diff
@@ -49,7 +49,7 @@ def valid_reference_value(_type, value, name):
 
 
 def convert_value(name: str, value, _type, is_required, source, node):
-    if not is_required and value is None:
+    if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
         return None
     if source == 'reference':
         value = node.workflow_manage.get_reference_field(
@@ -84,7 +84,8 @@ def convert_value(name: str, value, _type, is_required, source, node):
 class BaseFunctionNodeNode(IFunctionNode):
     def save_context(self, details, workflow_manage):
         self.context['result'] = details.get('result')
-        self.answer_text = str(details.get('result'))
+        if self.node_params.get('is_result', False):
+            self.answer_text = str(details.get('result'))
 
     def execute(self, input_field_list, code, **kwargs) -> NodeResult:
         params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
```
**Image-generation node (`BaseImageGenerateNode`): `is_result` gating and a line wrap of the model lookup (the `print(model_params_setting)` debug line is untouched context).**

```diff
@@ -16,7 +16,8 @@ class BaseImageGenerateNode(IImageGenerateNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
                 model_params_setting,
@@ -24,7 +25,8 @@ class BaseImageGenerateNode(IImageGenerateNode):
                 **kwargs) -> NodeResult:
         print(model_params_setting)
         application = self.workflow_manage.work_flow_post_handler.chat_info.application
-        tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting)
+        tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
+                                                        **model_params_setting)
         history_message = self.get_history_message(history_chat_record, dialogue_number)
         self.context['history_message'] = history_message
         question = self.generate_prompt_question(prompt)
```
**Image-understanding node: `is_result` gating.**

```diff
@@ -69,7 +69,8 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
                 model_params_setting,
```
**MCP node (`BaseMcpNode`): `is_result` gating and a line wrap of the `NodeResult` return.**

```diff
@@ -14,7 +14,8 @@ class BaseMcpNode(IMcpNode):
         self.context['result'] = details.get('result')
         self.context['tool_params'] = details.get('tool_params')
         self.context['mcp_tool'] = details.get('mcp_tool')
-        self.answer_text = details.get('result')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('result')
 
     def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
         servers = json.loads(mcp_servers)
@@ -27,7 +28,8 @@ class BaseMcpNode(IMcpNode):
             return s
 
         res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params))
-        return NodeResult({'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
+        return NodeResult(
+            {'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
 
     def handle_variables(self, tool_params):
         # Handle variables inside the parameters
```
**Question node: `is_result` gating.**

```diff
@@ -80,7 +80,8 @@ class BaseQuestionNode(IQuestionNode):
         self.context['answer'] = details.get('answer')
         self.context['message_tokens'] = details.get('message_tokens')
         self.context['answer_tokens'] = details.get('answer_tokens')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
                 model_params_setting=None,
```
**Speech-to-text node: `is_result` gating.**

```diff
@@ -18,7 +18,8 @@ class BaseSpeechToTextNode(ISpeechToTextNode):
 
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, stt_model_id, chat_id, audio, **kwargs) -> NodeResult:
         stt_model = get_model_instance_by_model_user_id(stt_model_id, self.flow_params_serializer.data.get('user_id'))
```
**Start node (`BaseStartStepNode`): the new `other` file channel is captured in context and node output, and `workflow_variable` entries are written back into the workflow context in `save_context`.**

```diff
@@ -40,10 +40,13 @@ class BaseStartStepNode(IStarNode):
         self.context['document'] = details.get('document_list')
         self.context['image'] = details.get('image_list')
         self.context['audio'] = details.get('audio_list')
+        self.context['other'] = details.get('other_list')
         self.status = details.get('status')
         self.err_message = details.get('err_message')
+        for key, value in workflow_variable.items():
+            workflow_manage.context[key] = value
         for item in details.get('global_fields', []):
             workflow_manage.context[item.get('key')] = item.get('value')
 
     def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
         pass
@@ -59,7 +62,8 @@ class BaseStartStepNode(IStarNode):
             'question': question,
             'image': self.workflow_manage.image_list,
             'document': self.workflow_manage.document_list,
-            'audio': self.workflow_manage.audio_list
+            'audio': self.workflow_manage.audio_list,
+            'other': self.workflow_manage.other_list,
         }
         return NodeResult(node_variable, workflow_variable)
 
@@ -83,5 +87,6 @@ class BaseStartStepNode(IStarNode):
             'image_list': self.context.get('image'),
             'document_list': self.context.get('document'),
             'audio_list': self.context.get('audio'),
+            'other_list': self.context.get('other'),
             'global_fields': global_fields
         }
```
**Text-to-speech node: `is_result` gating.**

```diff
@@ -37,7 +37,8 @@ def bytes_to_uploaded_file(file_bytes, file_name="generated_audio.mp3"):
 class BaseTextToSpeechNode(ITextToSpeechNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
-        self.answer_text = details.get('answer')
+        if self.node_params.get('is_result', False):
+            self.answer_text = details.get('answer')
 
     def execute(self, tts_model_id, chat_id,
                 content, model_params_setting=None,
```
**`WorkflowManage`: an `other_list` attachment channel (defaulting to a fresh list) joins the existing image/document/audio channels.**

```diff
@@ -238,6 +238,7 @@ class WorkflowManage:
                  base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None,
                  document_list=None,
                  audio_list=None,
+                 other_list=None,
                  start_node_id=None,
                  start_node_data=None, chat_record=None, child_node=None):
         if form_data is None:
@@ -248,12 +249,15 @@ class WorkflowManage:
             document_list = []
         if audio_list is None:
             audio_list = []
+        if other_list is None:
+            other_list = []
         self.start_node_id = start_node_id
         self.start_node = None
         self.form_data = form_data
         self.image_list = image_list
         self.document_list = document_list
         self.audio_list = audio_list
+        self.other_list = other_list
         self.params = params
         self.flow = flow
         self.context = {}
```
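The `other_list=None` parameter plus the `if other_list is None: other_list = []` guard is the standard Python defence against shared mutable defaults; a quick illustration with toy functions (not from the repo):

```python
def bad(values=[]):       # one list object shared across every call
    values.append(1)
    return values

def good(values=None):    # a fresh list per call, as WorkflowManage does
    if values is None:
        values = []
    values.append(1)
    return values

assert bad() == [1] and bad() == [1, 1]    # state leaks between calls
assert good() == [1] and good() == [1]     # no leakage
```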
**`ChatRecord` model: `gettext` is imported, and `get_ai_message` now falls back to an apologetic placeholder when the stored answer is empty instead of feeding an empty `AIMessage` back into the conversation history.**

```diff
@@ -11,7 +11,7 @@ import uuid
 from django.contrib.postgres.fields import ArrayField
 from django.db import models
 from langchain.schema import HumanMessage, AIMessage
-
+from django.utils.translation import gettext as _
 from common.encoder.encoder import SystemEncoder
 from common.mixins.app_model_mixin import AppModelMixin
 from dataset.models.data_set import DataSet
@@ -167,7 +167,11 @@ class ChatRecord(AppModelMixin):
         return HumanMessage(content=self.problem_text)
 
     def get_ai_message(self):
-        return AIMessage(content=self.answer_text)
+        answer_text = self.answer_text
+        if answer_text is None or len(str(answer_text).strip()) == 0:
+            answer_text = _(
+                'Sorry, no relevant content was found. Please re-describe your problem or provide more information. ')
+        return AIMessage(content=answer_text)
 
     def get_node_details_runtime_node_id(self, runtime_node_id):
         return self.details.get(runtime_node_id, None)
```
**Application serializers: the reasoning-marker fields gain `trim_whitespace=False`; the `prologue` (opening remarks) limit grows from 4096 to 102400 characters in all three serializers that declare it; the public application profile exposes only the workflow's `base-node` instead of the full graph; an application `name` can now be updated from node data; `stdio` MCP servers are rejected; and two `print(application.name)` debug statements are added.**

```diff
@@ -148,10 +148,12 @@ class ModelSettingSerializer(serializers.Serializer):
                                              error_messages=ErrMessage.char(_("Thinking process switch")))
     reasoning_content_start = serializers.CharField(required=False, allow_null=True, default="<think>",
                                                     allow_blank=True, max_length=256,
+                                                    trim_whitespace=False,
                                                     error_messages=ErrMessage.char(
                                                         _("The thinking process begins to mark")))
     reasoning_content_end = serializers.CharField(required=False, allow_null=True, allow_blank=True, default="</think>",
                                                   max_length=256,
+                                                  trim_whitespace=False,
                                                   error_messages=ErrMessage.char(_("End of thinking process marker")))
 
 
@@ -162,7 +164,7 @@ class ApplicationWorkflowSerializer(serializers.Serializer):
                                  max_length=256, min_length=1,
                                  error_messages=ErrMessage.char(_("Application Description")))
     work_flow = serializers.DictField(required=False, error_messages=ErrMessage.dict(_("Workflow Objects")))
-    prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
+    prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
                                      error_messages=ErrMessage.char(_("Opening remarks")))
 
     @staticmethod
@@ -225,7 +227,7 @@ class ApplicationSerializer(serializers.Serializer):
                                                min_value=0,
                                                max_value=1024,
                                                error_messages=ErrMessage.integer(_("Historical chat records")))
-    prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
+    prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
                                      error_messages=ErrMessage.char(_("Opening remarks")))
     dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
                                                  allow_null=True,
@@ -493,7 +495,7 @@ class ApplicationSerializer(serializers.Serializer):
                                                min_value=0,
                                                max_value=1024,
                                                error_messages=ErrMessage.integer(_("Historical chat records")))
-    prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=4096,
+    prologue = serializers.CharField(required=False, allow_null=True, allow_blank=True, max_length=102400,
                                      error_messages=ErrMessage.char(_("Opening remarks")))
     dataset_id_list = serializers.ListSerializer(required=False, child=serializers.UUIDField(required=True),
                                                  error_messages=ErrMessage.list(_("Related Knowledge Base"))
@@ -1010,7 +1012,8 @@ class ApplicationSerializer(serializers.Serializer):
                 'stt_autosend': application.stt_autosend,
                 'file_upload_enable': application.file_upload_enable,
                 'file_upload_setting': application.file_upload_setting,
-                'work_flow': application.work_flow,
+                'work_flow': {'nodes': [node for node in ((application.work_flow or {}).get('nodes', []) or []) if
+                                        node.get('id') == 'base-node']},
                 'show_source': application_access_token.show_source,
                 'language': application_access_token.language,
                 **application_setting_dict})
@@ -1071,6 +1074,7 @@ class ApplicationSerializer(serializers.Serializer):
             for update_key in update_keys:
                 if update_key in instance and instance.get(update_key) is not None:
                     application.__setattr__(update_key, instance.get(update_key))
+            print(application.name)
             application.save()
 
             if 'dataset_id_list' in instance:
@@ -1089,6 +1093,7 @@ class ApplicationSerializer(serializers.Serializer):
             chat_cache.clear_by_application_id(application_id)
             application_access_token = QuerySet(ApplicationAccessToken).filter(application_id=application_id).first()
             # refresh the cached data
+            print(application.name)
             get_application_access_token(application_access_token.access_token, False)
             return self.one(with_valid=False)
 
@@ -1141,6 +1146,8 @@ class ApplicationSerializer(serializers.Serializer):
                     instance['file_upload_enable'] = node_data['file_upload_enable']
                 if 'file_upload_setting' in node_data:
                     instance['file_upload_setting'] = node_data['file_upload_setting']
+                if 'name' in node_data:
+                    instance['name'] = node_data['name']
                 break
 
     def speech_to_text(self, file, with_valid=True):
@@ -1318,6 +1325,8 @@ class ApplicationSerializer(serializers.Serializer):
     def get_mcp_servers(self, with_valid=True):
         if with_valid:
             self.is_valid(raise_exception=True)
+        if '"stdio"' in self.data.get('mcp_servers'):
+            raise AppApiException(500, _('stdio is not supported'))
         servers = json.loads(self.data.get('mcp_servers'))
 
         async def get_mcp_tools(servers):
```
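Why `trim_whitespace=False` matters for the reasoning markers: DRF's `CharField` strips leading and trailing whitespace by default, which would silently mangle user-configured `<think>` delimiters. A dependency-free sketch of that default behaviour (the real logic lives in `rest_framework.fields.CharField`):

```python
def char_field_clean(value: str, trim_whitespace: bool = True) -> str:
    # DRF applies str.strip() when trim_whitespace is left at its default.
    return value.strip() if trim_whitespace else value

marker = ' <think>\n'
assert char_field_clean(marker) == '<think>'                      # whitespace lost
assert char_field_clean(marker, trim_whitespace=False) == marker  # preserved
```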
**OpenAI-compatible chat serializers: `generate_chat` now records who is asking (`asker`, defaulting to `{'user_name': '游客'}` — "guest"), and the new `other_list` attachment channel is threaded through `ChatMessageSerializer` into the workflow.**

```diff
@@ -213,12 +213,21 @@ class OpenAIChatSerializer(serializers.Serializer):
         return instance.get('messages')[-1].get('content')
 
     @staticmethod
-    def generate_chat(chat_id, application_id, message, client_id):
+    def generate_chat(chat_id, application_id, message, client_id, asker=None):
         if chat_id is None:
             chat_id = str(uuid.uuid1())
         chat = QuerySet(Chat).filter(id=chat_id).first()
         if chat is None:
-            Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id).save()
+            asker_dict = {'user_name': '游客'}
+            if asker is not None:
+                if isinstance(asker, str):
+                    asker_dict = {
+                        'user_name': asker
+                    }
+                elif isinstance(asker, dict):
+                    asker_dict = asker
+            Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id,
+                 asker=asker_dict).save()
         return chat_id
 
     def chat(self, instance: Dict, with_valid=True):
@@ -232,7 +241,8 @@ class OpenAIChatSerializer(serializers.Serializer):
         application_id = self.data.get('application_id')
         client_id = self.data.get('client_id')
         client_type = self.data.get('client_type')
-        chat_id = self.generate_chat(chat_id, application_id, message, client_id)
+        chat_id = self.generate_chat(chat_id, application_id, message, client_id,
+                                     asker=instance.get('form_data', {}).get("asker"))
         return ChatMessageSerializer(
             data={
                 'chat_id': chat_id, 'message': message,
@@ -245,6 +255,7 @@ class OpenAIChatSerializer(serializers.Serializer):
                 'image_list': instance.get('image_list', []),
                 'document_list': instance.get('document_list', []),
                 'audio_list': instance.get('audio_list', []),
+                'other_list': instance.get('other_list', []),
             }
         ).chat(base_to_response=OpenaiToResponse())
 
@@ -274,6 +285,7 @@ class ChatMessageSerializer(serializers.Serializer):
     image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
     document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
     audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
+    other_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Other")))
     child_node = serializers.DictField(required=False, allow_null=True,
                                        error_messages=ErrMessage.dict(_("Child Nodes")))
 
@@ -372,6 +384,7 @@ class ChatMessageSerializer(serializers.Serializer):
         image_list = self.data.get('image_list')
         document_list = self.data.get('document_list')
         audio_list = self.data.get('audio_list')
+        other_list = self.data.get('other_list')
         user_id = chat_info.application.user_id
         chat_record_id = self.data.get('chat_record_id')
         chat_record = None
@@ -388,7 +401,7 @@ class ChatMessageSerializer(serializers.Serializer):
              'client_id': client_id,
              'client_type': client_type,
              'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type),
-            base_to_response, form_data, image_list, document_list, audio_list,
+            base_to_response, form_data, image_list, document_list, audio_list, other_list,
             self.data.get('runtime_node_id'),
             self.data.get('node_data'), chat_record, self.data.get('child_node'))
         r = work_flow_manage.run()
```
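The asker handling above normalizes either a plain user name or a full dict into one shape. A distilled sketch (standalone function for illustration; the real code inlines this in `generate_chat`):

```python
def normalize_asker(asker):
    # Default identity is the Chinese '游客' ("guest").
    asker_dict = {'user_name': '游客'}
    if isinstance(asker, str):
        asker_dict = {'user_name': asker}
    elif isinstance(asker, dict):
        asker_dict = asker
    return asker_dict

assert normalize_asker(None) == {'user_name': '游客'}
assert normalize_asker('alice') == {'user_name': 'alice'}
assert normalize_asker({'user_name': 'bob', 'dept': 'qa'})['dept'] == 'qa'
```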
**Chat-log listing (`ChatSerializers`): instead of a single queryset, the query builder now returns an `inner_queryset` (the application's chats, optionally filtered by abstract) alongside the default ordered queryset, matching the SQL template changes below.**

```diff
@@ -174,7 +174,14 @@ class ChatSerializers(serializers.Serializer):
             condition = base_condition & min_trample_query
         else:
             condition = base_condition
-        return query_set.filter(condition).order_by("-application_chat.update_time")
+        inner_queryset = QuerySet(Chat).filter(application_id=self.data.get("application_id"))
+        if 'abstract' in self.data and self.data.get('abstract') is not None:
+            inner_queryset = inner_queryset.filter(abstract__icontains=self.data.get('abstract'))
+
+        return {
+            'inner_queryset': inner_queryset,
+            'default_queryset': query_set.filter(condition).order_by("-application_chat.update_time")
+        }
 
     def list(self, with_valid=True):
         if with_valid:
```
**Chat-log SQL templates (two files): the per-chat aggregation is restricted to chats matched by `${inner_queryset}`, and `${default_queryset}` is appended to the outer query.**

```diff
@@ -23,6 +23,8 @@ FROM
         chat_id
     FROM
         application_chat_record
+    WHERE chat_id IN (
+        SELECT id FROM application_chat ${inner_queryset})
     GROUP BY
         application_chat_record.chat_id
     ) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
@@ -35,4 +37,5 @@ FROM
         END as improve_paragraph_list
     FROM
         application_chat_record application_chat_record
     ) application_chat_record_temp ON application_chat_record_temp.chat_id = application_chat."id"
+${default_queryset}
```

```diff
@@ -11,6 +11,9 @@ FROM
         chat_id
     FROM
         application_chat_record
+    WHERE chat_id IN (
+        SELECT id FROM application_chat ${inner_queryset})
     GROUP BY
         application_chat_record.chat_id
     ) chat_record_temp ON application_chat."id" = chat_record_temp.chat_id
+${default_queryset}
```
**OpenAPI schema definitions (`ApplicationApi`, drf-yasg): four `get_response_body_api` helpers are added so the views below can declare response schemas — the authentication token, the API key object, the access-token settings, and the full application detail. Note the source's own quirk: the `required` list in the last hunk repeats `'stt_model_enable'` where `'tts_model_enable'` was presumably intended.**

```diff
@@ -38,6 +38,15 @@ class ApplicationApi(ApiMixin):
             }
         )
 
+        @staticmethod
+        def get_response_body_api():
+            return openapi.Schema(
+                type=openapi.TYPE_STRING,
+                title=_("Application authentication token"),
+                description=_("Application authentication token"),
+                default="token"
+            )
+
     @staticmethod
     def get_response_body_api():
         return openapi.Schema(
@@ -133,6 +142,27 @@ class ApplicationApi(ApiMixin):
             }
         )
 
+        @staticmethod
+        def get_response_body_api():
+            return openapi.Schema(
+                type=openapi.TYPE_OBJECT,
+                properties={
+                    'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
+                                         description=_("Primary key id")),
+                    'secret_key': openapi.Schema(type=openapi.TYPE_STRING, title=_("Secret key"),
+                                                 description=_("Secret key")),
+                    'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
+                                                description=_("Is activation")),
+                    'application_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application ID"),
+                                                     description=_("Application ID")),
+                    'allow_cross_domain': openapi.Schema(type=openapi.TYPE_BOOLEAN,
+                                                         title=_("Is cross-domain allowed"),
+                                                         description=_("Is cross-domain allowed")),
+                    'cross_domain_list': openapi.Schema(type=openapi.TYPE_ARRAY, title=_('Cross-domain list'),
+                                                        items=openapi.Schema(type=openapi.TYPE_STRING))
+                }
+            )
+
     class AccessToken(ApiMixin):
         @staticmethod
         def get_request_params_api():
@@ -171,6 +201,37 @@ class ApplicationApi(ApiMixin):
             }
         )
 
+        @staticmethod
+        def get_response_body_api():
+            return openapi.Schema(
+                type=openapi.TYPE_OBJECT,
+                required=[],
+                properties={
+                    'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
+                                         description=_("Primary key id")),
+                    'access_token': openapi.Schema(type=openapi.TYPE_STRING, title=_("Access Token"),
+                                                   description=_("Access Token")),
+                    'access_token_reset': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Reset Token"),
+                                                         description=_("Reset Token")),
+
+                    'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Is activation"),
+                                                description=_("Is activation")),
+                    'access_num': openapi.Schema(type=openapi.TYPE_NUMBER, title=_("Number of visits"),
+                                                 description=_("Number of visits")),
+                    'white_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Whether to enable whitelist"),
+                                                   description=_("Whether to enable whitelist")),
+                    'white_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+                                                 items=openapi.Schema(type=openapi.TYPE_STRING), title=_("Whitelist"),
+                                                 description=_("Whitelist")),
+                    'show_source': openapi.Schema(type=openapi.TYPE_BOOLEAN,
+                                                  title=_("Whether to display knowledge sources"),
+                                                  description=_("Whether to display knowledge sources")),
+                    'language': openapi.Schema(type=openapi.TYPE_STRING,
+                                               title=_("language"),
+                                               description=_("language"))
+                }
+            )
+
     class Edit(ApiMixin):
         @staticmethod
         def get_request_body_api():
@@ -367,6 +428,56 @@ class ApplicationApi(ApiMixin):
             }
         )
 
+        @staticmethod
+        def get_response_body_api():
+            return openapi.Schema(
+                type=openapi.TYPE_OBJECT,
+                required=['id', 'name', 'desc', 'model_id', 'dialogue_number', 'dataset_setting', 'model_setting',
+                          'problem_optimization', 'stt_model_enable', 'stt_model_enable', 'tts_type',
+                          'work_flow'],
+                properties={
+                    'id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Primary key id"),
+                                         description=_("Primary key id")),
+                    'name': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Name"),
+                                           description=_("Application Name")),
+                    'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Description"),
+                                           description=_("Application Description")),
+                    'model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Model id"),
+                                               description=_("Model id")),
+                    "dialogue_number": openapi.Schema(type=openapi.TYPE_NUMBER,
+                                                      title=_("Number of multi-round conversations"),
+                                                      description=_("Number of multi-round conversations")),
+                    'prologue': openapi.Schema(type=openapi.TYPE_STRING, title=_("Opening remarks"),
+                                               description=_("Opening remarks")),
+                    'dataset_id_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+                                                      items=openapi.Schema(type=openapi.TYPE_STRING),
+                                                      title=_("List of associated knowledge base IDs"),
+                                                      description=_("List of associated knowledge base IDs")),
+                    'dataset_setting': ApplicationApi.DatasetSetting.get_request_body_api(),
+                    'model_setting': ApplicationApi.ModelSetting.get_request_body_api(),
+                    'problem_optimization': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_("Problem Optimization"),
+                                                           description=_("Problem Optimization"), default=True),
+                    'type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Application Type"),
+                                           description=_("Application Type SIMPLE | WORK_FLOW")),
+                    'problem_optimization_prompt': openapi.Schema(type=openapi.TYPE_STRING,
+                                                                  title=_('Question optimization tips'),
+                                                                  description=_("Question optimization tips"),
+                                                                  default=_(
+                                                                      "() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")),
+                    'tts_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech model ID"),
+                                                   description=_("Text-to-speech model ID")),
+                    'stt_model_id': openapi.Schema(type=openapi.TYPE_STRING, title=_("Speech-to-text model id"),
+                                                   description=_("Speech-to-text model id")),
+                    'stt_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is speech-to-text enabled"),
+                                                       description=_("Is speech-to-text enabled")),
+                    'tts_model_enable': openapi.Schema(type=openapi.TYPE_STRING, title=_("Is text-to-speech enabled"),
+                                                       description=_("Is text-to-speech enabled")),
+                    'tts_type': openapi.Schema(type=openapi.TYPE_STRING, title=_("Text-to-speech type"),
+                                               description=_("Text-to-speech type")),
+                    'work_flow': ApplicationApi.WorkFlow.get_request_body_api(),
+                }
+            )
+
     class Query(ApiMixin):
         @staticmethod
         def get_request_params_api():
```
**OpenAPI schema definitions (`ChatApi`): a response schema for the conversation-id endpoints.**

```diff
@@ -319,6 +319,15 @@ class ChatApi(ApiMixin):
             }
         )
 
+        @staticmethod
+        def get_response_body_api():
+            return openapi.Schema(
+                type=openapi.TYPE_STRING,
+                title=_("Conversation ID"),
+                description=_("Conversation ID"),
+                default="chat_id"
+            )
+
     @staticmethod
     def get_request_params_api():
         return [openapi.Parameter(name='application_id',
```
**Application views: the `swagger_auto_schema` decorators for API-key modification, access-token modification, authentication, and application creation now declare `responses=` using the schemas added above.**

```diff
@@ -373,7 +373,8 @@ class Application(APIView):
                          operation_id=_("Modify application API_KEY"),
                          tags=[_('Application/API_KEY')],
                          manual_parameters=ApplicationApi.ApiKey.Operate.get_request_params_api(),
-                         request_body=ApplicationApi.ApiKey.Operate.get_request_body_api())
+                         request_body=ApplicationApi.ApiKey.Operate.get_request_body_api(),
+                         responses=result.get_api_response(ApplicationApi.ApiKey.Operate.get_response_body_api()))
     @has_permissions(ViewPermission(
         [RoleConstants.ADMIN, RoleConstants.USER],
         [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@@ -415,7 +416,8 @@ class Application(APIView):
                          operation_id=_("Modify Application AccessToken"),
                          tags=[_('Application/Public Access')],
                          manual_parameters=ApplicationApi.AccessToken.get_request_params_api(),
-                         request_body=ApplicationApi.AccessToken.get_request_body_api())
+                         request_body=ApplicationApi.AccessToken.get_request_body_api(),
+                         responses=result.get_api_response(ApplicationApi.AccessToken.get_response_body_api()))
     @has_permissions(ViewPermission(
         [RoleConstants.ADMIN, RoleConstants.USER],
         [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.MANAGE,
@@ -455,6 +457,7 @@ class Application(APIView):
     @swagger_auto_schema(operation_summary=_("Application Certification"),
                          operation_id=_("Application Certification"),
                          request_body=ApplicationApi.Authentication.get_request_body_api(),
+                         responses=result.get_api_response(ApplicationApi.Authentication.get_response_body_api()),
                          tags=[_("Application/Certification")],
                          security=[])
     def post(self, request: Request):
@@ -472,6 +475,7 @@ class Application(APIView):
     @swagger_auto_schema(operation_summary=_("Create an application"),
                          operation_id=_("Create an application"),
                          request_body=ApplicationApi.Create.get_request_body_api(),
+                         responses=result.get_api_response(ApplicationApi.Create.get_response_body_api()),
                          tags=[_('Application')])
     @has_permissions(PermissionConstants.APPLICATION_CREATE, compare=CompareConstants.AND)
     @log(menu='Application', operate="Create an application",
```
**Chat views (`ChatView`): response schemas are declared for the temporary-session and log endpoints, `other_list` is accepted from the request payload, and the "modify dialogue summary" endpoint is opened up to the ADMIN and USER roles in addition to the application access token.**

```diff
@@ -94,6 +94,7 @@ class ChatView(APIView):
     @swagger_auto_schema(operation_summary=_("Get the workflow temporary session id"),
                          operation_id=_("Get the workflow temporary session id"),
                          request_body=ChatApi.OpenWorkFlowTemp.get_request_body_api(),
+                         responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
                          tags=[_("Application/Chat")])
     def post(self, request: Request):
         return result.success(ChatSerializers.OpenWorkFlowChat(
@@ -106,6 +107,7 @@ class ChatView(APIView):
     @swagger_auto_schema(operation_summary=_("Get a temporary session id"),
                          operation_id=_("Get a temporary session id"),
                          request_body=ChatApi.OpenTempChat.get_request_body_api(),
+                         responses=result.get_api_response(ChatApi.OpenTempChat.get_response_body_api()),
                          tags=[_("Application/Chat")])
     @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
     def post(self, request: Request):
@@ -144,6 +146,8 @@ class ChatView(APIView):
                              'document_list') if 'document_list' in request.data else [],
                          'audio_list': request.data.get(
                              'audio_list') if 'audio_list' in request.data else [],
+                         'other_list': request.data.get(
+                             'other_list') if 'other_list' in request.data else [],
                          'client_type': request.auth.client_type,
                          'node_id': request.data.get('node_id', None),
                          'runtime_node_id': request.data.get('runtime_node_id', None),
@@ -237,9 +241,10 @@ class ChatView(APIView):
     @swagger_auto_schema(operation_summary=_("Client modifies dialogue summary"),
                          operation_id=_("Client modifies dialogue summary"),
                          request_body=ChatClientHistoryApi.Operate.ReAbstract.get_request_body_api(),
+                         responses=result.get_default_response(),
                          tags=[_("Application/Conversation Log")])
     @has_permissions(ViewPermission(
-        [RoleConstants.APPLICATION_ACCESS_TOKEN],
+        [RoleConstants.APPLICATION_ACCESS_TOKEN, RoleConstants.ADMIN, RoleConstants.USER],
         [lambda r, keywords: Permission(group=Group.APPLICATION, operate=Operate.USE,
                                         dynamic_tag=keywords.get('application_id'))],
         compare=CompareConstants.AND),
@@ -416,6 +421,7 @@ class ChatView(APIView):
                          operation_id=_("Add to Knowledge Base"),
                          manual_parameters=ImproveApi.get_request_params_api_post(),
                          request_body=ImproveApi.get_request_body_api_post(),
+                         responses=result.get_default_response(),
                          tags=[_("Application/Conversation Log/Add to Knowledge Base")]
                          )
     @has_permissions(
```
**Model instance cache (`ModelManage`): the single global lock around every `get_model` call is replaced by per-model-id double-checked locking, cached models are kept for 8 hours instead of 30 minutes, and expired-entry cleanup moves to a background thread run at most hourly.** Chinese comments in the old code are translated below (`# 获取锁` "acquire the lock", `# 续期` "renew the entry", `# 释放锁` "release the lock").

```diff
@@ -11,35 +11,50 @@ import time
 
 from common.cache.mem_cache import MemCache
 
-lock = threading.Lock()
+_lock = threading.Lock()
+locks = {}
 
 
 class ModelManage:
     cache = MemCache('model', {})
     up_clear_time = time.time()
 
+    @staticmethod
+    def _get_lock(_id):
+        lock = locks.get(_id)
+        if lock is None:
+            with _lock:
+                lock = locks.get(_id)
+                if lock is None:
+                    lock = threading.Lock()
+                    locks[_id] = lock
+
+        return lock
+
     @staticmethod
     def get_model(_id, get_model):
-        # acquire the lock
-        lock.acquire()
-        try:
-            model_instance = ModelManage.cache.get(_id)
-            if model_instance is None or not model_instance.is_cache_model():
-                model_instance = get_model(_id)
-                ModelManage.cache.set(_id, model_instance, timeout=60 * 30)
-                return model_instance
-            # renew the entry
-            ModelManage.cache.touch(_id, timeout=60 * 30)
-            ModelManage.clear_timeout_cache()
-            return model_instance
-        finally:
-            # release the lock
-            lock.release()
+        model_instance = ModelManage.cache.get(_id)
+        if model_instance is None:
+            lock = ModelManage._get_lock(_id)
+            with lock:
+                model_instance = ModelManage.cache.get(_id)
+                if model_instance is None:
+                    model_instance = get_model(_id)
+                    ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8)
+        else:
+            if model_instance.is_cache_model():
+                ModelManage.cache.touch(_id, timeout=60 * 60 * 8)
+            else:
+                model_instance = get_model(_id)
+                ModelManage.cache.set(_id, model_instance, timeout=60 * 60 * 8)
+        ModelManage.clear_timeout_cache()
+        return model_instance
 
     @staticmethod
     def clear_timeout_cache():
-        if time.time() - ModelManage.up_clear_time > 60:
-            ModelManage.cache.clear_timeout_data()
+        if time.time() - ModelManage.up_clear_time > 60 * 60:
+            threading.Thread(target=lambda: ModelManage.cache.clear_timeout_data()).start()
             ModelManage.up_clear_time = time.time()
 
     @staticmethod
     def delete_key(_id):
```
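The rewritten `ModelManage` swaps one global lock for per-id locks created under a short-lived global lock — classic double-checked locking, so two different model ids can load concurrently while two threads asking for the same id do the work once. A minimal standalone sketch of just the lock registry (mine, for illustration):

```python
import threading

_registry_lock = threading.Lock()
_locks: dict[str, threading.Lock] = {}

def get_lock(key: str) -> threading.Lock:
    lock = _locks.get(key)           # fast path: no global lock taken
    if lock is None:
        with _registry_lock:         # short critical section
            lock = _locks.get(key)   # re-check under the lock
            if lock is None:
                lock = threading.Lock()
                _locks[key] = lock
    return lock

# Two callers asking for the same key always receive the same Lock object.
assert get_lock('model-a') is get_lock('model-a')
assert get_lock('model-a') is not get_lock('model-b')
```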
**`ListenerManagement`: the manual `lock.acquire()`/`release()` pair around `native_update` becomes a `with lock:` block, and a stray blank line is removed (the translated comment marks paragraph-level vectorization).**

```diff
@@ -238,11 +238,8 @@ class ListenerManagement:
         for key in params_dict:
             _value_ = params_dict[key]
             exec_sql = exec_sql.replace(key, str(_value_))
-        lock.acquire()
-        try:
+        with lock:
             native_update(query_set, exec_sql)
-        finally:
-            lock.release()
 
     @staticmethod
     def embedding_by_document(document_id, embedding_model: Embeddings, state_list=None):
@@ -272,7 +269,6 @@ class ListenerManagement:
         ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
                                          State.STARTED)
 
-
         # vectorize paragraph by paragraph
         page_desc(QuerySet(Paragraph)
                   .annotate(
```
**Form-field package `__init__`: the new switch field is exported.**

```diff
@@ -22,3 +22,4 @@ from .table_checkbox import *
 from .radio_card_field import *
 from .label import *
 from .slider_field import *
+from .switch_field import *
```
@@ -28,6 +28,6 @@ class SwitchField(BaseField):
         @param props_info:
         """
-        super().__init__('Switch', label, required, default_value, relation_show_field_dict,
+        super().__init__('SwitchInput', label, required, default_value, relation_show_field_dict,
                          {},
                          TriggerType.OPTION_LIST, attrs, props_info)
@@ -112,11 +112,7 @@ def get_image_id_func():

 title_font_list = [
     [36, 100],
-    [26, 36],
-    [24, 26],
-    [22, 24],
-    [18, 22],
-    [16, 18]
+    [30, 36]
 ]
@@ -130,7 +126,7 @@ def get_title_level(paragraph: Paragraph):
     if len(paragraph.runs) == 1:
         font_size = paragraph.runs[0].font.size
         pt = font_size.pt
-        if pt >= 16:
+        if pt >= 30:
             for _value, index in zip(title_font_list, range(len(title_font_list))):
                 if pt >= _value[0] and pt < _value[1]:
                     return index + 1
@@ -82,7 +82,10 @@ class XlsSplitHandle(BaseParseTableHandle):
             for row in data:
                 # replace line breaks inside each cell with <br> to preserve the original format
-                md_table += '| ' + ' | '.join(
-                    [str(cell).replace('\n', '<br>') if cell else '' for cell in row]) + ' |\n'
+                md_table += '| ' + ' | '.join(
+                    [str(cell)
+                     .replace('\r\n', '<br>')
+                     .replace('\n', '<br>')
+                     if cell else '' for cell in row]) + ' |\n'
             md_tables += md_table + '\n\n'

         return md_tables
@@ -19,36 +19,24 @@ class XlsxSplitHandle(BaseParseTableHandle):

     def fill_merged_cells(self, sheet, image_dict):
         data = []
-
-        # take the first row as the header row
-        headers = []
-        for idx, cell in enumerate(sheet[1]):
-            if cell.value is None:
-                headers.append(' ' * (idx + 1))
-            else:
-                headers.append(cell.value)
-
-        # walk every row starting from the second one
-        for row in sheet.iter_rows(min_row=2, values_only=False):
-            row_data = {}
+        for row in sheet.iter_rows(values_only=False):
+            row_data = []
             for col_idx, cell in enumerate(row):
                 cell_value = cell.value
-
-                # if the cell is empty and sits inside a merged range, take the merged range's value
-                if cell_value is None:
-                    for merged_range in sheet.merged_cells.ranges:
-                        if cell.coordinate in merged_range:
-                            cell_value = sheet[merged_range.min_row][merged_range.min_col - 1].value
-                            break
-
                 image = image_dict.get(cell_value, None)
                 if image is not None:
                     cell_value = f''
-                # store the cell value in a dict keyed by its header
-                row_data[headers[col_idx]] = cell_value
+                row_data.insert(col_idx, cell_value)
             data.append(row_data)
+
+        for merged_range in sheet.merged_cells.ranges:
+            cell_value = data[merged_range.min_row - 1][merged_range.min_col - 1]
+            for row_index in range(merged_range.min_row, merged_range.max_row + 1):
+                for col_index in range(merged_range.min_col, merged_range.max_col + 1):
+                    data[row_index - 1][col_index - 1] = cell_value
         return data

     def handle(self, file, get_buffer, save_image):
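Note: the new fill_merged_cells collects raw cell values first, then back-fills every merged range with its top-left anchor value, which is cheaper than probing sheet.merged_cells per empty cell. A minimal standalone sketch of that back-fill step with openpyxl (file name assumed for illustration):

from openpyxl import load_workbook

wb = load_workbook('table.xlsx')  # hypothetical input file
ws = wb.active

# Collect raw values row by row; non-anchor cells of a merged range read as None here.
data = [[cell.value for cell in row] for row in ws.iter_rows()]

# Back-fill each merged range with the value of its top-left anchor cell.
for rng in ws.merged_cells.ranges:
    anchor = data[rng.min_row - 1][rng.min_col - 1]
    for r in range(rng.min_row, rng.max_row + 1):
        for c in range(rng.min_col, rng.max_col + 1):
            data[r - 1][c - 1] = anchor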
@@ -65,11 +53,13 @@ class XlsxSplitHandle(BaseParseTableHandle):
             paragraphs = []
             ws = wb[sheetname]
             data = self.fill_merged_cells(ws, image_dict)

-            for row in data:
-                row_output = "; ".join([f"{key}: {value}" for key, value in row.items()])
-                # print(row_output)
-                paragraphs.append({'title': '', 'content': row_output})
+            if len(data) >= 2:
+                head_list = data[0]
+                for row_index in range(1, len(data)):
+                    row_output = "; ".join(
+                        [f"{head_list[col_index]}: {data[row_index][col_index]}" for col_index in
+                         range(0, len(data[row_index]))])
+                    paragraphs.append({'title': '', 'content': row_output})

             result.append({'name': sheetname, 'paragraphs': paragraphs})
@@ -78,7 +68,6 @@ class XlsxSplitHandle(BaseParseTableHandle):
                 return [{'name': file.name, 'paragraphs': []}]
         return result

-
     def get_content(self, file, save_image):
         try:
             # load the Excel file
@@ -94,18 +83,18 @@ class XlsxSplitHandle(BaseParseTableHandle):
             # if sheet_name is not specified, use the first worksheet
             for sheetname in workbook.sheetnames:
                 sheet = workbook[sheetname] if sheetname else workbook.active
-                rows = self.fill_merged_cells(sheet, image_dict)
-                if len(rows) == 0:
+                data = self.fill_merged_cells(sheet, image_dict)
+                if len(data) == 0:
                     continue
                 # extract the header and the content
-                headers = [f"{key}" for key, value in rows[0].items()]
+                headers = [f"{value}" for value in data[0]]

                 # build the Markdown table
                 md_table = '| ' + ' | '.join(headers) + ' |\n'
                 md_table += '| ' + ' | '.join(['---'] * len(headers)) + ' |\n'
-                for row in rows:
-                    r = [f'{value}' for key, value in row.items()]
+                for row_index in range(1, len(data)):
+                    r = [f'{value}' for value in data[row_index]]
                     md_table += '| ' + ' | '.join(
                         [str(cell).replace('\n', '<br>') if cell is not None else '' for cell in r]) + ' |\n'
@@ -14,7 +14,7 @@ from common.handle.base_split_handle import BaseSplitHandle


 def post_cell(cell_value):
-    return cell_value.replace('\n', '<br>').replace('|', '|')
+    return cell_value.replace('\r\n', '<br>').replace('\n', '<br>').replace('|', '|')


 def row_to_md(row):
@@ -24,12 +24,13 @@ class GunicornLocalModelService(BaseService):
         os.environ.setdefault('SERVER_NAME', 'local_model')
         log_format = '%(h)s %(t)s %(L)ss "%(r)s" %(s)s %(b)s '
         bind = f'{CONFIG.get("LOCAL_MODEL_HOST")}:{CONFIG.get("LOCAL_MODEL_PORT")}'
+        worker = CONFIG.get("LOCAL_MODEL_HOST_WORKER", 1)
         cmd = [
             'gunicorn', 'smartdoc.wsgi:application',
             '-b', bind,
             '-k', 'gthread',
             '--threads', '200',
-            '-w', "1",
+            '-w', str(worker),
             '--max-requests', '10240',
             '--max-requests-jitter', '2048',
             '--access-logformat', log_format,
@@ -11,6 +11,7 @@ import importlib
 import io
 import mimetypes
 import pickle
+import random
 import re
 import shutil
 from functools import reduce
@@ -297,3 +298,14 @@ def markdown_to_plain_text(md: str) -> str:
     # strip leading and trailing whitespace
     text = text.strip()
     return text
+
+
+SAFE_CHAR_SET = (
+        [chr(i) for i in range(65, 91) if chr(i) not in {'I', 'O'}] +  # uppercase A-H, J-N, P-Z
+        [chr(i) for i in range(97, 123) if chr(i) not in {'i', 'l', 'o'}] +  # lowercase a-h, j-n, p-z
+        [str(i) for i in range(10) if str(i) not in {'0', '1', '7'}]  # digits 2-6, 8-9
+)
+
+
+def get_random_chars(number=4):
+    return ''.join(random.choices(SAFE_CHAR_SET, k=number))
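Note: SAFE_CHAR_SET deliberately drops glyphs that are easy to confuse in a rendered captcha (I/l/1, O/0, and 7). A quick usage sketch, assuming get_random_chars from the hunk above is importable:

# hypothetical usage of the helper added above
code = get_random_chars(6)
assert len(code) == 6
assert not set(code) & {'I', 'O', 'i', 'l', 'o', '0', '1', '7'}  # ambiguous glyphs never appear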
@@ -7,13 +7,12 @@
 @desc:
 """
 import os
+import pickle
 import subprocess
 import sys
 import uuid
 from textwrap import dedent

-from diskcache import Cache
-
 from smartdoc.const import BASE_DIR
 from smartdoc.const import PROJECT_DIR
@@ -37,6 +36,8 @@ class FunctionExecutor:
         old_mask = os.umask(0o077)
         try:
             os.makedirs(self.sandbox_path, 0o700, exist_ok=True)
+            os.makedirs(os.path.join(self.sandbox_path, 'execute'), 0o700, exist_ok=True)
+            os.makedirs(os.path.join(self.sandbox_path, 'result'), 0o700, exist_ok=True)
         finally:
             os.umask(old_mask)
@@ -44,10 +45,11 @@ class FunctionExecutor:
         _id = str(uuid.uuid1())
         success = '{"code":200,"msg":"成功","data":exec_result}'
         err = '{"code":500,"msg":str(e),"data":None}'
-        path = r'' + self.sandbox_path + ''
+        result_path = f'{self.sandbox_path}/result/{_id}.result'
         _exec_code = f"""
try:
    import os
+    import pickle
    env = dict(os.environ)
    for key in list(env.keys()):
        if key in os.environ and (key.startswith('MAXKB') or key.startswith('POSTGRES') or key.startswith('PG')):
@@ -60,13 +62,11 @@ try:
    for local in locals_v:
        globals_v[local] = locals_v[local]
    exec_result=f(**keywords)
-    from diskcache import Cache
-    cache = Cache({path!a})
-    cache.set({_id!a},{success})
+    with open({result_path!a}, 'wb') as file:
+        file.write(pickle.dumps({success}))
except Exception as e:
-    from diskcache import Cache
-    cache = Cache({path!a})
-    cache.set({_id!a},{err})
+    with open({result_path!a}, 'wb') as file:
+        file.write(pickle.dumps({err}))
"""
         if self.sandbox:
             subprocess_result = self._exec_sandbox(_exec_code, _id)
@@ -74,18 +74,18 @@ except Exception as e:
         else:
             subprocess_result = self._exec(_exec_code)
         if subprocess_result.returncode == 1:
             raise Exception(subprocess_result.stderr)
-        cache = Cache(self.sandbox_path)
-        result = cache.get(_id)
-        cache.delete(_id)
+        with open(result_path, 'rb') as file:
+            result = pickle.loads(file.read())
+        os.remove(result_path)
         if result.get('code') == 200:
             return result.get('data')
         raise Exception(result.get('msg'))

     def _exec_sandbox(self, _code, _id):
-        exec_python_file = f'{self.sandbox_path}/{_id}.py'
+        exec_python_file = f'{self.sandbox_path}/execute/{_id}.py'
         with open(exec_python_file, 'w') as file:
             file.write(_code)
-        os.system(f"chown {self.user}:{self.user} {exec_python_file}")
+        os.system(f"chown {self.user}:root {exec_python_file}")
         kwargs = {'cwd': BASE_DIR}
         subprocess_result = subprocess.run(
             ['su', '-s', python_directory, '-c', "exec(open('" + exec_python_file + "').read())", self.user],
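Note: these hunks swap diskcache for a plain pickle file per execution: the sandboxed child writes its outcome to result/<id>.result and the parent reads it back and deletes it. A minimal sketch of the same handoff, with a hypothetical path:

import os
import pickle

result_path = '/tmp/sandbox/result/job-1.result'  # hypothetical path for illustration

# child process: persist the outcome as a pickled dict
os.makedirs(os.path.dirname(result_path), exist_ok=True)
with open(result_path, 'wb') as f:
    f.write(pickle.dumps({'code': 200, 'msg': 'ok', 'data': 42}))

# parent process: read the result back and clean up
with open(result_path, 'rb') as f:
    result = pickle.loads(f.read())
os.remove(result_path)
assert result['code'] == 200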
@@ -40,15 +40,12 @@ def generate():
 def get_key_pair():
     rsa_value = rsa_cache.get(cache_key)
     if rsa_value is None:
-        lock.acquire()
-        rsa_value = rsa_cache.get(cache_key)
-        if rsa_value is not None:
-            return rsa_value
-        try:
+        with lock:
+            rsa_value = rsa_cache.get(cache_key)
+            if rsa_value is not None:
+                return rsa_value
             rsa_value = get_key_pair_by_sql()
             rsa_cache.set(cache_key, rsa_value)
-        finally:
-            lock.release()
     return rsa_value
@@ -23,6 +23,8 @@ from django.db import transaction, models
 from django.db.models import QuerySet, Count
 from django.db.models.functions import Substr, Reverse
 from django.http import HttpResponse
+from django.utils.translation import get_language
+from django.utils.translation import gettext_lazy as _, gettext, to_locale
 from drf_yasg import openapi
 from openpyxl.cell.cell import ILLEGAL_CHARACTERS_RE
 from rest_framework import serializers
@@ -64,8 +66,6 @@ from embedding.task.embedding import embedding_by_document, delete_embedding_by_
     embedding_by_document_list
 from setting.models import Model
 from smartdoc.conf import PROJECT_DIR
-from django.utils.translation import gettext_lazy as _, gettext, to_locale
-from django.utils.translation import get_language

 parse_qa_handle_list = [XlsParseQAHandle(), CsvParseQAHandle(), XlsxParseQAHandle(), ZipParseQAHandle()]
 parse_table_handle_list = [CsvSplitTableHandle(), XlsSplitTableHandle(), XlsxSplitTableHandle()]
@@ -661,6 +661,8 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
                 cell = worksheet.cell(row=row_idx + 1, column=col_idx + 1)
                 if isinstance(col, str):
                     col = re.sub(ILLEGAL_CHARACTERS_RE, '', col)
+                    if col.startswith(('=', '+', '-', '@')):
+                        col = '\ufeff' + col
                 cell.value = col
         # create an HttpResponse object to return the Excel file
         return workbook
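Note: the added guard defuses spreadsheet formula injection on export: a cell starting with =, +, - or @ would otherwise be evaluated as a formula when the file is opened in Excel, while a leading zero-width BOM forces it to be read as text. A standalone sketch of the same check:

def defuse_formula(cell: str) -> str:
    # Excel treats a leading =, +, - or @ as the start of a formula;
    # prefixing a BOM makes the value render as plain text instead.
    if cell.startswith(('=', '+', '-', '@')):
        return '\ufeff' + cell
    return cell

assert defuse_formula('=SUM(A1:A9)').startswith('\ufeff')
assert defuse_formula('plain text') == 'plain text'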
@@ -28,6 +28,9 @@ mime_types = {"html": "text/html", "htm": "text/html", "shtml": "text/html", "cs
               "woff2": "font/woff2", "jar": "application/java-archive", "war": "application/java-archive",
               "ear": "application/java-archive", "json": "application/json", "hqx": "application/mac-binhex40",
               "doc": "application/msword", "pdf": "application/pdf", "ps": "application/postscript",
+              "docx": "application/vnd.openxmlformats-officedocument.wordprocessingml.document",
+              "xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
+              "pptx": "application/vnd.openxmlformats-officedocument.presentationml.presentation",
               "eps": "application/postscript", "ai": "application/postscript", "rtf": "application/rtf",
               "m3u8": "application/vnd.apple.mpegurl", "kml": "application/vnd.google-earth.kml+xml",
               "kmz": "application/vnd.google-earth.kmz", "xls": "application/vnd.ms-excel",
@@ -87,4 +90,4 @@ class FileSerializer(serializers.Serializer):
                                 'Content-Disposition': 'attachment; filename="{}"'.format(
                                     file.file_name)})
         return HttpResponse(file.get_byte(), status=200,
-                            headers={'Content-Type': mime_types.get(file.file_name.split(".")[-1], 'text/plain')})
+                            headers={'Content-Type': mime_types.get(file_type, 'text/plain')})
@@ -2,6 +2,7 @@ UPDATE "document"
 SET "char_length" = ( SELECT CASE WHEN
     "sum" ( "char_length" ( "content" ) ) IS NULL THEN
     0 ELSE "sum" ( "char_length" ( "content" ) )
-    END FROM paragraph WHERE "document_id" = %s )
+    END FROM paragraph WHERE "document_id" = %s ),
+    "update_time" = CURRENT_TIMESTAMP
 WHERE
     "id" = %s
@@ -181,6 +181,7 @@ class Dataset(APIView):
     @swagger_auto_schema(operation_summary=_('Generate related'), operation_id=_('Generate related'),
                          manual_parameters=DataSetSerializers.Operate.get_request_params_api(),
                          request_body=GenerateRelatedSerializer.get_request_body_api(),
                          responses=result.get_default_response(),
                          tags=[_('Knowledge Base')]
                          )
     @log(menu='document', operate="Generate related documents",
@@ -195,6 +195,53 @@ class FunctionLibApi(ApiMixin):
             }
         )

+    @staticmethod
+    def get_response_body_api():
+        return openapi.Schema(
+            type=openapi.TYPE_OBJECT,
+            required=['id', 'name', 'code', 'input_field_list', 'permission_type'],
+            properties={
+                'id': openapi.Schema(type=openapi.TYPE_STRING, title="", description=_('ID')),
+                'name': openapi.Schema(type=openapi.TYPE_STRING, title=_('function name'),
+                                       description=_('function name')),
+                'desc': openapi.Schema(type=openapi.TYPE_STRING, title=_('function description'),
+                                       description=_('function description')),
+                'code': openapi.Schema(type=openapi.TYPE_STRING, title=_('function content'),
+                                       description=_('function content')),
+                'permission_type': openapi.Schema(type=openapi.TYPE_STRING, title=_('permission'),
+                                                  description=_('permission')),
+                'is_active': openapi.Schema(type=openapi.TYPE_BOOLEAN, title=_('Is active'),
+                                            description=_('Is active')),
+                'input_field_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+                                                   description=_('Input variable list'),
+                                                   items=openapi.Schema(type=openapi.TYPE_OBJECT,
+                                                                        required=['name', 'is_required', 'source'],
+                                                                        properties={
+                                                                            'name': openapi.Schema(
+                                                                                type=openapi.TYPE_STRING,
+                                                                                title=_('variable name'),
+                                                                                description=_('variable name')),
+                                                                            'is_required': openapi.Schema(
+                                                                                type=openapi.TYPE_BOOLEAN,
+                                                                                title=_('required'),
+                                                                                description=_('required')),
+                                                                            'type': openapi.Schema(
+                                                                                type=openapi.TYPE_STRING,
+                                                                                title=_('type'),
+                                                                                description=_(
+                                                                                    'Field type string|int|dict|array|float')
+                                                                            ),
+                                                                            'source': openapi.Schema(
+                                                                                type=openapi.TYPE_STRING,
+                                                                                title=_('source'),
+                                                                                description=_(
+                                                                                    'The source only supports custom|reference')),
+                                                                        }))
+            }
+        )
+
     class Export(ApiMixin):
         @staticmethod
         def get_request_params_api():
@@ -214,4 +261,4 @@ class FunctionLibApi(ApiMixin):
                               type=openapi.TYPE_FILE,
                               required=True,
                               description=_('Upload image files'))
-            ]
+        ]
@@ -44,6 +44,7 @@ class FunctionLibView(APIView):
     @swagger_auto_schema(operation_summary=_('Create function'),
+                         operation_id=_('Create function'),
                          request_body=FunctionLibApi.Create.get_request_body_api(),
                          responses=result.get_api_response(FunctionLibApi.Create.get_response_body_api()),
                          tags=[_('Function')])
     @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
     @log(menu='Function', operate="Create function",

@@ -58,6 +59,7 @@ class FunctionLibView(APIView):
     @swagger_auto_schema(operation_summary=_('Debug function'),
+                         operation_id=_('Debug function'),
                          request_body=FunctionLibApi.Debug.get_request_body_api(),
                          responses=result.get_default_response(),
                          tags=[_('Function')])
     @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
     def post(self, request: Request):

@@ -72,6 +74,7 @@ class FunctionLibView(APIView):
     @swagger_auto_schema(operation_summary=_('Update function'),
+                         operation_id=_('Update function'),
                          request_body=FunctionLibApi.Edit.get_request_body_api(),
                          responses=result.get_api_response(FunctionLibApi.Edit.get_request_body_api()),
                          tags=[_('Function')])
     @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
     @log(menu='Function', operate="Update function",

@@ -84,6 +87,7 @@ class FunctionLibView(APIView):
     @action(methods=['DELETE'], detail=False)
     @swagger_auto_schema(operation_summary=_('Delete function'),
+                         operation_id=_('Delete function'),
                          responses=result.get_default_response(),
                          tags=[_('Function')])
     @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
     @log(menu='Function', operate="Delete function",

@@ -26,6 +26,7 @@ class PyLintView(APIView):
     @swagger_auto_schema(operation_summary=_('Check code'),
+                         operation_id=_('Check code'),
                          request_body=PyLintApi.get_request_body_api(),
                          responses=result.get_api_response(PyLintApi.get_request_body_api()),
                          tags=[_('Function')])
     @has_permissions(RoleConstants.ADMIN, RoleConstants.USER)
     def post(self, request: Request):
@@ -7490,4 +7490,13 @@ msgid "Field: {name} No value set"
 msgstr ""

+msgid "Generate related"
+msgstr ""
+
+msgid "Obtain graphical captcha"
+msgstr ""
+
+msgid "Captcha code error or expiration"
+msgstr ""
+
+msgid "captcha"
+msgstr ""
@@ -4536,7 +4536,7 @@ msgstr "修改知识库信息"
-#: community/apps/dataset/views/document.py:463
+#: community/apps/dataset/views/document.py:464
 msgid "Get the knowledge base paginated list"
-msgstr "获取知识库分页列表"
+msgstr "获取知识库文档分页列表"

-#: community/apps/dataset/views/document.py:31
+#: community/apps/dataset/views/document.py:32
@@ -7653,4 +7653,13 @@ msgid "Field: {name} No value set"
 msgstr "字段: {name} 未设置值"

+msgid "Generate related"
+msgstr "生成问题"
+
+msgid "Obtain graphical captcha"
+msgstr "获取图形验证码"
+
+msgid "Captcha code error or expiration"
+msgstr "验证码错误或过期"
+
+msgid "captcha"
+msgstr "验证码"
@@ -4545,7 +4545,7 @@ msgstr "修改知識庫信息"
-#: community/apps/dataset/views/document.py:463
+#: community/apps/dataset/views/document.py:464
 msgid "Get the knowledge base paginated list"
-msgstr "獲取知識庫分頁列表"
+msgstr "獲取知識庫文档分頁列表"

-#: community/apps/dataset/views/document.py:31
+#: community/apps/dataset/views/document.py:32
@@ -7663,4 +7663,13 @@ msgid "Field: {name} No value set"
 msgstr "欄位: {name} 未設定值"

+msgid "Generate related"
+msgstr "生成問題"
+
+msgid "Obtain graphical captcha"
+msgstr "獲取圖形驗證碼"
+
+msgid "Captcha code error or expiration"
+msgstr "驗證碼錯誤或過期"
+
+msgid "captcha"
+msgstr "驗證碼"
@@ -106,7 +106,10 @@ class MaxKBBaseModel(ABC):
         optional_params = {}
         for key, value in model_kwargs.items():
             if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
-                optional_params[key] = value
+                if key == 'extra_body' and isinstance(value, dict):
+                    optional_params = {**optional_params, **value}
+                else:
+                    optional_params[key] = value
         return optional_params
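Note: many hunks below switch provider constructors from **optional_params to extra_body=optional_params, and this filter flattens any nested extra_body dict into the top level so both spellings merge into one parameter set. A minimal sketch of that behavior (the enable_thinking key is purely illustrative):

def filter_optional_params(model_kwargs):
    skipped = {'model_id', 'use_local', 'streaming', 'show_ref_label'}
    optional_params = {}
    for key, value in model_kwargs.items():
        if key in skipped:
            continue
        if key == 'extra_body' and isinstance(value, dict):
            # merge a nested extra_body dict into the flat parameter set
            optional_params = {**optional_params, **value}
        else:
            optional_params[key] = value
    return optional_params

print(filter_optional_params({'temperature': 0.7, 'extra_body': {'enable_thinking': False}, 'use_local': True}))
# -> {'temperature': 0.7, 'enable_thinking': False}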
@@ -19,6 +19,8 @@ from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import
 from setting.models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider
 from setting.models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider
 from setting.models_provider.impl.qwen_model_provider.qwen_model_provider import QwenModelProvider
+from setting.models_provider.impl.regolo_model_provider.regolo_model_provider import \
+    RegoloModelProvider
 from setting.models_provider.impl.siliconCloud_model_provider.siliconCloud_model_provider import \
     SiliconCloudModelProvider
 from setting.models_provider.impl.tencent_cloud_model_provider.tencent_cloud_model_provider import \
@@ -55,3 +57,4 @@ class ModelProvideConstants(Enum):
     aliyun_bai_lian_model_provider = AliyunBaiLianModelProvider()
     model_anthropic_provider = AnthropicModelProvider()
     model_siliconCloud_provider = SiliconCloudModelProvider()
+    model_regolo_provider = RegoloModelProvider()
@@ -51,6 +51,23 @@ model_info_list = [ModelInfo('gte-rerank',
                              _("Universal text vector is Tongyi Lab's multi-language text unified vector model based on the LLM base. It provides high-level vector services for multiple mainstream languages around the world and helps developers quickly convert text data into high-quality vector data."),
                              ModelTypeConst.EMBEDDING, aliyun_bai_lian_embedding_model_credential,
                              AliyunBaiLianEmbedding),
+                   ModelInfo('qwen3-0.6b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-1.7b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-4b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-8b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-14b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-32b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-30b-a3b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+                   ModelInfo('qwen3-235b-a22b', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
+                             BaiLianChatModel),
+
                    ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
                              BaiLianChatModel),
                    ModelInfo('qwen-plus', '', ModelTypeConst.LLM, aliyun_bai_lian_llm_model_credential,
@@ -30,6 +30,29 @@ class BaiLianLLMModelParams(BaseForm):
                                     precision=0)


+class BaiLianLLMStreamModelParams(BaseForm):
+    temperature = forms.SliderField(TooltipLabel(_('Temperature'),
+                                                 _('Higher values make the output more random, while lower values make it more focused and deterministic')),
+                                    required=True, default_value=0.7,
+                                    _min=0.1,
+                                    _max=1.0,
+                                    _step=0.01,
+                                    precision=2)
+
+    max_tokens = forms.SliderField(
+        TooltipLabel(_('Output the maximum Tokens'),
+                     _('Specify the maximum number of tokens that the model can generate')),
+        required=True, default_value=800,
+        _min=1,
+        _max=100000,
+        _step=1,
+        precision=0)
+
+    stream = forms.SwitchField(label=TooltipLabel(_('Is the answer in streaming mode'),
+                                                  _('Is the answer in streaming mode')),
+                               required=True, default_value=True)
+
+
 class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):

     def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
@@ -47,7 +70,11 @@ class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):
                     return False
         try:
             model = provider.get_model(model_type, model_name, model_credential, **model_params)
-            model.invoke([HumanMessage(content=gettext('Hello'))])
+            if model_params.get('stream'):
+                for res in model.stream([HumanMessage(content=gettext('Hello'))]):
+                    pass
+            else:
+                model.invoke([HumanMessage(content=gettext('Hello'))])
         except Exception as e:
             traceback.print_exc()
             if isinstance(e, AppApiException):
@@ -68,4 +95,6 @@ class BaiLianLLMModelCredential(BaseForm, BaseModelCredential):
     api_key = forms.PasswordInputField('API Key', required=True)

     def get_model_params_setting_form(self, model_name):
+        if 'qwen3' in model_name:
+            return BaiLianLLMStreamModelParams()
         return BaiLianLLMModelParams()
@@ -15,9 +15,8 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model_name=model_name,
             openai_api_key=model_credential.get('api_key'),
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
-            # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
         return chat_tong_yi
@@ -20,5 +20,5 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base=model_credential.get('api_base'),
             openai_api_key=model_credential.get('api_key'),
-            **optional_params
+            extra_body=optional_params
         )
@@ -1,10 +1,12 @@
 import os
 import re
-from typing import Dict
+from typing import Dict, List

 from botocore.config import Config
 from langchain_community.chat_models import BedrockChat
+from langchain_core.messages import BaseMessage, get_buffer_string

+from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -72,6 +74,20 @@ class BedrockModel(MaxKBBaseModel, BedrockChat):
             config=config
         )

+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        try:
+            return super().get_num_tokens_from_messages(messages)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        try:
+            return super().get_num_tokens(text)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))
+

 def _update_aws_credentials(profile_name, access_key_id, secret_access_key):
     credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials")
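Note: Bedrock here (and Gemini further down) gains the same guard: try the provider-side token counting first, and fall back to a local tokenizer if it raises. A hedged sketch of the pattern, where local_tokenizer stands in for whatever TokenizerManage.get_tokenizer() returns:

def count_tokens_safely(model, text, local_tokenizer):
    try:
        # provider-side counting; may fail offline or for unsupported models
        return model.get_num_tokens(text)
    except Exception:
        # local approximation keeps billing/quota accounting functional
        return len(local_tokenizer.encode(text))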
@@ -1,15 +1,16 @@
 # coding=utf-8
-import warnings
-from typing import List, Dict, Optional, Any, Iterator, cast, Type, Union
+from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping

 import openai
-from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, AIMessageChunk
-from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
+from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, HumanMessageChunk, AIMessageChunk, \
+    SystemMessageChunk, FunctionMessageChunk, ChatMessageChunk
+from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.tool import tool_call_chunk, ToolMessageChunk
+from langchain_core.outputs import ChatGenerationChunk
 from langchain_core.runnables import RunnableConfig, ensure_config
-from langchain_core.utils.pydantic import is_basemodel_subclass
+from langchain_core.tools import BaseTool
 from langchain_openai import ChatOpenAI
+from langchain_openai.chat_models.base import _create_usage_metadata

 from common.config.tokenizer_manage_config import TokenizerManage
@@ -19,6 +20,65 @@ def custom_get_token_ids(text: str):
     return tokenizer.encode(text)


+def _convert_delta_to_message_chunk(
+        _dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
+) -> BaseMessageChunk:
+    id_ = _dict.get("id")
+    role = cast(str, _dict.get("role"))
+    content = cast(str, _dict.get("content") or "")
+    additional_kwargs: dict = {}
+    if 'reasoning_content' in _dict:
+        additional_kwargs['reasoning_content'] = _dict.get('reasoning_content')
+    if _dict.get("function_call"):
+        function_call = dict(_dict["function_call"])
+        if "name" in function_call and function_call["name"] is None:
+            function_call["name"] = ""
+        additional_kwargs["function_call"] = function_call
+    tool_call_chunks = []
+    if raw_tool_calls := _dict.get("tool_calls"):
+        additional_kwargs["tool_calls"] = raw_tool_calls
+        try:
+            tool_call_chunks = [
+                tool_call_chunk(
+                    name=rtc["function"].get("name"),
+                    args=rtc["function"].get("arguments"),
+                    id=rtc.get("id"),
+                    index=rtc["index"],
+                )
+                for rtc in raw_tool_calls
+            ]
+        except KeyError:
+            pass
+
+    if role == "user" or default_class == HumanMessageChunk:
+        return HumanMessageChunk(content=content, id=id_)
+    elif role == "assistant" or default_class == AIMessageChunk:
+        return AIMessageChunk(
+            content=content,
+            additional_kwargs=additional_kwargs,
+            id=id_,
+            tool_call_chunks=tool_call_chunks,  # type: ignore[arg-type]
+        )
+    elif role in ("system", "developer") or default_class == SystemMessageChunk:
+        if role == "developer":
+            additional_kwargs = {"__openai_role__": "developer"}
+        else:
+            additional_kwargs = {}
+        return SystemMessageChunk(
+            content=content, id=id_, additional_kwargs=additional_kwargs
+        )
+    elif role == "function" or default_class == FunctionMessageChunk:
+        return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
+    elif role == "tool" or default_class == ToolMessageChunk:
+        return ToolMessageChunk(
+            content=content, tool_call_id=_dict["tool_call_id"], id=id_
+        )
+    elif role or default_class == ChatMessageChunk:
+        return ChatMessageChunk(content=content, role=role, id=id_)
+    else:
+        return default_class(content=content, id=id_)  # type: ignore
+
+
 class BaseChatOpenAI(ChatOpenAI):
     usage_metadata: dict = {}
     custom_get_token_ids = custom_get_token_ids
@@ -26,7 +86,13 @@ class BaseChatOpenAI(ChatOpenAI):
     def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
         return self.usage_metadata

-    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+    def get_num_tokens_from_messages(
+            self,
+            messages: list[BaseMessage],
+            tools: Optional[
+                Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+            ] = None,
+    ) -> int:
         if self.usage_metadata is None or self.usage_metadata == {}:
             try:
                 return super().get_num_tokens_from_messages(messages)
@@ -44,114 +110,77 @@ class BaseChatOpenAI(ChatOpenAI):
                 return len(tokenizer.encode(text))
         return self.get_last_generation_info().get('output_tokens', 0)

-    def _stream(
-            self,
-            messages: List[BaseMessage],
-            stop: Optional[List[str]] = None,
-            run_manager: Optional[CallbackManagerForLLMRun] = None,
-            **kwargs: Any,
-    ) -> Iterator[ChatGenerationChunk]:
-        kwargs["stream"] = True
-        """Set default stream_options."""
-        stream_usage = self._should_stream_usage(kwargs.get('stream_usage'), **kwargs)
-        # Note: stream_options is not a valid parameter for Azure OpenAI.
-        # To support users proxying Azure through ChatOpenAI, here we only specify
-        # stream_options if include_usage is set to True.
-        # See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
-        # for release notes.
-        if stream_usage:
-            kwargs["stream_options"] = {"include_usage": stream_usage}
-
-        payload = self._get_request_payload(messages, stop=stop, **kwargs)
-        default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
-        base_generation_info = {}
-
-        if "response_format" in payload and is_basemodel_subclass(
-                payload["response_format"]
-        ):
-            # TODO: Add support for streaming with Pydantic response_format.
-            warnings.warn("Streaming with Pydantic response_format not yet supported.")
-            chat_result = self._generate(
-                messages, stop, run_manager=run_manager, **kwargs
-            )
-            msg = chat_result.generations[0].message
-            yield ChatGenerationChunk(
-                message=AIMessageChunk(
-                    **msg.dict(exclude={"type", "additional_kwargs"}),
-                    # preserve the "parsed" Pydantic object without converting to dict
-                    additional_kwargs=msg.additional_kwargs,
-                ),
-                generation_info=chat_result.generations[0].generation_info,
-            )
-            return
-        if self.include_response_headers:
-            raw_response = self.client.with_raw_response.create(**payload)
-            response = raw_response.parse()
-            base_generation_info = {"headers": dict(raw_response.headers)}
-        else:
-            response = self.client.create(**payload)
-        with response:
-            is_first_chunk = True
-            for chunk in response:
-                if not isinstance(chunk, dict):
-                    chunk = chunk.model_dump()
-                generation_chunk = super()._convert_chunk_to_generation_chunk(
-                    chunk,
-                    default_chunk_class,
-                    base_generation_info if is_first_chunk else {},
-                )
-                if generation_chunk is None:
-                    continue
-                # custom code
-                if len(chunk['choices']) > 0 and 'reasoning_content' in chunk['choices'][0]['delta']:
-                    generation_chunk.message.additional_kwargs["reasoning_content"] = chunk['choices'][0]['delta'][
-                        'reasoning_content']
-                default_chunk_class = generation_chunk.message.__class__
-                logprobs = (generation_chunk.generation_info or {}).get("logprobs")
-                if run_manager:
-                    run_manager.on_llm_new_token(
-                        generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
-                    )
-                is_first_chunk = False
-                # custom code
-                if generation_chunk.message.usage_metadata is not None:
-                    self.usage_metadata = generation_chunk.message.usage_metadata
-                yield generation_chunk
-
-    def _create_chat_result(self,
-                            response: Union[dict, openai.BaseModel],
-                            generation_info: Optional[Dict] = None):
-        result = super()._create_chat_result(response, generation_info)
-        try:
-            reasoning_content = ''
-            reasoning_content_enable = False
-            for res in response.choices:
-                if 'reasoning_content' in res.message.model_extra:
-                    reasoning_content_enable = True
-                    _reasoning_content = res.message.model_extra.get('reasoning_content')
-                    if _reasoning_content is not None:
-                        reasoning_content += _reasoning_content
-            if reasoning_content_enable:
-                result.llm_output['reasoning_content'] = reasoning_content
-        except Exception as e:
-            pass
-        return result
+    def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
+        kwargs['stream_usage'] = True
+        for chunk in super()._stream(*args, **kwargs):
+            if chunk.message.usage_metadata is not None:
+                self.usage_metadata = chunk.message.usage_metadata
+            yield chunk
+
+    def _convert_chunk_to_generation_chunk(
+            self,
+            chunk: dict,
+            default_chunk_class: type,
+            base_generation_info: Optional[dict],
+    ) -> Optional[ChatGenerationChunk]:
+        if chunk.get("type") == "content.delta":  # from beta.chat.completions.stream
+            return None
+        token_usage = chunk.get("usage")
+        choices = (
+                chunk.get("choices", [])
+                # from beta.chat.completions.stream
+                or chunk.get("chunk", {}).get("choices", [])
+        )
+
+        usage_metadata: Optional[UsageMetadata] = (
+            _create_usage_metadata(token_usage) if token_usage and token_usage.get("prompt_tokens") else None
+        )
+        if len(choices) == 0:
+            # logprobs is implicitly None
+            generation_chunk = ChatGenerationChunk(
+                message=default_chunk_class(content="", usage_metadata=usage_metadata)
+            )
+            return generation_chunk
+
+        choice = choices[0]
+        if choice["delta"] is None:
+            return None
+
+        message_chunk = _convert_delta_to_message_chunk(
+            choice["delta"], default_chunk_class
+        )
+        generation_info = {**base_generation_info} if base_generation_info else {}
+
+        if finish_reason := choice.get("finish_reason"):
+            generation_info["finish_reason"] = finish_reason
+            if model_name := chunk.get("model"):
+                generation_info["model_name"] = model_name
+            if system_fingerprint := chunk.get("system_fingerprint"):
+                generation_info["system_fingerprint"] = system_fingerprint
+
+        logprobs = choice.get("logprobs")
+        if logprobs:
+            generation_info["logprobs"] = logprobs
+
+        if usage_metadata and isinstance(message_chunk, AIMessageChunk):
+            message_chunk.usage_metadata = usage_metadata
+
+        generation_chunk = ChatGenerationChunk(
+            message=message_chunk, generation_info=generation_info or None
+        )
+        return generation_chunk

     def invoke(
             self,
             input: LanguageModelInput,
             config: Optional[RunnableConfig] = None,
             *,
-            stop: Optional[List[str]] = None,
+            stop: Optional[list[str]] = None,
             **kwargs: Any,
     ) -> BaseMessage:
         config = ensure_config(config)
         chat_result = cast(
-            ChatGeneration,
+            "ChatGeneration",
             self.generate_prompt(
                 [self._convert_input(input)],
                 stop=stop,
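Note: the net effect of the rewritten _stream is that usage accounting now rides on the standard langchain-openai streaming path (with stream_usage forced on) instead of a hand-rolled HTTP loop. A hedged sketch of what a caller observes, assuming model is a configured BaseChatOpenAI instance:

# usage metadata accumulates on the model instance as chunks stream by
for chunk in model.stream('Hello'):
    print(chunk.content, end='')
print(model.get_last_generation_info())  # e.g. {'input_tokens': 8, 'output_tokens': 42, ...}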
@@ -162,7 +191,9 @@ class BaseChatOpenAI(ChatOpenAI):
                 run_id=config.pop("run_id", None),
                 **kwargs,
             ).generations[0][0],
         ).message
+
+        self.usage_metadata = chat_result.response_metadata[
+            'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
         return chat_result
@@ -26,6 +26,6 @@ class DeepSeekChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base='https://api.deepseek.com',
             openai_api_key=model_credential.get('api_key'),
-            **optional_params
+            extra_body=optional_params
         )
         return deepseek_chat_open_ai
@@ -13,7 +13,7 @@ from google.ai.generativelanguage_v1beta.types import (
     Tool as GoogleTool,
 )
 from langchain_core.callbacks import CallbackManagerForLLMRun
-from langchain_core.messages import BaseMessage
+from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_core.outputs import ChatGenerationChunk
 from langchain_google_genai import ChatGoogleGenerativeAI
 from langchain_google_genai._function_utils import _ToolConfigDict, _ToolDict
@@ -22,6 +22,8 @@ from langchain_google_genai.chat_models import _chat_with_retry, _response_to_re
 from langchain_google_genai._common import (
     SafetySettingDict,
 )

+from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -46,10 +48,18 @@ class GeminiChatModel(MaxKBBaseModel, ChatGoogleGenerativeAI):
         return self.__dict__.get('_last_generation_info')

     def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
-        return self.get_last_generation_info().get('input_tokens', 0)
+        try:
+            return self.get_last_generation_info().get('input_tokens', 0)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])

     def get_num_tokens(self, text: str) -> int:
-        return self.get_last_generation_info().get('output_tokens', 0)
+        try:
+            return self.get_last_generation_info().get('output_tokens', 0)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))

     def _stream(
             self,
@@ -21,11 +21,10 @@ class KimiChatModel(MaxKBBaseModel, BaseChatOpenAI):
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
-
         kimi_chat_open_ai = KimiChatModel(
             openai_api_base=model_credential['api_base'],
             openai_api_key=model_credential['api_key'],
             model_name=model_name,
-            **optional_params
+            extra_body=optional_params,
         )
         return kimi_chat_open_ai
@@ -25,7 +25,7 @@ class OllamaLLMModelParams(BaseForm):
                                     _step=0.01,
                                     precision=2)

-    max_tokens = forms.SliderField(
+    num_predict = forms.SliderField(
         TooltipLabel(_('Output the maximum Tokens'),
                      _('Specify the maximum number of tokens that the model can generate')),
         required=True, default_value=1024,
@@ -28,5 +28,5 @@ class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
@@ -16,5 +16,5 @@ class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
@@ -9,7 +9,6 @@
 from typing import List, Dict

 from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_openai.chat_models import ChatOpenAI

 from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -35,9 +34,9 @@ class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):
             streaming = False
         azure_chat_open_ai = OpenAIChatModel(
             model=model_name,
-            openai_api_base=model_credential.get('api_base'),
-            openai_api_key=model_credential.get('api_key'),
-            **optional_params,
+            base_url=model_credential.get('api_base'),
+            api_key=model_credential.get('api_key'),
+            extra_body=optional_params,
             streaming=streaming,
             custom_get_token_ids=custom_get_token_ids
         )
@@ -18,9 +18,8 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model_name=model_name,
             openai_api_key=model_credential.get('api_key'),
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
-            # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
         return chat_tong_yi
@@ -26,6 +26,6 @@ class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
         return chat_tong_yi
@@ -0,0 +1,8 @@
# coding=utf-8
"""
@project: maxkb
@Author:虎
@file: __init__.py.py
@date:2024/3/28 16:25
@desc:
"""
@@ -0,0 +1,52 @@
# coding=utf-8
"""
@project: MaxKB
@Author:虎
@file: embedding.py
@date:2024/7/12 16:45
@desc:
"""
import traceback
from typing import Dict

from django.utils.translation import gettext as _

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode


class RegoloEmbeddingCredential(BaseForm, BaseModelCredential):
    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
                 raise_exception=True):
        model_type_list = provider.get_model_type_list()
        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
            raise AppApiException(ValidCode.valid_error.value,
                                  _('{model_type} Model type is not supported').format(model_type=model_type))

        for key in ['api_key']:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(ValidCode.valid_error.value, _('{key} is required').format(key=key))
                else:
                    return False
        try:
            model = provider.get_model(model_type, model_name, model_credential)
            model.embed_query(_('Hello'))
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      _('Verification failed, please check whether the parameters are correct: {error}').format(
                                          error=str(e)))
            else:
                return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    api_key = forms.PasswordInputField('API Key', required=True)
@@ -0,0 +1,74 @@
# coding=utf-8
import base64
import os
import traceback
from typing import Dict

from langchain_core.messages import HumanMessage

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode
from django.utils.translation import gettext_lazy as _, gettext


class RegoloImageModelParams(BaseForm):
    temperature = forms.SliderField(TooltipLabel(_('Temperature'),
                                                 _('Higher values make the output more random, while lower values make it more focused and deterministic')),
                                    required=True, default_value=0.7,
                                    _min=0.1,
                                    _max=1.0,
                                    _step=0.01,
                                    precision=2)

    max_tokens = forms.SliderField(
        TooltipLabel(_('Output the maximum Tokens'),
                     _('Specify the maximum number of tokens that the model can generate')),
        required=True, default_value=800,
        _min=1,
        _max=100000,
        _step=1,
        precision=0)


class RegoloImageModelCredential(BaseForm, BaseModelCredential):
    api_base = forms.TextInputField('API URL', required=True)
    api_key = forms.PasswordInputField('API Key', required=True)

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
                 raise_exception=False):
        model_type_list = provider.get_model_type_list()
        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
            raise AppApiException(ValidCode.valid_error.value,
                                  gettext('{model_type} Model type is not supported').format(model_type=model_type))

        for key in ['api_key']:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
                else:
                    return False
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
            for chunk in res:
                print(chunk)
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext(
                                          'Verification failed, please check whether the parameters are correct: {error}').format(
                                          error=str(e)))
            else:
                return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    def get_model_params_setting_form(self, model_name):
        return RegoloImageModelParams()
@@ -0,0 +1,78 @@
# coding=utf-8
"""
@project: MaxKB
@Author:虎
@file: llm.py
@date:2024/7/11 18:32
@desc:
"""
import traceback
from typing import Dict

from django.utils.translation import gettext_lazy as _, gettext
from langchain_core.messages import HumanMessage

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode


class RegoloLLMModelParams(BaseForm):
    temperature = forms.SliderField(TooltipLabel(_('Temperature'),
                                                 _('Higher values make the output more random, while lower values make it more focused and deterministic')),
                                    required=True, default_value=0.7,
                                    _min=0.1,
                                    _max=1.0,
                                    _step=0.01,
                                    precision=2)

    max_tokens = forms.SliderField(
        TooltipLabel(_('Output the maximum Tokens'),
                     _('Specify the maximum number of tokens that the model can generate')),
        required=True, default_value=800,
        _min=1,
        _max=100000,
        _step=1,
        precision=0)


class RegoloLLMModelCredential(BaseForm, BaseModelCredential):

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
                 raise_exception=False):
        model_type_list = provider.get_model_type_list()
        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
            raise AppApiException(ValidCode.valid_error.value,
                                  gettext('{model_type} Model type is not supported').format(model_type=model_type))

        for key in ['api_key']:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
                else:
                    return False
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            model.invoke([HumanMessage(content=gettext('Hello'))])
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext(
                                          'Verification failed, please check whether the parameters are correct: {error}').format(
                                          error=str(e)))
            else:
                return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    api_key = forms.PasswordInputField('API Key', required=True)

    def get_model_params_setting_form(self, model_name):
        return RegoloLLMModelParams()
@@ -0,0 +1,89 @@
# coding=utf-8
import traceback
from typing import Dict

from django.utils.translation import gettext_lazy as _, gettext

from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from setting.models_provider.base_model_provider import BaseModelCredential, ValidCode


class RegoloTTIModelParams(BaseForm):
    size = forms.SingleSelect(
        TooltipLabel(_('Image size'),
                     _('The image generation endpoint allows you to create raw images based on text prompts. ')),
        required=True,
        default_value='1024x1024',
        option_list=[
            {'value': '1024x1024', 'label': '1024x1024'},
            {'value': '1024x1792', 'label': '1024x1792'},
            {'value': '1792x1024', 'label': '1792x1024'},
        ],
        text_field='label',
        value_field='value'
    )

    quality = forms.SingleSelect(
        TooltipLabel(_('Picture quality'), _('''
            By default, images are produced in standard quality.
            ''')),
        required=True,
        default_value='standard',
        option_list=[
            {'value': 'standard', 'label': 'standard'},
            {'value': 'hd', 'label': 'hd'},
        ],
        text_field='label',
        value_field='value'
    )

    n = forms.SliderField(
        TooltipLabel(_('Number of pictures'),
                     _('1 as default')),
        required=True, default_value=1,
        _min=1,
        _max=10,
        _step=1,
        precision=0)


class RegoloTextToImageModelCredential(BaseForm, BaseModelCredential):
    api_key = forms.PasswordInputField('API Key', required=True)

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
                 raise_exception=False):
        model_type_list = provider.get_model_type_list()
        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
            raise AppApiException(ValidCode.valid_error.value,
                                  gettext('{model_type} Model type is not supported').format(model_type=model_type))

        for key in ['api_key']:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
                else:
                    return False
        try:
            model = provider.get_model(model_type, model_name, model_credential, **model_params)
            res = model.check_auth()
            print(res)
        except Exception as e:
            traceback.print_exc()
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value,
                                      gettext(
                                          'Verification failed, please check whether the parameters are correct: {error}').format(
                                          error=str(e)))
            else:
                return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    def get_model_params_setting_form(self, model_name):
        return RegoloTTIModelParams()
@@ -0,0 +1,64 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<svg
   id="Livello_2"
   data-name="Livello 2"
   viewBox="0 0 104.4 104.38"
   version="1.1"
   sodipodi:docname="Regolo_logo_positive.svg"
   width="100%" height="100%"
   inkscape:version="1.4 (e7c3feb100, 2024-10-09)"
   xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
   xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
   xmlns="http://www.w3.org/2000/svg"
   xmlns:svg="http://www.w3.org/2000/svg">
  <sodipodi:namedview
     id="namedview13"
     pagecolor="#ffffff"
     bordercolor="#666666"
     borderopacity="1.0"
     inkscape:showpageshadow="2"
     inkscape:pageopacity="0.0"
     inkscape:pagecheckerboard="0"
     inkscape:deskcolor="#d1d1d1"
     inkscape:zoom="2.1335227"
     inkscape:cx="119.05193"
     inkscape:cy="48.511318"
     inkscape:window-width="1920"
     inkscape:window-height="1025"
     inkscape:window-x="0"
     inkscape:window-y="0"
     inkscape:window-maximized="1"
     inkscape:current-layer="g13" />
  <defs
     id="defs1">
    <style
       id="style1">
      .cls-1 {
        fill: #303030;
      }

      .cls-2 {
        fill: #59e389;
      }
    </style>
  </defs>
  <g
     id="Grafica"
     transform="translate(0,-40.87)">
    <g
       id="g13">
      <path
         class="cls-1"
         d="m 104.39,105.96 v 36.18 c 0,0.32 -0.05,0.62 -0.14,0.91 -0.39,1.27 -1.58,2.2 -2.99,2.2 H 65.08 c -1.73,0 -3.13,-1.41 -3.13,-3.13 V 113.4 c 0,-0.15 0,-0.29 0,-0.44 v -7 c 0,-1.73 1.4,-3.13 3.13,-3.13 h 36.19 c 1.5,0 2.77,1.07 3.06,2.5 0.05,0.21 0.07,0.41 0.07,0.63 z"
         id="path1" />
      <path
         class="cls-1"
         d="m 104.39,105.96 v 36.18 c 0,0.32 -0.05,0.62 -0.14,0.91 -0.39,1.27 -1.58,2.2 -2.99,2.2 H 65.08 c -1.73,0 -3.13,-1.41 -3.13,-3.13 V 113.4 c 0,-0.15 0,-0.29 0,-0.44 v -7 c 0,-1.73 1.4,-3.13 3.13,-3.13 h 36.19 c 1.5,0 2.77,1.07 3.06,2.5 0.05,0.21 0.07,0.41 0.07,0.63 z"
         id="path2" />
      <path
         class="cls-2"
         d="M 101.27,40.88 H 65.09 c -1.73,0 -3.13,1.4 -3.13,3.13 v 28.71 c 0,4.71 -1.88,9.23 -5.2,12.56 L 44.42,97.61 c -3.32,3.33 -7.85,5.2 -12.55,5.2 H 18.98 c -2.21,0 -3.99,-1.79 -3.99,-3.99 V 87.29 c 0,-2.21 1.79,-3.99 3.99,-3.99 h 20.34 c 1.41,0 2.59,-0.93 2.99,-2.2 0.09,-0.29 0.14,-0.59 0.14,-0.91 V 44 c 0,-0.22 -0.02,-0.42 -0.07,-0.63 -0.29,-1.43 -1.56,-2.5 -3.06,-2.5 H 3.13 C 1.4,40.87 0,42.27 0,44 v 7 c 0,0.15 0,0.29 0,0.44 v 28.72 c 0,1.72 1.41,3.13 3.13,3.13 h 3.16 c 2.21,0 3.99,1.79 3.99,3.99 v 11.53 c 0,2.21 -1.79,3.99 -3.99,3.99 H 3.15 c -1.73,0 -3.13,1.4 -3.13,3.13 v 36.19 c 0,1.72 1.41,3.13 3.13,3.13 h 36.19 c 1.73,0 3.13,-1.41 3.13,-3.13 V 113.4 c 0,-4.7 1.87,-9.23 5.2,-12.55 L 60,88.51 c 3.33,-3.32 7.85,-5.2 12.56,-5.2 h 28.71 c 1.73,0 3.13,-1.4 3.13,-3.13 V 44 c 0,-1.73 -1.4,-3.13 -3.13,-3.13 z"
         id="path3" />
    </g>
  </g>
</svg>
@@ -0,0 +1,23 @@
# coding=utf-8
"""
@project: MaxKB
@Author:虎
@file: embedding.py
@date:2024/7/12 17:44
@desc:
"""
from typing import Dict

from langchain_community.embeddings import OpenAIEmbeddings

from setting.models_provider.base_model_provider import MaxKBBaseModel


class RegoloEmbeddingModel(MaxKBBaseModel, OpenAIEmbeddings):
    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        return RegoloEmbeddingModel(
            api_key=model_credential.get('api_key'),
            model=model_name,
            openai_api_base="https://api.regolo.ai/v1",
        )
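A minimal usage sketch of the wrapper above: the key is a placeholder, the call performs a real request against api.regolo.ai, and 'gte-Qwen2' is the embedding model the provider registers later in this diff; embed_documents comes from the inherited OpenAIEmbeddings interface.

    # Hypothetical usage; 'sk-...' is a placeholder, not a real credential.
    model = RegoloEmbeddingModel.new_instance('EMBEDDING', 'gte-Qwen2', {'api_key': 'sk-...'})
    vectors = model.embed_documents(['hello regolo'])  # one embedding vector per input text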
@@ -0,0 +1,19 @@
from typing import Dict

from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


class RegoloImage(MaxKBBaseModel, BaseChatOpenAI):

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
        return RegoloImage(
            model_name=model_name,
            openai_api_base="https://api.regolo.ai/v1",
            openai_api_key=model_credential.get('api_key'),
            streaming=True,
            stream_usage=True,
            extra_body=optional_params
        )
@@ -0,0 +1,38 @@
# coding=utf-8
"""
@project: maxkb
@Author:虎
@file: llm.py
@date:2024/4/18 15:28
@desc:
"""
from typing import List, Dict

from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI

from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


def custom_get_token_ids(text: str):
    tokenizer = TokenizerManage.get_tokenizer()
    return tokenizer.encode(text)


class RegoloChatModel(MaxKBBaseModel, BaseChatOpenAI):

    @staticmethod
    def is_cache_model():
        return False

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
        return RegoloChatModel(
            model=model_name,
            openai_api_base="https://api.regolo.ai/v1",
            openai_api_key=model_credential.get('api_key'),
            extra_body=optional_params
        )
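A hedged smoke-test sketch for the chat wrapper above: the key is a placeholder, 'Llama-3.1-8B-Instruct' is one of the Regolo models registered later in this diff, and invoke/content come from langchain's standard chat-model interface.

    # Hypothetical usage; performs a real network call with a valid key.
    llm = RegoloChatModel.new_instance('LLM', 'Llama-3.1-8B-Instruct', {'api_key': 'sk-...'})
    print(llm.invoke('Reply with one word: ready?').content)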
@@ -0,0 +1,58 @@
from typing import Dict

from openai import OpenAI

from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
from setting.models_provider.impl.base_tti import BaseTextToImage


def custom_get_token_ids(text: str):
    tokenizer = TokenizerManage.get_tokenizer()
    return tokenizer.encode(text)


class RegoloTextToImage(MaxKBBaseModel, BaseTextToImage):
    api_base: str
    api_key: str
    model: str
    params: dict

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.api_key = kwargs.get('api_key')
        self.api_base = "https://api.regolo.ai/v1"
        self.model = kwargs.get('model')
        self.params = kwargs.get('params')

    @staticmethod
    def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
        optional_params = {'params': {'size': '1024x1024', 'quality': 'standard', 'n': 1}}
        for key, value in model_kwargs.items():
            if key not in ['model_id', 'use_local', 'streaming']:
                optional_params['params'][key] = value
        return RegoloTextToImage(
            model=model_name,
            api_base="https://api.regolo.ai/v1",
            api_key=model_credential.get('api_key'),
            **optional_params,
        )

    def is_cache_model(self):
        return False

    def check_auth(self):
        chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
        chat.models.with_raw_response.list()

        # self.generate_image('Generate a picture of a kitten')

    def generate_image(self, prompt: str, negative_prompt: str = None):
        chat = OpenAI(api_key=self.api_key, base_url=self.api_base)
        res = chat.images.generate(model=self.model, prompt=prompt, **self.params)
        file_urls = []
        for content in res.data:
            url = content.url
            file_urls.append(url)

        return file_urls
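As a usage sketch, assuming a valid key: the keyword arguments mirror the RegoloTTIModelParams form fields (size, quality, n), and 'FLUX.1-dev' is registered as a Regolo text-to-image model later in this diff.

    # Hypothetical usage; generate_image returns a list of image URLs.
    tti = RegoloTextToImage.new_instance('TTI', 'FLUX.1-dev', {'api_key': 'sk-...'},
                                         size='1024x1024', quality='standard', n=1)
    urls = tti.generate_image('a watercolor lighthouse at dawn')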
@@ -0,0 +1,89 @@
# coding=utf-8
"""
@project: maxkb
@Author:虎
@file: openai_model_provider.py
@date:2024/3/28 16:26
@desc:
"""
import os

from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, ModelInfo, \
    ModelTypeConst, ModelInfoManage
from setting.models_provider.impl.regolo_model_provider.credential.embedding import \
    RegoloEmbeddingCredential
from setting.models_provider.impl.regolo_model_provider.credential.llm import RegoloLLMModelCredential
from setting.models_provider.impl.regolo_model_provider.credential.tti import \
    RegoloTextToImageModelCredential
from setting.models_provider.impl.regolo_model_provider.model.embedding import RegoloEmbeddingModel
from setting.models_provider.impl.regolo_model_provider.model.llm import RegoloChatModel
from setting.models_provider.impl.regolo_model_provider.model.tti import RegoloTextToImage
from smartdoc.conf import PROJECT_DIR
from django.utils.translation import gettext as _

openai_llm_model_credential = RegoloLLMModelCredential()
openai_tti_model_credential = RegoloTextToImageModelCredential()
model_info_list = [
    ModelInfo('Phi-4', '', ModelTypeConst.LLM,
              openai_llm_model_credential, RegoloChatModel),
    ModelInfo('DeepSeek-R1-Distill-Qwen-32B', '', ModelTypeConst.LLM,
              openai_llm_model_credential, RegoloChatModel),
    ModelInfo('maestrale-chat-v0.4-beta', '', ModelTypeConst.LLM,
              openai_llm_model_credential, RegoloChatModel),
    ModelInfo('Llama-3.3-70B-Instruct', '', ModelTypeConst.LLM,
              openai_llm_model_credential, RegoloChatModel),
    ModelInfo('Llama-3.1-8B-Instruct', '', ModelTypeConst.LLM,
              openai_llm_model_credential, RegoloChatModel),
    ModelInfo('DeepSeek-Coder-6.7B-Instruct', '', ModelTypeConst.LLM,
              openai_llm_model_credential, RegoloChatModel)
]
open_ai_embedding_credential = RegoloEmbeddingCredential()
model_info_embedding_list = [
    ModelInfo('gte-Qwen2', '', ModelTypeConst.EMBEDDING,
              open_ai_embedding_credential, RegoloEmbeddingModel),
]

model_info_tti_list = [
    ModelInfo('FLUX.1-dev', '', ModelTypeConst.TTI,
              openai_tti_model_credential, RegoloTextToImage),
    ModelInfo('sdxl-turbo', '', ModelTypeConst.TTI,
              openai_tti_model_credential, RegoloTextToImage),
]
model_info_manage = (
    ModelInfoManage.builder()
    .append_model_info_list(model_info_list)
    .append_default_model_info(
        ModelInfo('gpt-3.5-turbo', _('The latest gpt-3.5-turbo, updated with OpenAI adjustments'), ModelTypeConst.LLM,
                  openai_llm_model_credential, RegoloChatModel))
    .append_model_info_list(model_info_embedding_list)
    .append_default_model_info(model_info_embedding_list[0])
    .append_model_info_list(model_info_tti_list)
    .append_default_model_info(model_info_tti_list[0])
    .build()
)


class RegoloModelProvider(IModelProvider):

    def get_model_info_manage(self):
        return model_info_manage

    def get_model_provide_info(self):
        return ModelProvideInfo(provider='model_regolo_provider', name='Regolo', icon=get_file_content(
            os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'regolo_model_provider',
                         'icon', 'regolo_icon_svg')))
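The catalogue above is assembled with a fluent builder. A minimal, self-contained sketch of that pattern, with simplified names that are not the MaxKB classes themselves:

    # Each append_* method returns self, which is what enables the chained calls.
    class CatalogueBuilder:
        def __init__(self):
            self._models, self._defaults = [], []

        def append_model_info_list(self, infos):
            self._models.extend(infos)
            return self

        def append_default_model_info(self, info):
            self._defaults.append(info)
            return self

        def build(self):
            return {'models': self._models, 'defaults': self._defaults}

    catalogue = (CatalogueBuilder()
                 .append_model_info_list(['Phi-4', 'Llama-3.3-70B-Instruct'])
                 .append_default_model_info('Phi-4')
                 .build())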
@@ -16,5 +16,5 @@ class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
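This hunk is the pattern repeated across the provider files in this diff: provider-specific options move out of the constructor splat and into extra_body, a field langchain's ChatOpenAI forwards into the JSON body of each request, so options that are not valid constructor keywords still reach the backend. A minimal sketch, assuming langchain-openai and a hypothetical endpoint:

    from langchain_openai import ChatOpenAI

    optional_params = {'top_k': 40}  # provider-specific; not an OpenAI constructor kwarg
    llm = ChatOpenAI(
        model='example-model',                         # hypothetical model name
        openai_api_base='https://api.example.com/v1',  # hypothetical endpoint
        openai_api_key='sk-...',                       # placeholder key
        extra_body=optional_params,                    # merged into the request JSON body
    )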
@@ -34,5 +34,5 @@ class SiliconCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base=model_credential.get('api_base'),
             openai_api_key=model_credential.get('api_key'),
-            **optional_params
+            extra_body=optional_params
         )
@@ -33,21 +33,7 @@ class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base=model_credential.get('api_base'),
             openai_api_key=model_credential.get('api_key'),
-            **optional_params,
+            extra_body=optional_params,
             custom_get_token_ids=custom_get_token_ids
         )
         return azure_chat_open_ai
-
-    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
-        try:
-            return super().get_num_tokens_from_messages(messages)
-        except Exception as e:
-            tokenizer = TokenizerManage.get_tokenizer()
-            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
-    def get_num_tokens(self, text: str) -> int:
-        try:
-            return super().get_num_tokens(text)
-        except Exception as e:
-            tokenizer = TokenizerManage.get_tokenizer()
-            return len(tokenizer.encode(text))
@@ -16,5 +16,5 @@ class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
@@ -19,7 +19,7 @@ class VllmImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )

     def is_cache_model(self):
@@ -1,9 +1,10 @@
 # coding=utf-8

-from typing import Dict, List
+from typing import Dict, Optional, Sequence, Union, Any, Callable
 from urllib.parse import urlparse, ParseResult

 from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_core.tools import BaseTool

 from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -31,13 +32,19 @@ class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base=model_credential.get('api_base'),
             openai_api_key=model_credential.get('api_key'),
-            **optional_params,
+            streaming=True,
+            stream_usage=True,
+            extra_body=optional_params
         )
         return vllm_chat_open_ai

-    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+    def get_num_tokens_from_messages(
+            self,
+            messages: list[BaseMessage],
+            tools: Optional[
+                Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+            ] = None,
+    ) -> int:
         if self.usage_metadata is None or self.usage_metadata == {}:
             tokenizer = TokenizerManage.get_tokenizer()
             return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
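The widened signature above tracks langchain's newer get_num_tokens_from_messages, which gained a tools parameter, and falls back to a local tokenizer when the provider returned no usage metadata. A self-contained sketch of that fallback counting, with a whitespace tokenizer standing in for the MaxKB-internal TokenizerManage:

    from langchain_core.messages import HumanMessage, get_buffer_string

    def count_tokens(messages, encode=lambda text: text.split()):
        # one get_buffer_string per message, exactly as in the fallback above
        return sum(len(encode(get_buffer_string([m]))) for m in messages)

    print(count_tokens([HumanMessage(content='hello from vllm')]))  # -> 4 ("Human:" plus 3 words)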
@@ -16,5 +16,5 @@ class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
@@ -17,5 +17,5 @@ class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base=model_credential.get('api_base'),
             openai_api_key=model_credential.get('api_key'),
-            **optional_params
+            extra_body=optional_params
         )
@@ -27,7 +27,7 @@ class WenxinLLMModelParams(BaseForm):
         _step=0.01,
         precision=2)

-    max_tokens = forms.SliderField(
+    max_output_tokens = forms.SliderField(
         TooltipLabel(_('Output the maximum Tokens'),
                      _('Specify the maximum number of tokens that the model can generate')),
         required=True, default_value=1024,
@@ -19,7 +19,7 @@ class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )

     def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
@@ -34,7 +34,7 @@ class XinferenceChatModel(MaxKBBaseModel, BaseChatOpenAI):
             model=model_name,
             openai_api_base=base_url,
             openai_api_key=model_credential.get('api_key'),
-            **optional_params
+            extra_body=optional_params
         )

     def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
@@ -22,6 +22,9 @@ class XInferenceReranker(MaxKBBaseModel, BaseDocumentCompressor):
     """UID of the launched model"""
     api_key: Optional[str]

+    @staticmethod
+    def is_cache_model():
+        return False
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
         return XInferenceReranker(server_url=model_credential.get('server_url'), model_uid=model_name,
@@ -16,5 +16,5 @@ class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
             # stream_options={"include_usage": True},
             streaming=True,
             stream_usage=True,
-            **optional_params,
+            extra_body=optional_params
         )
@@ -171,6 +171,24 @@ class TeamMemberSerializer(ApiMixin, serializers.Serializer):
             }
         )

+    @staticmethod
+    def get_response_body_api():
+        return openapi.Schema(
+            type=openapi.TYPE_OBJECT,
+            properties={
+                'id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
+                'username': openapi.Schema(type=openapi.TYPE_STRING, title=_('Username'), description=_('Username')),
+                'email': openapi.Schema(type=openapi.TYPE_STRING, title=_('Email'), description=_('Email')),
+                'role': openapi.Schema(type=openapi.TYPE_STRING, title=_('Role'), description=_('Role')),
+                'is_active': openapi.Schema(type=openapi.TYPE_STRING, title=_('Is active'),
+                                            description=_('Is active')),
+                'team_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('team id'), description=_('team id')),
+                'user_id': openapi.Schema(type=openapi.TYPE_STRING, title=_('user id'), description=_('user id')),
+                'type': openapi.Schema(type=openapi.TYPE_STRING, title=_('member type'),
+                                       description=_('member type manage|member')),
+            }
+        )
+
     @transaction.atomic
     def batch_add_member(self, user_id_list: List[str], with_valid=True):
         """
@@ -38,6 +38,7 @@ class TeamMember(APIView):
     @swagger_auto_schema(operation_summary=_('Add member'),
+                         operation_id=_('Add member'),
                          request_body=TeamMemberSerializer().get_request_body_api(),
                          responses=result.get_default_response(),
                          tags=[_('Team')])
     @has_permissions(PermissionConstants.TEAM_CREATE)
     @log(menu='Team', operate='Add member',

@@ -53,6 +54,7 @@ class TeamMember(APIView):
     @swagger_auto_schema(operation_summary=_('Add members in batches'),
+                         operation_id=_('Add members in batches'),
                          request_body=TeamMemberSerializer.get_bach_request_body_api(),
                          responses=result.get_api_array_response(TeamMemberSerializer.get_response_body_api()),
                          tags=[_('Team')])
     @has_permissions(PermissionConstants.TEAM_CREATE)
     @log(menu='Team', operate='Add members in batches',

@@ -78,6 +80,7 @@ class TeamMember(APIView):
     @swagger_auto_schema(operation_summary=_('Update team member permissions'),
+                         operation_id=_('Update team member permissions'),
                          request_body=UpdateTeamMemberPermissionSerializer().get_request_body_api(),
                          responses=result.get_default_response(),
                          manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
                          tags=[_('Team')]
                          )

@@ -93,6 +96,7 @@ class TeamMember(APIView):
     @swagger_auto_schema(operation_summary=_('Remove member'),
+                         operation_id=_('Remove member'),
                          manual_parameters=TeamMemberSerializer.Operate.get_request_params_api(),
                          responses=result.get_default_response(),
                          tags=[_('Team')]
                          )
     @has_permissions(PermissionConstants.TEAM_DELETE)
@@ -31,7 +31,8 @@ class Model(APIView):
     @action(methods=['POST'], detail=False)
     @swagger_auto_schema(operation_summary=_('Create model'),
                          operation_id=_('Create model'),
-                         request_body=ModelCreateApi.get_request_body_api()
+                         request_body=ModelCreateApi.get_request_body_api(),
+                         manual_parameters=result.get_api_response(ModelCreateApi.get_request_body_api())
                          , tags=[_('model')])
     @has_permissions(PermissionConstants.MODEL_CREATE)
     @log(menu='model', operate='Create model',

@@ -45,7 +46,8 @@ class Model(APIView):
     @action(methods=['PUT'], detail=False)
     @swagger_auto_schema(operation_summary=_('Download model, trial only with Ollama platform'),
                          operation_id=_('Download model, trial only with Ollama platform'),
-                         request_body=ModelCreateApi.get_request_body_api()
+                         request_body=ModelCreateApi.get_request_body_api(),
+                         responses=result.get_api_response(ModelCreateApi.get_request_body_api())
                          , tags=[_('model')])
     @has_permissions(PermissionConstants.MODEL_CREATE)
     def put(self, request: Request):

@@ -123,7 +125,8 @@ class Model(APIView):
     @action(methods=['PUT'], detail=False)
     @swagger_auto_schema(operation_summary=_('Update model'),
                          operation_id=_('Update model'),
-                         request_body=ModelEditApi.get_request_body_api()
+                         request_body=ModelEditApi.get_request_body_api(),
+                         responses=result.get_api_response(ModelEditApi.get_request_body_api())
                          , tags=[_('model')])
     @has_permissions(PermissionConstants.MODEL_CREATE)
     @log(menu='model', operate='Update model',

@@ -166,7 +169,8 @@ class Provide(APIView):
     @swagger_auto_schema(operation_summary=_('Call the supplier function to obtain form data'),
                          operation_id=_('Call the supplier function to obtain form data'),
                          manual_parameters=ProvideApi.get_request_params_api(),
-                         request_body=ProvideApi.get_request_body_api()
+                         request_body=ProvideApi.get_request_body_api(),
+                         responses=result.get_api_response(ProvideApi.get_request_body_api())
                          , tags=[_('model')])
     @has_permissions(PermissionConstants.MODEL_READ)
     @log(menu='model', operate='Call the supplier function to obtain form data')
@@ -93,7 +93,8 @@ class Config(dict):
         'SANDBOX': False,
         'LOCAL_MODEL_HOST': '127.0.0.1',
         'LOCAL_MODEL_PORT': '11636',
-        'LOCAL_MODEL_PROTOCOL': "http"
+        'LOCAL_MODEL_PROTOCOL': "http",
+        'LOCAL_MODEL_HOST_WORKER': 1
     }

@@ -113,7 +114,8 @@ class Config(dict):
             "ENGINE": self.get('DB_ENGINE'),
             "POOL_OPTIONS": {
                 "POOL_SIZE": 20,
-                "MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW'))
+                "MAX_OVERFLOW": int(self.get('DB_MAX_OVERFLOW')),
+                'RECYCLE': 30 * 60
             }
         }
@@ -126,6 +126,10 @@ CACHES = {
     "token_cache": {
         'BACKEND': 'common.cache.file_cache.FileCache',
         'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "token_cache")  # folder path
     },
+    'captcha_cache': {
+        'BACKEND': 'common.cache.file_cache.FileCache',
+        'LOCATION': os.path.join(PROJECT_DIR, 'data', 'cache', "captcha_cache")  # folder path
+    }
 }
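The new captcha_cache is an ordinary file-backed Django cache, so the rest of the feature can use the standard set/get API. A small round-trip sketch (running it requires configured Django settings; the 5-minute TTL mirrors how the login serializer below uses it):

    from django.core import cache

    captcha_cache = cache.caches['captcha_cache']
    captcha_cache.set('LOGIN:ab3d', 'AB3D', timeout=5 * 60)  # expires after five minutes
    assert captcha_cache.get('LOGIN:ab3d') == 'AB3D'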
@@ -6,18 +6,22 @@
 @date:2023/9/5 16:32
 @desc:
 """
+import base64
 import datetime
 import os
+import random
 import re
 import uuid

+from captcha.image import ImageCaptcha
 from django.conf import settings
 from django.core import validators, signing, cache
+from django.core.mail import send_mail
 from django.core.mail.backends.smtp import EmailBackend
 from django.db import transaction
 from django.db.models import Q, QuerySet, Prefetch
 from django.utils.translation import get_language
 from django.utils.translation import gettext_lazy as _, to_locale
 from drf_yasg import openapi
 from rest_framework import serializers

@@ -30,7 +34,7 @@ from common.exception.app_exception import AppApiException
 from common.mixins.api_mixin import ApiMixin
 from common.models.db_model_manage import DBModelManage
 from common.response.result import get_api_response
-from common.util.common import valid_license
+from common.util.common import valid_license, get_random_chars
 from common.util.field_message import ErrMessage
 from common.util.lock import lock
 from dataset.models import DataSet, Document, Paragraph, Problem, ProblemParagraphMapping

@@ -39,9 +43,29 @@ from function_lib.models.function import FunctionLib
 from setting.models import Team, SystemSetting, SettingType, Model, TeamMember, TeamMemberPermission
 from smartdoc.conf import PROJECT_DIR
 from users.models.user import User, password_encrypt, get_user_dynamics_permission
 from django.utils.translation import gettext_lazy as _, gettext, to_locale
 from django.utils.translation import get_language

 user_cache = cache.caches['user_cache']
+captcha_cache = cache.caches['captcha_cache']
+
+
+class CaptchaSerializer(ApiMixin, serializers.Serializer):
+    @staticmethod
+    def get_response_body_api():
+        return get_api_response(openapi.Schema(
+            type=openapi.TYPE_STRING,
+            title="captcha",
+            default="xxxx",
+            description="captcha"
+        ))
+
+    @staticmethod
+    def generate():
+        chars = get_random_chars()
+        image = ImageCaptcha()
+        data = image.generate(chars)
+        captcha = base64.b64encode(data.getbuffer())
+        captcha_cache.set(f"LOGIN:{chars.lower()}", chars, timeout=5 * 60)
+        return 'data:image/png;base64,' + captcha.decode()
+
+
 class SystemSerializer(ApiMixin, serializers.Serializer):

@@ -71,6 +95,8 @@ class LoginSerializer(ApiMixin, serializers.Serializer):

     password = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Password")))

+    captcha = serializers.CharField(required=True, error_messages=ErrMessage.char(_("captcha")))
+
     def is_valid(self, *, raise_exception=False):
         """
         Validate parameters

@@ -78,6 +104,10 @@ class LoginSerializer(ApiMixin, serializers.Serializer):
         :return: User information
         """
         super().is_valid(raise_exception=True)
+        captcha = self.data.get('captcha')
+        captcha_value = captcha_cache.get(f"LOGIN:{captcha.lower()}")
+        if captcha_value is None:
+            raise AppApiException(1005, _("Captcha code error or expiration"))
         username = self.data.get("username")
         password = password_encrypt(self.data.get("password"))
         user = QuerySet(User).filter(Q(username=username,

@@ -109,7 +139,8 @@ class LoginSerializer(ApiMixin, serializers.Serializer):
             required=['username', 'password'],
             properties={
                 'username': openapi.Schema(type=openapi.TYPE_STRING, title=_("Username"), description=_("Username")),
-                'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password"))
+                'password': openapi.Schema(type=openapi.TYPE_STRING, title=_("Password"), description=_("Password")),
+                'captcha': openapi.Schema(type=openapi.TYPE_STRING, title=_("captcha"), description=_("captcha"))
             }
         )
@@ -6,6 +6,7 @@ app_name = "user"
 urlpatterns = [
     path('profile', views.Profile.as_view()),
     path('user', views.User.as_view(), name="profile"),
+    path('user/captcha', views.CaptchaView.as_view(), name='captcha'),
     path('user/language', views.SwitchUserLanguageView.as_view(), name='language'),
     path('user/list', views.User.Query.as_view()),
     path('user/login', views.Login.as_view(), name='login'),
@@ -26,7 +26,7 @@ from smartdoc.settings import JWT_AUTH
 from users.serializers.user_serializers import RegisterSerializer, LoginSerializer, CheckCodeSerializer, \
     RePasswordSerializer, \
     SendEmailSerializer, UserProfile, UserSerializer, UserManageSerializer, UserInstanceSerializer, SystemSerializer, \
-    SwitchLanguageSerializer
+    SwitchLanguageSerializer, CaptchaSerializer
 from users.views.common import get_user_operation_object, get_re_password_details

 user_cache = cache.caches['user_cache']
@@ -84,7 +84,7 @@ class SwitchUserLanguageView(APIView):
                                                  description=_("language")),
                              }
                          ),
-                         responses=RePasswordSerializer().get_response_body_api(),
+                         responses=result.get_default_response(),
                          tags=[_("User management")])
     @log(menu='User management', operate='Switch Language',
          get_operation_object=lambda r, k: {'name': r.user.username})

@@ -111,7 +111,7 @@ class ResetCurrentUserPasswordView(APIView):
                                                  description=_("Password"))
                              }
                          ),
-                         responses=RePasswordSerializer().get_response_body_api(),
+                         responses=result.get_default_response(),
                          tags=[_("User management")])
     @log(menu='User management', operate='Modify current user password',
          get_operation_object=lambda r, k: {'name': r.user.username},

@@ -170,6 +170,18 @@ def _get_details(request):
     }


+class CaptchaView(APIView):
+
+    @action(methods=['GET'], detail=False)
+    @swagger_auto_schema(operation_summary=_("Obtain graphical captcha"),
+                         operation_id=_("Obtain graphical captcha"),
+                         responses=CaptchaSerializer().get_response_body_api(),
+                         security=[],
+                         tags=[_("User management")])
+    def get(self, request: Request):
+        return result.success(CaptchaSerializer().generate())
+
+
 class Login(APIView):

     @action(methods=['POST'], detail=False)
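Taken together with the new route and serializer, the login flow becomes: fetch the captcha image, read its characters, then post them alongside the credentials. A hedged client-side sketch (the /api prefix and the 'data' envelope key are assumptions not shown in this diff):

    import requests

    base = 'http://localhost:8080/api'                                 # assumed prefix
    captcha_png = requests.get(f'{base}/user/captcha').json()['data']  # data:image/png;base64,...
    # ...a human reads the characters from the rendered image...
    resp = requests.post(f'{base}/user/login', json={
        'username': 'admin',
        'password': 'secret',
        'captcha': 'xxxx',  # the characters shown in the image
    })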
@@ -25,7 +25,7 @@ RUN python3 -m venv /opt/py3 && \
     pip install poetry==1.8.5 --break-system-packages && \
     poetry config virtualenvs.create false && \
     . /opt/py3/bin/activate && \
-    if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "^2.6.0+cpu", source = "pytorch"}/g' pyproject.toml; fi && \
+    if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "2.6.0+cpu", source = "pytorch"}/g' pyproject.toml; fi && \
     poetry install && \
     export MAXKB_CONFIG_TYPE=ENV && python3 /opt/maxkb/app/apps/manage.py compilemessages

@@ -70,7 +70,8 @@ RUN chmod 755 /opt/maxkb/app/installer/run-maxkb.sh && \
     useradd --no-create-home --home /opt/maxkb/app/sandbox sandbox -g root && \
     chown -R sandbox:root /opt/maxkb/app/sandbox && \
     chmod g-x /usr/local/bin/* /usr/bin/* /bin/* /usr/sbin/* /sbin/* /usr/lib/postgresql/15/bin/* && \
-    chmod g+x /usr/local/bin/python*
+    chmod g+x /usr/local/bin/python* && \
+    find /etc/ -type f ! -path '/etc/resolv.conf' ! -path '/etc/hosts' | xargs chmod g-rx

 EXPOSE 8080
Some files were not shown because too many files have changed in this diff.