Compare commits


3 Commits

Author SHA1 Message Date
zhangshaohu 7c001221b1 Merge branch 'main' into release-1.0 2024-04-19 16:54:57 +08:00
shaohuzhang1 78cd31ea60 Merge branch 'main' into release-1.0 2024-04-18 17:07:31 +08:00
shaohuzhang1 02050a2cca Merge branch 'main' into release-1.0 2024-04-18 16:19:45 +08:00
1194 changed files with 8834 additions and 109102 deletions

View File

@@ -1,50 +1,61 @@
name: 'Bug Report'
description: 'Report a Bug'
title: "[Bug] "
assignees: zyyfit
name: Bug Submission
description: Report a product defect to help us improve
title: "[BUG]"
labels: "Type: Defect"
assignees: baixin513
body:
- type: markdown
id: contacts_title
attributes:
value: "## Contact Information"
value: "## 联系方式"
- type: input
id: contacts
validations:
required: false
attributes:
label: "Contact Information"
description: "The ways to quickly contact you: WeChat group number and nickname, email, etc."
label: "联系方式"
description: "可以快速联系到您的方式:交流群号及昵称、邮箱等"
- type: markdown
id: environment
attributes:
value: "## Environment Information"
value: "## 环境信息"
- type: input
id: version
validations:
required: true
attributes:
label: "MaxKB Version"
description: "Log in to the MaxKB Web Console and check the current version on the `About` page in the top right corner."
label: "MaxKB 版本"
description: "登录 MaxKB Web 控制台,在右上角关于页面查看当前版本。"
- type: markdown
id: details
attributes:
value: "## Detailed information"
value: "## 详细信息"
- type: textarea
id: what-happened
attributes:
label: "Problem Description"
description: "Briefly describe the issue youve encountered."
label: "问题描述"
description: "简要描述您碰到的问题"
validations:
required: true
- type: textarea
id: how-happened
attributes:
label: "Steps to Reproduce"
description: "How can this issue be reproduced."
label: "重现步骤"
description: "如果操作可以重现该问题"
validations:
required: true
- type: textarea
id: expect
attributes:
label: "The expected correct result"
label: "期待的正确结果"
- type: textarea
id: logs
attributes:
label: "Related log output"
description: "Please paste any relevant log output here. It will automatically be formatted as code, so no backticks are necessary."
label: "相关日志输出"
description: "请复制并粘贴任何相关的日志输出。 这将自动格式化为代码,因此无需反引号。"
render: shell
- type: textarea
id: additional-information
attributes:
label: "Additional Information"
description: "If you have any additional information to provide, you can include it here (screenshots, videos, etc., are welcome)."
label: "附加信息"
description: "如果你还有其他需要提供的信息,可以在这里填写(可以提供截图、视频等)。"

View File

@@ -1,5 +1,5 @@
blank_issues_enabled: false
contact_links:
- name: Questions & Discussions
url: https://github.com/1Panel-dev/MaxKB/discussions
about: Raise questions about the installation, deployment, use and other aspects of the project.
- name: Other questions about the MaxKB project
url: https://bbs.fit2cloud.com/c/mk/11
about: If you have other questions about MaxKB, you are welcome to ask them in our official community.

View File

@@ -1,29 +1,36 @@
name: 'Feature Request'
description: 'Suggest an idea'
title: '[Feature] '
name: Feature Request
description: Propose ideas and suggestions for this project
title: "[FEATURE]"
labels: enhancement
assignees: baixin513
body:
- type: markdown
id: environment
attributes:
value: "## Environment Information"
value: "## 环境信息"
- type: input
id: version
validations:
required: true
attributes:
label: "MaxKB Version"
description: "Log in to the MaxKB Web Console and check the current version on the `About` page in the top right corner."
label: "MaxKB 版本"
description: "登录 MaxKB Web 控制台,在右上角关于页面查看当前版本。"
- type: markdown
id: details
attributes:
value: "## Detailed information"
value: "## 详细信息"
- type: textarea
id: description
attributes:
label: "Please describe your needs or suggestions for improvements"
label: "请描述您的需求或者改进建议"
validations:
required: true
- type: textarea
id: solution
attributes:
label: "Please describe the solution you suggest"
label: "请描述你建议的实现方案"
- type: textarea
id: additional-information
attributes:
label: "Additional Information"
description: "If you have any additional information to provide, you can include it here (screenshots, videos, etc., are welcome)."
label: "附加信息"
description: "如果你还有其他需要提供的信息,可以在这里填写(可以提供截图、视频等)。"

View File

@@ -1,17 +0,0 @@
version: 2
updates:
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
timezone: "Asia/Shanghai"
day: "friday"
target-branch: "v2"
groups:
python-dependencies:
patterns:
- "*"
# ignore:
# - dependency-name: "pymupdf"
# versions: ["*"]

View File

@@ -14,7 +14,7 @@ on:
- linux/amd64,linux/arm64
jobs:
build-and-push-python-pg-to-ghcr:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- name: Check Disk Space
run: df -h
@@ -39,7 +39,7 @@ jobs:
run: |
DOCKER_IMAGE=ghcr.io/1panel-dev/maxkb-python-pg
DOCKER_PLATFORMS=${{ github.event.inputs.architecture }}
TAG_NAME=python3.11-pg15.8
TAG_NAME=python3.11-pg15.6
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
echo ::set-output name=docker_image::${DOCKER_IMAGE}
echo ::set-output name=version::${TAG_NAME}
@@ -50,9 +50,6 @@ jobs:
${DOCKER_IMAGE_TAGS} .
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
# Until https://github.com/tonistiigi/binfmt/issues/215
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry
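One note on the `echo ::set-output name=...` lines kept in this workflow: GitHub has deprecated the `set-output` workflow command in favor of appending `name=value` pairs to the file referenced by `$GITHUB_OUTPUT`. A minimal sketch of the modern equivalent, written in Python for illustration (a step like this would need `shell: python`; the values mirror the Prepare step above):

```python
import os

docker_image = "ghcr.io/1panel-dev/maxkb-python-pg"
tag_name = "python3.11-pg15.6"

# Appending name=value lines to $GITHUB_OUTPUT replaces `echo ::set-output`.
with open(os.environ["GITHUB_OUTPUT"], "a", encoding="utf-8") as fh:
    fh.write(f"docker_image={docker_image}\n")
    fh.write(f"version={tag_name}\n")
```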

View File

@@ -19,7 +19,7 @@ on:
jobs:
build-and-push-vector-model-to-ghcr:
runs-on: ubuntu-22.04
runs-on: ubuntu-latest
steps:
- name: Check Disk Space
run: df -h
@@ -55,9 +55,6 @@ jobs:
${DOCKER_IMAGE_TAGS} .
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
with:
# Until https://github.com/tonistiigi/binfmt/issues/215
image: tonistiigi/binfmt:qemu-v7.0.0-28
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry

View File

@@ -1,19 +1,12 @@
name: build-and-push
run-name: Build image and push to registry ${{ github.event.inputs.dockerImageTag }} (${{ github.event.inputs.registry }})
on:
workflow_dispatch:
inputs:
dockerImageTag:
description: 'Image Tag'
default: 'v1.10.7-dev'
description: 'Docker Image Tag'
default: 'v1.1.0-dev'
required: true
dockerImageTagWithLatest:
description: 'Whether to also publish the latest tag; choose for official releases only, never for test builds'
default: false
required: true
type: boolean
architecture:
description: 'Architecture'
required: true
@@ -26,11 +19,11 @@ on:
registry:
description: 'Push To Registry'
required: true
default: 'fit2cloud-registry'
default: 'dockerhub'
type: choice
options:
- fit2cloud-registry
- dockerhub
- fit2cloud-registry
- dockerhub, fit2cloud-registry
jobs:
@@ -59,17 +52,16 @@ jobs:
- name: Prepare
id: prepare
run: |
DOCKER_IMAGE=${{ secrets.FIT2CLOUD_REGISTRY_HOST }}/maxkb/maxkb
DOCKER_IMAGE=registry-hkproxy.fit2cloud.com/maxkb/maxkb
DOCKER_PLATFORMS=${{ github.event.inputs.architecture }}
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
else
if [[ ${TAG_NAME} == *dev* ]]; then
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
else
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
fi
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
${DOCKER_IMAGE_TAGS} .
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -84,12 +76,11 @@ jobs:
- name: Login to FIT2CLOUD Registry
uses: docker/login-action@v3
with:
registry: ${{ secrets.FIT2CLOUD_REGISTRY_HOST }}
registry: registry-hkproxy.fit2cloud.com
username: ${{ secrets.FIT2CLOUD_REGISTRY_USERNAME }}
password: ${{ secrets.FIT2CLOUD_REGISTRY_PASSWORD }}
- name: Docker Buildx (build-and-push)
run: |
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
build-and-push-to-dockerhub:
@@ -119,15 +110,14 @@ jobs:
run: |
DOCKER_IMAGE=1panel/maxkb
DOCKER_PLATFORMS=${{ github.event.inputs.architecture }}
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
TAG_NAME_WITH_LATEST=${{ github.event.inputs.dockerImageTagWithLatest }}
if [[ ${TAG_NAME_WITH_LATEST} == 'true' ]]; then
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:${TAG_NAME%%.*}"
else
TAG_NAME=${{ github.event.inputs.dockerImageTag }}
if [[ ${TAG_NAME} == *dev* ]]; then
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME}"
else
DOCKER_IMAGE_TAGS="--tag ${DOCKER_IMAGE}:${TAG_NAME} --tag ${DOCKER_IMAGE}:latest"
fi
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} --memory-swap -1 \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=`git rev-parse --short HEAD` --no-cache \
echo ::set-output name=buildx_args::--platform ${DOCKER_PLATFORMS} \
--build-arg DOCKER_IMAGE_TAG=${{ github.event.inputs.dockerImageTag }} --build-arg BUILD_AT=$(TZ=Asia/Shanghai date +'%Y-%m-%dT%H:%M') --build-arg GITHUB_COMMIT=${GITHUB_SHA::8} --no-cache \
${DOCKER_IMAGE_TAGS} .
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
@@ -146,5 +136,4 @@ jobs:
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Docker Buildx (build-and-push)
run: |
sudo sync && echo 3 | sudo tee /proc/sys/vm/drop_caches && free -m
docker buildx build --output "type=image,push=true" ${{ steps.prepare.outputs.buildx_args }} -f installer/Dockerfile
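The tag selection in both jobs above leans on bash parameter expansion: `${TAG_NAME%%.*}` strips the longest suffix starting at the first `.`, so `v1.10.7` yields the rolling major tag `v1`, while `*dev*` tags are pushed alone and release tags also get `latest`. A Python sketch of the same decision table (image and tag values are illustrative):

```python
def docker_tags(image: str, tag_name: str, with_latest: bool) -> list[str]:
    """Mirror the Prepare step's branching over the image tag."""
    if with_latest:
        major = tag_name.split(".", 1)[0]  # bash: ${TAG_NAME%%.*}
        return [f"{image}:{tag_name}", f"{image}:{major}"]
    if "dev" in tag_name:                  # bash: [[ ${TAG_NAME} == *dev* ]]
        return [f"{image}:{tag_name}"]
    return [f"{image}:{tag_name}", f"{image}:latest"]

print(docker_tags("1panel/maxkb", "v1.10.7", True))      # [..., '1panel/maxkb:v1']
print(docker_tags("1panel/maxkb", "v1.1.0-dev", False))  # ['1panel/maxkb:v1.1.0-dev']
```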

View File

@@ -1,17 +0,0 @@
on:
push:
branches:
- 'pr@**'
- 'repr@**'
name: Automatically create PRs for specific branch names
jobs:
generic_handler:
name: Automatically create PR
runs-on: ubuntu-latest
steps:
- name: Create pull request
uses: jumpserver/action-generic-handler@master
env:
GITHUB_TOKEN: ${{ secrets.GH_TOKEN }}

View File

@@ -1,14 +0,0 @@
name: Issue Translator
on:
issue_comment:
types: [created]
issues:
types: [opened]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: usthe/issues-translate-action@v2.7
with:
IS_MODIFY_TITLE: true
BOT_GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }}

View File

@@ -1,28 +0,0 @@
name: LLM Code Review
permissions:
contents: read
pull-requests: write
on:
pull_request:
types: [opened, reopened, synchronize]
jobs:
llm-code-review:
runs-on: ubuntu-latest
steps:
- uses: fit2cloud/LLM-CodeReview-Action@main
env:
GITHUB_TOKEN: ${{ secrets.FIT2CLOUDRD_LLM_CODE_REVIEW_TOKEN }}
OPENAI_API_KEY: ${{ secrets.ALIYUN_LLM_API_KEY }}
LANGUAGE: English
OPENAI_API_ENDPOINT: https://dashscope.aliyuncs.com/compatible-mode/v1
MODEL: qwen2.5-coder-3b-instruct
PROMPT: "Please check the following code for any irregularities, potential issues, or optimization suggestions, and provide your answers in English."
top_p: 1
temperature: 1
# max_tokens: 10000
MAX_PATCH_LENGTH: 10000
IGNORE_PATTERNS: "/node_modules,*.md,/dist,/.github"
FILE_PATTERNS: "*.java,*.go,*.py,*.vue,*.ts,*.js,*.css,*.scss,*.html"
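The review action talks to Alibaba Cloud DashScope through its OpenAI-compatible endpoint, so any OpenAI-style client can reach the same model. A minimal sketch with the `openai` Python package (model, endpoint, and prompt are taken from the workflow above; the diff snippet and API key are placeholders):

```python
from openai import OpenAI  # pip install openai

client = OpenAI(
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key="sk-placeholder",  # the workflow reads secrets.ALIYUN_LLM_API_KEY
)

diff_snippet = "def add(a, b):\n    return a - b  # placeholder patch to review"
resp = client.chat.completions.create(
    model="qwen2.5-coder-3b-instruct",
    messages=[
        {"role": "system",
         "content": "Please check the following code for any irregularities, "
                    "potential issues, or optimization suggestions."},
        {"role": "user", "content": diff_snippet},
    ],
    top_p=1,
    temperature=1,
)
print(resp.choices[0].message.content)
```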

View File

@@ -1,10 +1,5 @@
name: Typos Check
on:
push:
branches:
- main
pull_request:
types: [opened, synchronize, reopened]
on: [push, pull_request]
jobs:
run:

.gitignore (vendored, 4 lines changed)
View File

@@ -178,10 +178,6 @@ ui/node_modules
ui/dist
apps/static
models/
apps/xpack
!apps/**/models/
data
.dev
poetry.lock
apps/setting/models_provider/impl/*/icon/
tmp/

View File

@@ -1,4 +0,0 @@
[files]
extend-exclude = [
'apps/setting/models_provider/impl/*/icon/*'
]

View File

@@ -1,128 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.
We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.
## Our Standards
Examples of behavior that contributes to a positive environment for our
community include:
* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
overall community
Examples of unacceptable behavior include:
* The use of sexualized language or imagery, and sexual attention or
advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Enforcement Responsibilities
Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.
Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.
## Scope
This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
support@fit2cloud.com.
All complaints will be reviewed and investigated promptly and fairly.
All community leaders are obligated to respect the privacy and security of the
reporter of any incident.
## Enforcement Guidelines
Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:
### 1. Correction
**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.
**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.
### 2. Warning
**Community Impact**: A violation through a single incident or series
of actions.
**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.
### 3. Temporary Ban
**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.
**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.
### 4. Permanent Ban
**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.
**Consequence**: A permanent ban from any sort of public interaction within
the community.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.
Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.

View File

@@ -1,30 +0,0 @@
# Contributing
As a contributor, you should agree that:
- The producer can adjust the open-source agreement to be more strict or relaxed as deemed necessary.
- Your contributed code may be used for commercial purposes, including but not limited to its cloud business operations.
## Create pull request
PRs are always welcome, even if they only contain small fixes like typos or a few lines of code. If a change will take significant effort, please file an issue and start a discussion before beginning work on it.
Please submit a PR broken down into small changes bit by bit. A PR consisting of a lot of features and code changes may be hard to review. It is recommended to submit PRs in an incremental fashion.
This [development guideline](https://github.com/1Panel-dev/MaxKB/wiki/3-%E5%BC%80%E5%8F%91%E7%8E%AF%E5%A2%83%E6%90%AD%E5%BB%BA) contains information about repository structure, how to set up development environment, how to run it, and more.
Note: If you split your pull request into small changes, please make sure that any change merged to master will not break anything. Otherwise, it cannot be merged until the feature is complete.
## Report issues
Reporting issues is a great way to contribute. Well-written and complete bug reports are always welcome! Please open an issue and follow the template to fill in the required information.
Before opening any issue, please search the existing issues to avoid submitting a duplicate.
If you find a match, you can "subscribe" to it to get notified on updates. If you have additional helpful information about the issue, please leave a comment.
When reporting issues, always include:
* Which version you are using.
* Steps to reproduce the issue.
* Screenshots or log files, if needed.
Because the issues are open to the public, when submitting files, be sure to remove any sensitive information, e.g. user name, password, IP address, and company name. You can
replace those parts with "REDACTED" or other strings like "****".

README.md (132 lines changed)
View File

@@ -1,126 +1,76 @@
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
<h3 align="center">Open-source platform for building enterprise-grade agents</h3>
<h3 align="center">A powerful and easy-to-use enterprise-grade agent platform</h3>
<p align="center"><a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a></p>
<h3 align="center">基于 LLM 大语言模型的知识库问答系统</h3>
<p align="center">
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
<a href="https://www.gnu.org/licenses/old-licenses/gpl-3.0"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
<a href="https://app.codacy.com/gh/1Panel-dev/maxkb?utm_source=github.com&utm_medium=referral&utm_content=1Panel-dev/maxkb&utm_campaign=Badge_Grade_Dashboard"><img src="https://app.codacy.com/project/badge/Grade/da67574fd82b473992781d1386b937ef" alt="Codacy"></a>
<a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?color=%231890FF&style=flat-square" alt="Stars"></a>
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a><br/>
[<a href="/README_CN.md">中文(简体)</a>] | [<a href="/README.md">English</a>]
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
</p>
<hr/>
MaxKB = Max Knowledge Brain, it is an open-source platform for building enterprise-grade agents. MaxKB integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
MaxKB is a knowledge base question-answering system based on large language models. MaxKB = Max Knowledge Base; it aims to become the strongest brain of the enterprise.
- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting, vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
- **Multi Modal**: Native support for input and output text, image, audio and video.
- **Out of the box**: supports direct document upload and automatic crawling of online documents, with automatic text splitting and vectorization, for a good interactive Q&A experience;
- **Seamless embedding**: supports zero-code rapid embedding into third-party business systems;
- **Multi-model support**: supports mainstream large models, including local private models (such as Llama 2, Llama 3, Qwen), as well as OpenAI, Azure OpenAI, Kimi, and Baidu Qianfan.
## Quick start
## Quick Start
Execute the script below to start a MaxKB container using Docker:
```
docker run -d --name=maxkb -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data 1panel/maxkb
```bash
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages 1panel/maxkb
# Username: admin
# Password: MaxKB@123..
```
Access MaxKB web interface at `http://your_server_ip:8080` with default admin credentials:
You can also deploy MaxKB + Ollama + Llama 2 quickly through the [1Panel App Store](https://apps.fit2cloud.com/1panel), bringing a knowledge base Q&A system backed by a local large model online within 30 minutes and embedding it into third-party business systems.
- username: admin
- password: MaxKB@123..
You can also try it online: [DataEase Assistant](https://dataease.io/docs/v2/), an intelligent Q&A system built on MaxKB that is embedded in the DataEase product and its online documentation.
If users in China run into Docker image pull failures, please follow the [offline installation guide](https://maxkb.cn/docs/installation/offline_installtion/) to install.
If you have more questions, check the user manual or talk to us on the forum:
## Screenshots
- [User Manual](https://github.com/1Panel-dev/MaxKB/wiki/1-%E5%AE%89%E8%A3%85%E9%83%A8%E7%BD%B2)
- [Forum Support](https://bbs.fit2cloud.com/c/mk/11)
- [Demo Video](https://www.bilibili.com/video/BV1BE421M7YM/)
## UI Showcase
<table style="border-collapse: collapse; border: 1px solid black;">
<tr>
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/overview.png" alt="MaxKB Demo1" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/screenshot-models.png" alt="MaxKB Demo2" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/80892890/2b893a25-ae46-48da-b6a1-61d23015565e" alt="MaxKB Demo1" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/80892890/3e50e7ff-cdc4-4a37-b430-d84975f11d4e" alt="MaxKB Demo2" /></td>
</tr>
<tr>
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/screenshot-knowledge.png" alt="MaxKB Demo3" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://maxkb.hk/images/screenshot-function.png" alt="MaxKB Demo4" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/80892890/dfdcc03f-ef36-4f75-bb82-797c0f9da1ad" alt="MaxKB Demo3" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/80892890/884a9db1-3f93-4013-bc8f-a3f0dbcfeb2f" alt="MaxKB Demo4" /></td>
</tr>
</table>
## Technical stack
## Technical Stack
- Frontend: [Vue.js](https://vuejs.org/)
- Backend: [Python / Django](https://www.djangoproject.com/)
- LLM Framework: [LangChain](https://www.langchain.com/)
- Database: [PostgreSQL + pgvector](https://www.postgresql.org/)
## Feature Comparison
<table style="width: 100%;">
<tr>
<th align="center">Feature</th>
<th align="center">LangChain</th>
<th align="center">Dify.AI</th>
<th align="center">Flowise</th>
<th align="center">MaxKB <br>Built upon LangChain</th>
</tr>
<tr>
<td align="center">Supported LLMs</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
<td align="center">Rich Variety</td>
</tr>
<tr>
<td align="center">RAG Engine</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Agent</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Workflow</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">Observability</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
<tr>
<td align="center">SSO/Access control</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center">✅ (Pro)</td>
</tr>
<tr>
<td align="center">On-premise Deployment</td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
<td align="center"></td>
</tr>
</table>
- Frontend: [Vue.js](https://cn.vuejs.org/)
- Backend: [Python / Django](https://www.djangoproject.com/)
- LangChain: [LangChain](https://www.langchain.com/)
- Vector database: [PostgreSQL / pgvector](https://www.postgresql.org/)
- Large models: Azure OpenAI, OpenAI, Baidu Qianfan, [Ollama](https://github.com/ollama/ollama), Qwen, Kimi
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=1Panel-dev/MaxKB&type=Date)](https://star-history.com/#1Panel-dev/MaxKB&Date)
## Our Other Open-Source Products
- [JumpServer](https://github.com/jumpserver/jumpserver/) - a popular open-source bastion host
- [DataEase](https://github.com/dataease/dataease/) - open-source data visualization and analytics for everyone
- [MeterSphere](https://github.com/metersphere/metersphere/) - a one-stop open-source automated testing platform
- [1Panel](https://github.com/1panel-dev/1panel/) - a modern, open-source Linux server management panel
- [Halo](https://github.com/halo-dev/halo/) - a powerful and easy-to-use open-source website builder
## License
Copyright (c) 2014-2024 飞致云 FIT2CLOUD, All rights reserved.
Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
<https://www.gnu.org/licenses/gpl-3.0.html>

View File

@@ -1,89 +0,0 @@
<p align="center"><img src= "https://github.com/1Panel-dev/maxkb/assets/52996290/c0694996-0eed-40d8-b369-322bf2a380bf" alt="MaxKB" width="300" /></p>
<h3 align="center">A powerful and easy-to-use enterprise-grade agent platform</h3>
<p align="center">
<a href="https://trendshift.io/repositories/9113" target="_blank"><img src="https://trendshift.io/api/badge/repositories/9113" alt="1Panel-dev%2FMaxKB | Trendshift" style="width: 250px; height: auto;" /></a>
</p>
<p align="center">
<a href="README_EN.md"><img src="https://img.shields.io/badge/English_README-blue" alt="English README"></a>
<a href="https://www.gnu.org/licenses/gpl-3.0.html#license-text"><img src="https://img.shields.io/github/license/1Panel-dev/maxkb?color=%231890FF" alt="License: GPL v3"></a>
<a href="https://github.com/1Panel-dev/maxkb/releases/latest"><img src="https://img.shields.io/github/v/release/1Panel-dev/maxkb" alt="Latest release"></a>
<a href="https://github.com/1Panel-dev/maxkb"><img src="https://img.shields.io/github/stars/1Panel-dev/maxkb?style=flat-square" alt="Stars"></a>
<a href="https://hub.docker.com/r/1panel/maxkb"><img src="https://img.shields.io/docker/pulls/1panel/maxkb?label=downloads" alt="Download"></a>
<a href="https://gitee.com/fit2cloud-feizhiyun/MaxKB"><img src="https://gitee.com/fit2cloud-feizhiyun/MaxKB/badge/star.svg?theme=gvp" alt="Gitee Stars"></a>
<a href="https://gitcode.com/feizhiyun/MaxKB"><img src="https://gitcode.com/feizhiyun/MaxKB/star/badge.svg" alt="GitCode Stars"></a>
</p>
<hr/>
MaxKB = Max Knowledge Brain. It is a powerful and easy-to-use enterprise-grade agent platform that supports Retrieval-Augmented Generation (RAG), workflow orchestration, and MCP tool use. MaxKB integrates with all mainstream large language models and is widely used in scenarios such as intelligent customer service, internal enterprise knowledge base Q&A, employee assistants, and academic research and education.
- **Retrieval-Augmented Generation (RAG)**: efficiently build a local AI knowledge base; supports direct document upload / automatic crawling of online documents, with automatic text splitting and vectorization, effectively reducing large-model hallucinations and improving Q&A quality;
- **Flexible orchestration**: a built-in workflow engine, function library, and MCP tool-use capability support orchestrating the AI's work process for complex business scenarios;
- **Seamless embedding**: zero-code rapid embedding into third-party business systems gives existing systems intelligent Q&A capability and improves user satisfaction;
- **Model-neutral**: integrates with all kinds of large models, including local private models (DeepSeek R1 / Llama 3 / Qwen 2, etc.), domestic public models (Tongyi Qianwen / Tencent Hunyuan / ByteDance Doubao / Baidu Qianfan / Zhipu AI / Kimi, etc.), and international public models (OpenAI / Claude / Gemini, etc.).
Three-minute video introduction to MaxKB: https://www.bilibili.com/video/BV18JypYeEkj/
## Quick Start
```
# Linux machine
docker run -d --name=maxkb --restart=always -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data -v ~/.python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb
# Windows machine
docker run -d --name=maxkb --restart=always -p 8080:8080 -v C:/maxkb:/var/lib/postgresql/data -v C:/python-packages:/opt/maxkb/app/sandbox/python-packages registry.fit2cloud.com/maxkb/maxkb
# Username: admin
# Password: MaxKB@123..
```
- You can also deploy MaxKB quickly through the [1Panel App Store](https://apps.fit2cloud.com/1panel);
- For intranet environments, installing from the [offline installation package](https://community.fit2cloud.com/#/products/maxkb/downloads) is recommended;
- MaxKB comes in a Community edition and a Professional edition; see the [MaxKB edition comparison](https://maxkb.cn/pricing.html) for details;
- If you need to introduce MaxKB to your team, you can use the [official slide deck](https://maxkb.cn/download/introduce-maxkb_202503.pdf).
If you have more questions, check the user manual or talk to us on the forum.
- [Use Cases](USE-CASES.md)
- [User Manual](https://maxkb.cn/docs/)
- [Forum Support](https://bbs.fit2cloud.com/c/mk/11)
- Technical discussion group:
<image height="150px" width="150px" src="https://github.com/1Panel-dev/MaxKB/assets/52996290/a083d214-02be-4178-a1db-4f428124153a"/>
## UI Showcase
<table style="border-collapse: collapse; border: 1px solid black;">
<tr>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/d87395fa-a8d7-401c-82bf-c6e475d10ae9" alt="MaxKB Demo1" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/1Panel-dev/MaxKB/assets/52996290/47c35ee4-3a3b-4bd4-9f4f-ee20788b2b9a" alt="MaxKB Demo2" /></td>
</tr>
<tr>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/user-attachments/assets/9a1043cb-fa62-4f71-b9a3-0b46fa59a70e" alt="MaxKB Demo3" /></td>
<td style="padding: 5px;background-color:#fff;"><img src= "https://github.com/user-attachments/assets/3407ce9a-779c-4eb4-858e-9441a2ddc664" alt="MaxKB Demo4" /></td>
</tr>
</table>
## Technical Stack
- Frontend: [Vue.js](https://cn.vuejs.org/)
- Backend: [Python / Django](https://www.djangoproject.com/)
- LangChain: [LangChain](https://www.langchain.com/)
- Vector database: [PostgreSQL / pgvector](https://www.postgresql.org/)
## Other Star Projects from FIT2CLOUD
- [1Panel](https://github.com/1panel-dev/1panel/) - a modern, open-source Linux server management panel
- [JumpServer](https://github.com/jumpserver/jumpserver/) - a popular open-source bastion host
- [DataEase](https://github.com/dataease/dataease/) - open-source data visualization and analytics for everyone
- [MeterSphere](https://github.com/metersphere/metersphere/) - a next-generation open-source continuous testing tool
- [Halo](https://github.com/halo-dev/halo/) - a powerful and easy-to-use open-source website builder
## License
Copyright (c) 2014-2025 飞致云 FIT2CLOUD, All rights reserved.
Licensed under The GNU General Public License version 3 (GPLv3) (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
<https://www.gnu.org/licenses/gpl-3.0.html>
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.

View File

@@ -1,17 +0,0 @@
# Security Notes
If you find a security issue, please contact us directly:
- support@fit2cloud.com
- 400-052-0755
Thank you for your support!
# Security Policy
All security bugs should be reported to the contacts below:
- support@fit2cloud.com
- 400-052-0755
Thanks for your support!

View File

@@ -1,39 +0,0 @@
<h3 align="center">MaxKB use cases, continuously updated...</h3>
------------------------------
- [MaxKB use case: China Agricultural University - "Xiaowuge"](https://mp.weixin.qq.com/s/4g_gySMBQZCJ9OZ-yBkmvw)
- [MaxKB use case: Dongbei University of Finance and Economics - "Xiaoyinxing"](https://mp.weixin.qq.com/s/3BoxkY7EMomMmmvFYxvDIA)
- [MaxKB use case: China Railway Water Affairs](https://mp.weixin.qq.com/s/voNAddbK2CJOrJJs1ewZ8g)
- [MaxKB use case: PLA General Hospital](https://mp.weixin.qq.com/s/ETrZC-vrA4Aap0eF-15EeQ)
- [MaxKB use case: Wuxi Municipal Data Bureau](https://mp.weixin.qq.com/s/enfUFLevvL_La74PQ0kIXw)
- [MaxKB use case: CNNC Xi'an Instrument Research Institute - "Xiyi Ruida"](https://mp.weixin.qq.com/s/CbKr4mev8qahKLAtV6Dxdg)
- [MaxKB use case: Nanjing University of Chinese Medicine](https://mp.weixin.qq.com/s/WUmAKYbZjp3272HIecpRFA)
- [MaxKB use case: Northwest Electric Power Design Institute - AI digital assistant Memex](https://mp.weixin.qq.com/s/ezHFdB7C7AVL9MTtDwYGSA)
- [MaxKB use case: Xi'an International Medical Center Hospital - "Guoyi Xiaozhu"](https://mp.weixin.qq.com/s/DSOUvwrQrxbqQxKBilTCFQ)
- [MaxKB use case: Wallace launches its intelligent AI customer service assistant!](https://www.bilibili.com/video/BV1hQtVeXEBL)
- [MaxKB use case: turning healthcare industry knowledge into a knowledge base Q&A assistant!](https://www.bilibili.com/video/BV157wme9EgB)
- [MaxKB use case: an AI customer service experience for conventions and exhibitions](https://www.bilibili.com/video/BV1J7BqY6EKA)
- [MaxKB use case: kids starting kindergarten? An AI assistant that helps choose a school](https://www.bilibili.com/video/BV1wKrhYvEer)
- [MaxKB use case: a product user-guide AI assistant that even beginners handle with ease!](https://www.bilibili.com/video/BV1Yz6gYtEqX)
- [MaxKB use case: an intelligent AI customer service experience for biopharma!](https://www.bilibili.com/video/BV13JzvYsE3e)
- [MaxKB use case: an AI assistant for university administration](https://www.bilibili.com/video/BV1yvBMYvEdy)
- [MaxKB use case: Yueyang People's Hospital - OA assistant](https://mp.weixin.qq.com/s/O94Qo3UH-MiUtDdWCVg8sQ)
- [MaxKB use case: Changshu No. 1 People's Hospital](https://mp.weixin.qq.com/s/s5XXGTR3_MUo41NbJ8WzZQ)
- [MaxKB use case: North China University of Water Resources and Electric Power](https://mp.weixin.qq.com/s/PoOFAcMCr9qJdvSj8c08qg)
- [MaxKB use case: Tangshan Maritime Safety Administration - "Xiaohai" AI voice assistant](https://news.qq.com/rain/a/20250223A030BE00)
- [MaxKB use case: Hanshou government services, Hunan](http://hsds.hsdj.gov.cn:19999/ui/chat/a2c976736739aadc)
- [MaxKB use case: Guangzhou Women and Children's Medical Center - AI assistant for medical data classification and grading](https://mp.weixin.qq.com/s/YHUMkUOAaUomBV8bswpK3g)
- [MaxKB use case: Suzhou Nuclear Power Research Institute - AI assistant for self-checking the quality of maintenance program evaluations](https://mp.weixin.qq.com/s/Ts5FQdnv7Tu9Jp7bvofCVA)
- [MaxKB use case: State Nuclear Power Automation System Engineering - "NuCON AI Help"](https://mp.weixin.qq.com/s/HNPc7u5xVfGLJr8IQz3vjQ)
- [MaxKB use case: Shenzhen Tong opens a new chapter of DeepSeek-powered applications](https://mp.weixin.qq.com/s/SILN0GSescH9LyeQqYP0VQ)
- [MaxKB use case: Nantong smart mobility leads the Yangtze River Delta - the "Changxing Nantong" app, the first to integrate DeepSeek, launches new AI scenarios](https://mp.weixin.qq.com/s/WEC9UQ6msY0VS8LhTZh-Ew)
- [MaxKB use case: CSSC Power's AI "Smart Power Cloud Assistant" and its first batch of digital employees go live](https://mp.weixin.qq.com/s/OGcEkjh9DzGO1Tkc9nr7qg)
- [MaxKB use case: AI + mining - DeepSeek helps green smart mines "upgrade" their intelligence](https://mp.weixin.qq.com/s/SZstxTvVoLZg0ECbZbfpIA)
- [MaxKB use case: DeepSeek lands at Hongsheng Copper - a domestic large model powers a new engine for the "lights-out factory"](https://mp.weixin.qq.com/s/Eczdx574MS5RMF7WfHN7_A)
- [MaxKB use case: embracing the intelligent era! China Minmetals empowers enterprise development with "AI+"](https://mp.weixin.qq.com/s/D5vBtlX2E81pWE3_2OgWSw)
- [MaxKB use case: DeepSeek empowers MCC Wukan's AI agents](https://mp.weixin.qq.com/s/8m0vxGcWXNdZazziQrLyxg)
- [MaxKB use case: Shaanxi Broadcast & TV Network's "Qinling Cloud" platform deploys DeepSeek locally](https://mp.weixin.qq.com/s/ZKmEU_wWShK1YDomKJHQeA)
- [MaxKB use case: Guangdong Holdings completes a private DeepSeek deployment to support intelligent group management](https://mp.weixin.qq.com/s/2JbVp0-kr9Hfp-0whH4cvg)
- [MaxKB use case: the Building Materials Industry Information Center deploys DeepSeek locally, advancing the industry's digital-intelligent transformation](https://mp.weixin.qq.com/s/HThGSnND3qDF8ySEqiM4jw)
- [MaxKB use case: DeepSeek together! Fujian Design opens a new chapter with large AI models](https://mp.weixin.qq.com/s/m67e-H7iQBg3d24NM82UjA)

View File

@@ -18,8 +18,7 @@ from dataset.models import Paragraph
class ParagraphPipelineModel:
def __init__(self, _id: str, document_id: str, dataset_id: str, content: str, title: str, status: str,
is_active: bool, comprehensive_score: float, similarity: float, dataset_name: str, document_name: str,
hit_handling_method: str, directly_return_similarity: float, meta: dict = None):
is_active: bool, comprehensive_score: float, similarity: float, dataset_name: str, document_name: str):
self.id = _id
self.document_id = document_id
self.dataset_id = dataset_id
@@ -31,9 +30,6 @@ class ParagraphPipelineModel:
self.similarity = similarity
self.dataset_name = dataset_name
self.document_name = document_name
self.hit_handling_method = hit_handling_method
self.directly_return_similarity = directly_return_similarity
self.meta = meta
def to_dict(self):
return {
@@ -47,8 +43,7 @@ class ParagraphPipelineModel:
'comprehensive_score': self.comprehensive_score,
'similarity': self.similarity,
'dataset_name': self.dataset_name,
'document_name': self.document_name,
'meta': self.meta,
'document_name': self.document_name
}
class builder:
@@ -58,9 +53,6 @@ class ParagraphPipelineModel:
self.comprehensive_score = None
self.document_name = None
self.dataset_name = None
self.hit_handling_method = None
self.directly_return_similarity = 0.9
self.meta = {}
def add_paragraph(self, paragraph):
if isinstance(paragraph, Paragraph):
@@ -84,14 +76,6 @@ class ParagraphPipelineModel:
self.document_name = document_name
return self
def add_hit_handling_method(self, hit_handling_method):
self.hit_handling_method = hit_handling_method
return self
def add_directly_return_similarity(self, directly_return_similarity):
self.directly_return_similarity = directly_return_similarity
return self
def add_comprehensive_score(self, comprehensive_score: float):
self.comprehensive_score = comprehensive_score
return self
@@ -100,10 +84,6 @@ class ParagraphPipelineModel:
self.similarity = similarity
return self
def add_meta(self, meta: dict):
self.meta = meta
return self
def build(self):
return ParagraphPipelineModel(str(self.paragraph.get('id')), str(self.paragraph.get('document_id')),
str(self.paragraph.get('dataset_id')),
@@ -111,8 +91,7 @@ class ParagraphPipelineModel:
self.paragraph.get('status'),
self.paragraph.get('is_active'),
self.comprehensive_score, self.similarity, self.dataset_name,
self.document_name, self.hit_handling_method, self.directly_return_similarity,
self.meta)
self.document_name)
class IBaseChatPipelineStep:
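For context, the builder methods removed above were the only way `hit_handling_method`, `directly_return_similarity`, and `meta` entered the model; what remains is a plain fluent builder. A usage sketch calling only the methods visible in this diff (field values are illustrative, and `add_paragraph` is assumed to accept a plain dict as well as a `Paragraph` row):

```python
paragraph_row = {
    'id': 'a1b2c3', 'document_id': 'd4e5f6', 'dataset_id': 'g7h8i9',
    'content': 'MaxKB supports ...', 'title': 'Quick start',
    'status': 'ok', 'is_active': True,
}

model = (ParagraphPipelineModel.builder()
         .add_paragraph(paragraph_row)
         .add_document_name('manual.md')
         .add_comprehensive_score(0.87)
         .add_similarity(0.91)
         .build())
print(model.to_dict()['similarity'])  # 0.91
```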

View File

@@ -11,18 +11,14 @@ from functools import reduce
from typing import List, Type, Dict
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep
from common.handle.base_to_response import BaseToResponse
from common.handle.impl.response.system_to_response import SystemToResponse
class PipelineManage:
def __init__(self, step_list: List[Type[IBaseChatPipelineStep]],
base_to_response: BaseToResponse = SystemToResponse()):
def __init__(self, step_list: List[Type[IBaseChatPipelineStep]]):
# step executors
self.step_list = [step() for step in step_list]
# context
self.context = {'message_tokens': 0, 'answer_tokens': 0}
self.base_to_response = base_to_response
def run(self, context: Dict = None):
self.context['start_time'] = time.time()
@@ -37,21 +33,13 @@ class PipelineManage:
filter(lambda r: r is not None,
[row.get_details(self) for row in self.step_list])], {})
def get_base_to_response(self):
return self.base_to_response
class builder:
def __init__(self):
self.step_list: List[Type[IBaseChatPipelineStep]] = []
self.base_to_response = SystemToResponse()
def append_step(self, step: Type[IBaseChatPipelineStep]):
self.step_list.append(step)
return self
def add_base_to_response(self, base_to_response: BaseToResponse):
self.base_to_response = base_to_response
return self
def build(self):
return PipelineManage(step_list=self.step_list, base_to_response=self.base_to_response)
return PipelineManage(step_list=self.step_list)
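With `base_to_response` gone, the builder now only collects step classes; note that `append_step` takes the class itself, and `PipelineManage.__init__` instantiates each one. A usage sketch with a hypothetical search step standing in for the project's real retrieval step:

```python
# SearchDatasetStep is hypothetical; BaseChatStep is the chat step shown
# later in this changeset. Steps are passed as classes, not instances.
pipeline = (PipelineManage.builder()
            .append_step(SearchDatasetStep)
            .append_step(BaseChatStep)
            .build())

pipeline.run({'problem_text': 'How do I deploy MaxKB?'})
print(pipeline.context['message_tokens'], pipeline.context['answer_tokens'])
```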

View File

@@ -9,22 +9,21 @@
from abc import abstractmethod
from typing import Type, List
from django.utils.translation import gettext_lazy as _
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseMessage
from rest_framework import serializers
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep, ParagraphPipelineModel
from application.chat_pipeline.pipeline_manage import PipelineManage
from application.serializers.application_serializers import NoReferencesSetting
from common.field.common import InstanceField
from common.util.field_message import ErrMessage
from dataset.models import Paragraph
class ModelField(serializers.Field):
def to_internal_value(self, data):
if not isinstance(data, BaseChatModel):
self.fail(_('Model type error'), value=data)
self.fail('Model type error', value=data)
return data
def to_representation(self, value):
@@ -34,7 +33,7 @@ class ModelField(serializers.Field):
class MessageField(serializers.Field):
def to_internal_value(self, data):
if not isinstance(data, BaseMessage):
self.fail(_('Message type error'), value=data)
self.fail('Message type error', value=data)
return data
def to_representation(self, value):
@@ -53,42 +52,31 @@ class IChatStep(IBaseChatPipelineStep):
class InstanceSerializer(serializers.Serializer):
# conversation list
message_list = serializers.ListField(required=True, child=MessageField(required=True),
error_messages=ErrMessage.list(_("Conversation list")))
model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Model id")))
error_messages=ErrMessage.list("Conversation list"))
# large language model
chat_model = ModelField(error_messages=ErrMessage.list("Large language model"))
# paragraph list
paragraph_list = serializers.ListField(error_messages=ErrMessage.list(_("Paragraph List")))
paragraph_list = serializers.ListField(error_messages=ErrMessage.list("Paragraph list"))
# conversation id
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("Conversation ID")))
chat_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("Conversation id"))
# user question
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.uuid(_("User Questions")))
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.uuid("User question"))
# post-processor
post_response_handler = InstanceField(model_type=PostResponseHandler,
error_messages=ErrMessage.base(_("Post-processor")))
error_messages=ErrMessage.base("Post-processor"))
# completion question
padding_problem_text = serializers.CharField(required=False,
error_messages=ErrMessage.base(_("Completion Question")))
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.base("Completion question"))
# whether to stream the output
stream = serializers.BooleanField(required=False, error_messages=ErrMessage.base(_("Streaming Output")))
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client id")))
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Client Type")))
# no referenced segments found
no_references_setting = NoReferencesSetting(required=True,
error_messages=ErrMessage.base(_("No reference segment settings")))
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
model_setting = serializers.DictField(required=True, allow_null=True,
error_messages=ErrMessage.dict(_("Model settings")))
model_params_setting = serializers.DictField(required=False, allow_null=True,
error_messages=ErrMessage.dict(_("Model parameter settings")))
stream = serializers.BooleanField(required=False, error_messages=ErrMessage.base("Streaming output"))
client_id = serializers.CharField(required=True, error_messages=ErrMessage.char("Client id"))
client_type = serializers.CharField(required=True, error_messages=ErrMessage.char("Client type"))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
message_list: List = self.initial_data.get('message_list')
for message in message_list:
if not isinstance(message, BaseMessage):
raise Exception(_("message type error"))
raise Exception("message 类型错误")
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
return self.InstanceSerializer
@@ -101,10 +89,8 @@ class IChatStep(IBaseChatPipelineStep):
def execute(self, message_list: List[BaseMessage],
chat_id, problem_text,
post_response_handler: PostResponseHandler,
model_id: str = None,
user_id: str = None,
chat_model: BaseChatModel = None,
paragraph_list=None,
manage: PipelineManage = None,
padding_problem_text: str = None, stream: bool = True, client_id=None, client_type=None,
no_references_setting=None, model_params_setting=None, model_setting=None, **kwargs):
padding_problem_text: str = None, stream: bool = True, client_id=None, client_type=None, **kwargs):
pass

View File

@ -6,6 +6,7 @@
@date2024/1/9 18:25
@desc: 对话step Base实现
"""
import json
import logging
import time
import traceback
@@ -14,44 +15,27 @@ from typing import List
from django.db.models import QuerySet
from django.http import StreamingHttpResponse
from django.utils.translation import gettext as _
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseMessage
from langchain.schema.messages import HumanMessage, AIMessage
from langchain_core.messages import AIMessageChunk
from rest_framework import status
from langchain.schema.messages import BaseMessageChunk, HumanMessage, AIMessage
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
from application.chat_pipeline.pipeline_manage import PipelineManage
from application.chat_pipeline.step.chat_step.i_chat_step import IChatStep, PostResponseHandler
from application.flow.tools import Reasoning
from application.models.api_key_model import ApplicationPublicAccessClient
from common.constants.authentication_type import AuthenticationType
from setting.models_provider.tools import get_model_instance_by_model_user_id
from common.response import result
def add_access_num(client_id=None, client_type=None, application_id=None):
if client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value and application_id is not None:
application_public_access_client = (QuerySet(ApplicationPublicAccessClient).filter(client_id=client_id,
application_id=application_id)
.first())
def add_access_num(client_id=None, client_type=None):
if client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value:
application_public_access_client = QuerySet(ApplicationPublicAccessClient).filter(id=client_id).first()
if application_public_access_client is not None:
application_public_access_client.access_num = application_public_access_client.access_num + 1
application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1
application_public_access_client.save()
def write_context(step, manage, request_token, response_token, all_text):
step.context['message_tokens'] = request_token
step.context['answer_tokens'] = response_token
current_time = time.time()
step.context['answer_text'] = all_text
step.context['run_time'] = current_time - step.context['start_time']
manage.context['run_time'] = current_time - manage.context['start_time']
manage.context['message_tokens'] = manage.context['message_tokens'] + request_token
manage.context['answer_tokens'] = manage.context['answer_tokens'] + response_token
def event_content(response,
chat_id,
chat_record_id,
@@ -63,95 +47,34 @@ def event_content(response,
message_list: List[BaseMessage],
problem_text: str,
padding_problem_text: str = None,
client_id=None, client_type=None,
is_ai_chat: bool = None,
model_setting=None):
if model_setting is None:
model_setting = {}
reasoning_content_enable = model_setting.get('reasoning_content_enable', False)
reasoning_content_start = model_setting.get('reasoning_content_start', '<think>')
reasoning_content_end = model_setting.get('reasoning_content_end', '</think>')
reasoning = Reasoning(reasoning_content_start,
reasoning_content_end)
client_id=None, client_type=None):
all_text = ''
reasoning_content = ''
try:
response_reasoning_content = False
for chunk in response:
reasoning_chunk = reasoning.get_reasoning_content(chunk)
content_chunk = reasoning_chunk.get('content')
if 'reasoning_content' in chunk.additional_kwargs:
response_reasoning_content = True
reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
else:
reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
all_text += content_chunk
if reasoning_content_chunk is None:
reasoning_content_chunk = ''
reasoning_content += reasoning_content_chunk
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
[], content_chunk,
False,
0, 0, {'node_is_end': False,
'view_type': 'many_view',
'node_type': 'ai-chat-node',
'real_node_id': 'ai-chat-node',
'reasoning_content': reasoning_content_chunk if reasoning_content_enable else ''})
reasoning_chunk = reasoning.get_end_reasoning_content()
all_text += reasoning_chunk.get('content')
reasoning_content_chunk = ""
if not response_reasoning_content:
reasoning_content_chunk = reasoning_chunk.get(
'reasoning_content')
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
[], reasoning_chunk.get('content'),
False,
0, 0, {'node_is_end': False,
'view_type': 'many_view',
'node_type': 'ai-chat-node',
'real_node_id': 'ai-chat-node',
'reasoning_content'
: reasoning_content_chunk if reasoning_content_enable else ''})
all_text += chunk.content
yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
'content': chunk.content, 'is_end': False}) + "\n\n"
# count tokens
if is_ai_chat:
try:
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(all_text)
except Exception as e:
request_token = 0
response_token = 0
else:
request_token = 0
response_token = 0
write_context(step, manage, request_token, response_token, all_text)
asker = manage.context.get('form_data', {}).get('asker', None)
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(all_text)
step.context['message_tokens'] = request_token
step.context['answer_tokens'] = response_token
current_time = time.time()
step.context['answer_text'] = all_text
step.context['run_time'] = current_time - step.context['start_time']
manage.context['run_time'] = current_time - manage.context['start_time']
manage.context['message_tokens'] = manage.context['message_tokens'] + request_token
manage.context['answer_tokens'] = manage.context['answer_tokens'] + response_token
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
all_text, manage, step, padding_problem_text, client_id,
reasoning_content=reasoning_content if reasoning_content_enable else ''
, asker=asker)
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
[], '', True,
request_token, response_token,
{'node_is_end': True, 'view_type': 'many_view',
'node_type': 'ai-chat-node'})
add_access_num(client_id, client_type, manage.context.get('application_id'))
all_text, manage, step, padding_problem_text, client_id)
yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
'content': '', 'is_end': True}) + "\n\n"
add_access_num(client_id, client_type)
except Exception as e:
logging.getLogger("max_kb_error").error(f'{str(e)}:{traceback.format_exc()}')
all_text = 'Exception:' + str(e)
write_context(step, manage, 0, 0, all_text)
asker = manage.context.get('form_data', {}).get('asker', None)
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
all_text, manage, step, padding_problem_text, client_id, reasoning_content='',
asker=asker)
add_access_num(client_id, client_type, manage.context.get('application_id'))
yield manage.get_base_to_response().to_stream_chunk_response(chat_id, str(chat_record_id), 'ai-chat-node',
[], all_text,
False,
0, 0, {'node_is_end': False,
'view_type': 'many_view',
'node_type': 'ai-chat-node',
'real_node_id': 'ai-chat-node',
'reasoning_content': ''})
yield 'data: ' + json.dumps({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
'content': 'Exception: ' + str(e), 'is_end': True}) + "\n\n"
class BaseChatStep(IChatStep):
@@ -159,29 +82,21 @@ class BaseChatStep(IChatStep):
chat_id,
problem_text,
post_response_handler: PostResponseHandler,
model_id: str = None,
user_id: str = None,
chat_model: BaseChatModel = None,
paragraph_list=None,
manage: PipelineManage = None,
padding_problem_text: str = None,
stream: bool = True,
client_id=None, client_type=None,
no_references_setting=None,
model_params_setting=None,
model_setting=None,
**kwargs):
chat_model = get_model_instance_by_model_user_id(model_id, user_id,
**model_params_setting) if model_id is not None else None
if stream:
return self.execute_stream(message_list, chat_id, problem_text, post_response_handler, chat_model,
paragraph_list,
manage, padding_problem_text, client_id, client_type, no_references_setting,
model_setting)
manage, padding_problem_text, client_id, client_type)
else:
return self.execute_block(message_list, chat_id, problem_text, post_response_handler, chat_model,
paragraph_list,
manage, padding_problem_text, client_id, client_type, no_references_setting,
model_setting)
manage, padding_problem_text, client_id, client_type)
def get_details(self, manage, **kwargs):
return {
@@ -204,29 +119,6 @@ class BaseChatStep(IChatStep):
result.append({'role': 'ai', 'content': answer_text})
return result
@staticmethod
def get_stream_result(message_list: List[BaseMessage],
chat_model: BaseChatModel = None,
paragraph_list=None,
no_references_setting=None,
problem_text=None):
if paragraph_list is None:
paragraph_list = []
directly_return_chunk_list = [AIMessageChunk(content=paragraph.content)
for paragraph in paragraph_list if (
paragraph.hit_handling_method == 'directly_return' and paragraph.similarity >= paragraph.directly_return_similarity)]
if directly_return_chunk_list is not None and len(directly_return_chunk_list) > 0:
return iter(directly_return_chunk_list), False
elif len(paragraph_list) == 0 and no_references_setting.get(
'status') == 'designated_answer':
return iter(
[AIMessageChunk(content=no_references_setting.get('value').replace('{question}', problem_text))]), False
if chat_model is None:
return iter([AIMessageChunk(
_('Sorry, the AI model is not configured. Please go to the application to set up the AI model first.'))]), False
else:
return chat_model.stream(message_list), True
def execute_stream(self, message_list: List[BaseMessage],
chat_id,
problem_text,
@@ -235,43 +127,24 @@ class BaseChatStep(IChatStep):
paragraph_list=None,
manage: PipelineManage = None,
padding_problem_text: str = None,
client_id=None, client_type=None,
no_references_setting=None,
model_setting=None):
chat_result, is_ai_chat = self.get_stream_result(message_list, chat_model, paragraph_list,
no_references_setting, problem_text)
client_id=None, client_type=None):
# invoke the model
if chat_model is None:
chat_result = iter(
[BaseMessageChunk(content=paragraph.title + "\n" + paragraph.content) for paragraph in paragraph_list])
else:
chat_result = chat_model.stream(message_list)
chat_record_id = uuid.uuid1()
r = StreamingHttpResponse(
streaming_content=event_content(chat_result, chat_id, chat_record_id, paragraph_list,
post_response_handler, manage, self, chat_model, message_list, problem_text,
padding_problem_text, client_id, client_type, is_ai_chat, model_setting),
padding_problem_text, client_id, client_type),
content_type='text/event-stream;charset=utf-8')
r['Cache-Control'] = 'no-cache'
return r
@staticmethod
def get_block_result(message_list: List[BaseMessage],
chat_model: BaseChatModel = None,
paragraph_list=None,
no_references_setting=None,
problem_text=None):
if paragraph_list is None:
paragraph_list = []
directly_return_chunk_list = [AIMessageChunk(content=paragraph.content)
for paragraph in paragraph_list if (
paragraph.hit_handling_method == 'directly_return' and paragraph.similarity >= paragraph.directly_return_similarity)]
if directly_return_chunk_list is not None and len(directly_return_chunk_list) > 0:
return directly_return_chunk_list[0], False
elif len(paragraph_list) == 0 and no_references_setting.get(
'status') == 'designated_answer':
return AIMessage(no_references_setting.get('value').replace('{question}', problem_text)), False
if chat_model is None:
return AIMessage(
_('Sorry, the AI model is not configured. Please go to the application to set up the AI model first.')), False
else:
return chat_model.invoke(message_list), True
def execute_block(self, message_list: List[BaseMessage],
chat_id,
problem_text,
@@ -280,55 +153,26 @@ class BaseChatStep(IChatStep):
paragraph_list=None,
manage: PipelineManage = None,
padding_problem_text: str = None,
client_id=None, client_type=None, no_references_setting=None,
model_setting=None):
reasoning_content_enable = model_setting.get('reasoning_content_enable', False)
reasoning_content_start = model_setting.get('reasoning_content_start', '<think>')
reasoning_content_end = model_setting.get('reasoning_content_end', '</think>')
reasoning = Reasoning(reasoning_content_start,
reasoning_content_end)
chat_record_id = uuid.uuid1()
client_id=None, client_type=None):
# Invoke the model
try:
chat_result, is_ai_chat = self.get_block_result(message_list, chat_model, paragraph_list,
no_references_setting, problem_text)
if is_ai_chat:
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(chat_result.content)
else:
request_token = 0
response_token = 0
write_context(self, manage, request_token, response_token, chat_result.content)
reasoning_result = reasoning.get_reasoning_content(chat_result)
reasoning_result_end = reasoning.get_end_reasoning_content()
content = reasoning_result.get('content') + reasoning_result_end.get('content')
if 'reasoning_content' in chat_result.response_metadata:
reasoning_content = chat_result.response_metadata.get('reasoning_content', '')
else:
reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get(
'reasoning_content')
asker = manage.context.get('form_data', {}).get('asker', None)
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
content, manage, self, padding_problem_text, client_id,
reasoning_content=reasoning_content if reasoning_content_enable else '',
asker=asker)
add_access_num(client_id, client_type, manage.context.get('application_id'))
return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id),
content, True,
request_token, response_token,
{
'reasoning_content': reasoning_content if reasoning_content_enable else '',
'answer_list': [{
'content': content,
'reasoning_content': reasoning_content if reasoning_content_enable else ''
}]})
except Exception as e:
all_text = 'Exception:' + str(e)
write_context(self, manage, 0, 0, all_text)
asker = manage.context.get('form_data', {}).get('asker', None)
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
all_text, manage, self, padding_problem_text, client_id, reasoning_content='',
asker=asker)
add_access_num(client_id, client_type, manage.context.get('application_id'))
return manage.get_base_to_response().to_block_response(str(chat_id), str(chat_record_id), all_text, True, 0,
0, _status=status.HTTP_500_INTERNAL_SERVER_ERROR)
if chat_model is None:
chat_result = AIMessage(
content="\n\n".join([paragraph.title + "\n" + paragraph.content for paragraph in paragraph_list]))
else:
chat_result = chat_model.invoke(message_list)
chat_record_id = uuid.uuid1()
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(chat_result.content)
self.context['message_tokens'] = request_token
self.context['answer_tokens'] = response_token
current_time = time.time()
self.context['answer_text'] = chat_result.content
self.context['run_time'] = current_time - self.context['start_time']
manage.context['run_time'] = current_time - manage.context['start_time']
manage.context['message_tokens'] = manage.context['message_tokens'] + request_token
manage.context['answer_tokens'] = manage.context['answer_tokens'] + response_token
post_response_handler.handler(chat_id, chat_record_id, paragraph_list, problem_text,
chat_result.content, manage, self, padding_problem_text, client_id)
add_access_num(client_id, client_type)
return result.success({'chat_id': str(chat_id), 'id': str(chat_record_id), 'operate': True,
'content': chat_result.content, 'is_end': True})
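The Reasoning helper referenced in the newer side of this hunk is defined elsewhere in the codebase; judging only from its call sites, it separates '<think>'-delimited reasoning from the visible answer. A minimal non-streaming sketch of that idea (the real class also tracks state across stream chunks):

class ReasoningSketch:
    """Split '<think>...</think>' reasoning out of a model reply (assumed behavior)."""

    def __init__(self, start_tag='<think>', end_tag='</think>'):
        self.start_tag = start_tag
        self.end_tag = end_tag

    def split(self, text: str) -> dict:
        start = text.find(self.start_tag)
        end = text.find(self.end_tag)
        if start != -1 and end != -1 and end > start:
            reasoning = text[start + len(self.start_tag):end]
            visible = text[:start] + text[end + len(self.end_tag):]
            return {'content': visible, 'reasoning_content': reasoning}
        return {'content': text, 'reasoning_content': ''}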

View File

@@ -9,41 +9,36 @@
from abc import abstractmethod
from typing import Type, List
from django.utils.translation import gettext_lazy as _
from langchain.schema import BaseMessage
from rest_framework import serializers
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep, ParagraphPipelineModel
from application.chat_pipeline.pipeline_manage import PipelineManage
from application.models import ChatRecord
from application.serializers.application_serializers import NoReferencesSetting
from common.field.common import InstanceField
from common.util.field_message import ErrMessage
from dataset.models import Paragraph
class IGenerateHumanMessageStep(IBaseChatPipelineStep):
class InstanceSerializer(serializers.Serializer):
# Question
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("question")))
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char("问题"))
# Paragraph list
paragraph_list = serializers.ListField(child=InstanceField(model_type=ParagraphPipelineModel, required=True),
error_messages=ErrMessage.list(_("Paragraph List")))
error_messages=ErrMessage.list("段落列表"))
# Chat history
history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True),
error_messages=ErrMessage.list(_("History Questions")))
error_messages=ErrMessage.list("历史对答"))
# Number of multi-turn dialogue rounds
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer("多轮对话数量"))
# Maximum character length of knowledge-base paragraphs to include
max_paragraph_char_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(
_("Maximum length of the knowledge base paragraph")))
"最大携带知识库段落长度"))
# Prompt template
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
system = serializers.CharField(required=False, allow_null=True, allow_blank=True,
error_messages=ErrMessage.char(_("System prompt words (role)")))
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char("提示词"))
# Padded (completed) question
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Completion problem")))
# Settings for when no reference segments are retrieved
no_references_setting = NoReferencesSetting(required=True, error_messages=ErrMessage.base(_("No reference segment settings")))
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("补齐问题"))
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
return self.InstanceSerializer
@@ -61,8 +56,6 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
max_paragraph_char_number: int,
prompt: str,
padding_problem_text: str = None,
no_references_setting=None,
system=None,
**kwargs) -> List[BaseMessage]:
"""
@@ -74,8 +67,6 @@ class IGenerateHumanMessageStep(IBaseChatPipelineStep):
:param prompt: prompt template
:param padding_problem_text: user-modified question text
:param kwargs: other parameters
:param no_references_setting: settings for when no reference segments are retrieved
:param system: system prompt (role)
:return:
"""
pass
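Putting this contract together with the base implementation shown later in this diff, the generated list is: an optional SystemMessage, the flattened history pairs, then a final HumanMessage rendered from the prompt template. A hypothetical result for a one-round history (illustrative values only):

from langchain.schema import AIMessage, HumanMessage
from langchain_core.messages import SystemMessage

message_list = [
    SystemMessage('You are a helpful assistant.'),   # present only when a system prompt is set
    HumanMessage('earlier question'), AIMessage('earlier answer'),
    HumanMessage('Known information:\n<data>...</data>\nQuestion:\nlatest question'),
]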

View File

@@ -6,10 +6,9 @@
@date:2024/1/10 17:50
@desc:
"""
from typing import List, Dict
from typing import List
from langchain.schema import BaseMessage, HumanMessage
from langchain_core.messages import SystemMessage
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
from application.chat_pipeline.step.generate_human_message_step.i_generate_human_message_step import \
@@ -27,37 +26,22 @@ class BaseGenerateHumanMessageStep(IGenerateHumanMessageStep):
max_paragraph_char_number: int,
prompt: str,
padding_problem_text: str = None,
no_references_setting=None,
system=None,
**kwargs) -> List[BaseMessage]:
prompt = prompt if (paragraph_list is not None and len(paragraph_list) > 0) else no_references_setting.get(
'value')
exec_problem_text = padding_problem_text if padding_problem_text is not None else problem_text
start_index = len(history_chat_record) - dialogue_number
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))]
if system is not None and len(system) > 0:
return [SystemMessage(system), *flat_map(history_message),
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list,
no_references_setting)]
return [*flat_map(history_message),
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list,
no_references_setting)]
self.to_human_message(prompt, exec_problem_text, max_paragraph_char_number, paragraph_list)]
@staticmethod
def to_human_message(prompt: str,
problem: str,
max_paragraph_char_number: int,
paragraph_list: List[ParagraphPipelineModel],
no_references_setting: Dict):
paragraph_list: List[ParagraphPipelineModel]):
if paragraph_list is None or len(paragraph_list) == 0:
if no_references_setting.get('status') == 'ai_questioning':
return HumanMessage(
content=no_references_setting.get('value').replace('{question}', problem))
else:
return HumanMessage(content=prompt.replace('{data}', "").replace('{question}', problem))
return HumanMessage(content=prompt.format(**{'data': "<data></data>", 'question': problem}))
temp_data = ""
data_list = []
for p in paragraph_list:
@@ -70,4 +54,4 @@ class BaseGenerateHumanMessageStep(IGenerateHumanMessageStep):
else:
data_list.append(f"<data>{content}</data>")
data = "\n".join(data_list)
return HumanMessage(content=prompt.replace('{data}', data).replace('{question}', problem))
return HumanMessage(content=prompt.format(**{'data': data, 'question': problem}))
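Note the two template-filling styles visible in this hunk: chained str.replace on one side, str.format on the other. str.format fails whenever the prompt contains unrelated literal braces (a JSON example, say), because it parses every brace pair as a replacement field, while targeted replacement only touches the two known placeholders. A quick illustration:

prompt = 'Answer with JSON like {"ok": true}. Context: {data} Question: {question}'

try:
    prompt.format(**{'data': 'ctx', 'question': 'hi'})
except (KeyError, IndexError) as e:
    print('format failed:', e)   # KeyError: '"ok"'

# Safe: only the two known placeholders are substituted.
print(prompt.replace('{data}', 'ctx').replace('{question}', 'hi'))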

View File

@@ -9,11 +9,12 @@
from abc import abstractmethod
from typing import Type, List
from django.utils.translation import gettext_lazy as _
from langchain.chat_models.base import BaseChatModel
from rest_framework import serializers
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep
from application.chat_pipeline.pipeline_manage import PipelineManage
from application.chat_pipeline.step.chat_step.i_chat_step import ModelField
from application.models import ChatRecord
from common.field.common import InstanceField
from common.util.field_message import ErrMessage
@@ -22,16 +23,12 @@ from common.util.field_message import ErrMessage
class IResetProblemStep(IBaseChatPipelineStep):
class InstanceSerializer(serializers.Serializer):
# Question text
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.float(_("question")))
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.float("问题文本"))
# Chat history
history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True),
error_messages=ErrMessage.list(_("History Questions")))
error_messages=ErrMessage.list("历史对答"))
# Large language model
model_id = serializers.UUIDField(required=False, allow_null=True, error_messages=ErrMessage.uuid(_("Model id")))
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
problem_optimization_prompt = serializers.CharField(required=False, max_length=102400,
error_messages=ErrMessage.char(
_("Question completion prompt")))
chat_model = ModelField(error_messages=ErrMessage.base("大语言模型"))
def get_step_serializer(self, manage: PipelineManage) -> Type[serializers.Serializer]:
return self.InstanceSerializer
@@ -45,13 +42,10 @@
manage.context['problem_text'] = source_problem_text
manage.context['padding_problem_text'] = padding_problem
# Accumulate token counts
manage.context['message_tokens'] = manage.context.get('message_tokens', 0) + self.context.get('message_tokens',
0)
manage.context['answer_tokens'] = manage.context.get('answer_tokens', 0) + self.context.get('answer_tokens', 0)
manage.context['message_tokens'] = manage.context['message_tokens'] + self.context.get('message_tokens')
manage.context['answer_tokens'] = manage.context['answer_tokens'] + self.context.get('answer_tokens')
@abstractmethod
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None,
problem_optimization_prompt=None,
user_id=None,
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None,
**kwargs):
pass

View File

@@ -8,33 +8,26 @@
"""
from typing import List
from django.utils.translation import gettext as _
from langchain.chat_models.base import BaseChatModel
from langchain.schema import HumanMessage
from application.chat_pipeline.step.reset_problem_step.i_reset_problem_step import IResetProblemStep
from application.models import ChatRecord
from common.util.split_model import flat_map
from setting.models_provider.tools import get_model_instance_by_model_user_id
prompt = _(
"() contains the user's question. Answer the guessed user's question based on the context ({question}) Requirement: Output a complete question and put it in the <data></data> tag")
prompt = (
'()里面是用户问题,根据上下文回答揣测用户问题({question}) 要求: 输出一个补全问题,并且放在<data></data>标签中')
class BaseResetProblemStep(IResetProblemStep):
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None,
problem_optimization_prompt=None,
user_id=None,
def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, chat_model: BaseChatModel = None,
**kwargs) -> str:
chat_model = get_model_instance_by_model_user_id(model_id, user_id) if model_id is not None else None
if chat_model is None:
return problem_text
start_index = len(history_chat_record) - 3
history_message = [[history_chat_record[index].get_human_message(), history_chat_record[index].get_ai_message()]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))]
reset_prompt = problem_optimization_prompt if problem_optimization_prompt else prompt
message_list = [*flat_map(history_message),
HumanMessage(content=reset_prompt.replace('{question}', problem_text))]
HumanMessage(content=prompt.format(**{'question': problem_text}))]
response = chat_model.invoke(message_list)
padding_problem = problem_text
if response.content.__contains__("<data>") and response.content.__contains__('</data>'):
@@ -42,17 +35,8 @@ class BaseResetProblemStep(IResetProblemStep):
response.content.index('<data>') + 6:response.content.index('</data>')]
if padding_problem_data is not None and len(padding_problem_data.strip()) > 0:
padding_problem = padding_problem_data
elif len(response.content) > 0:
padding_problem = response.content
try:
request_token = chat_model.get_num_tokens_from_messages(message_list)
response_token = chat_model.get_num_tokens(padding_problem)
except Exception as e:
request_token = 0
response_token = 0
self.context['message_tokens'] = request_token
self.context['answer_tokens'] = response_token
self.context['message_tokens'] = chat_model.get_num_tokens_from_messages(message_list)
self.context['answer_tokens'] = chat_model.get_num_tokens(padding_problem)
return padding_problem
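# Side note (sketch, not part of this file): the '+ 6' used above when slicing out
# the padded question is len('<data>'); an equivalent, more self-documenting form:
#     start_tag, end_tag = '<data>', '</data>'
#     if start_tag in response.content and end_tag in response.content:
#         padding_problem = response.content[
#             response.content.index(start_tag) + len(start_tag):response.content.index(end_tag)]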
def get_details(self, manage, **kwargs):
@@ -60,8 +44,8 @@ class BaseResetProblemStep(IResetProblemStep):
'step_type': 'problem_padding',
'run_time': self.context['run_time'],
'model_id': str(manage.context['model_id']) if 'model_id' in manage.context else None,
'message_tokens': self.context.get('message_tokens', 0),
'answer_tokens': self.context.get('answer_tokens', 0),
'message_tokens': self.context['message_tokens'],
'answer_tokens': self.context['answer_tokens'],
'cost': 0,
'padding_problem_text': self.context.get('padding_problem_text'),
'problem_text': self.context.get("step_args").get('problem_text'),

View File

@@ -6,46 +6,38 @@
@date:2024/1/9 18:10
@desc: Retrieve from the knowledge base
"""
import re
from abc import abstractmethod
from typing import List, Type
from django.core import validators
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from application.chat_pipeline.I_base_chat_pipeline import IBaseChatPipelineStep, ParagraphPipelineModel
from application.chat_pipeline.pipeline_manage import PipelineManage
from common.util.field_message import ErrMessage
from dataset.models import Paragraph
class ISearchDatasetStep(IBaseChatPipelineStep):
class InstanceSerializer(serializers.Serializer):
# Original question text
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char(_("question")))
problem_text = serializers.CharField(required=True, error_messages=ErrMessage.char("问题"))
# System-padded question text
padding_problem_text = serializers.CharField(required=False,
error_messages=ErrMessage.char(_("System completes question text")))
padding_problem_text = serializers.CharField(required=False, error_messages=ErrMessage.char("系统补全问题文本"))
# List of dataset ids to query
dataset_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
error_messages=ErrMessage.list(_("Dataset id list")))
error_messages=ErrMessage.list("数据集id列表"))
# Document ids to exclude
exclude_document_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
error_messages=ErrMessage.list(_("List of document ids to exclude")))
error_messages=ErrMessage.list("排除的文档id列表"))
# Vector ids to exclude
exclude_paragraph_id_list = serializers.ListField(required=True, child=serializers.UUIDField(required=True),
error_messages=ErrMessage.list(_("List of exclusion vector ids")))
error_messages=ErrMessage.list("排除向量id列表"))
# Number of entries to retrieve
top_n = serializers.IntegerField(required=True,
error_messages=ErrMessage.integer(_("Reference segment number")))
error_messages=ErrMessage.integer("引用分段数"))
# Similarity, between 0 and 1
similarity = serializers.FloatField(required=True, max_value=1, min_value=0,
error_messages=ErrMessage.float(_("Similarity")))
search_mode = serializers.CharField(required=True, validators=[
validators.RegexValidator(regex=re.compile("^embedding|keywords|blend$"),
message=_("The type only supports embedding|keywords|blend"), code=500)
], error_messages=ErrMessage.char(_("Retrieval Mode")))
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_("User ID")))
error_messages=ErrMessage.float("引用分段数"))
def get_step_serializer(self, manage: PipelineManage) -> Type[InstanceSerializer]:
return self.InstanceSerializer
@@ -58,8 +50,6 @@
@abstractmethod
def execute(self, problem_text: str, dataset_id_list: list[str], exclude_document_id_list: list[str],
exclude_paragraph_id_list: list[str], top_n: int, similarity: float, padding_problem_text: str = None,
search_mode: str = None,
user_id=None,
**kwargs) -> List[ParagraphPipelineModel]:
"""
Note on the user question vs. the padded question: if a padded question exists it is used for retrieval; otherwise the original user question is used.
@@ -70,8 +60,6 @@ class ISearchDatasetStep(IBaseChatPipelineStep):
:param exclude_document_id_list: document ids to exclude
:param exclude_paragraph_id_list: paragraph ids to exclude
:param padding_problem_text: padded question
:param search_mode: retrieval mode
:param user_id: user id
:return: paragraph list
"""
pass

View File

@@ -10,63 +10,31 @@ import os
from typing import List, Dict
from django.db.models import QuerySet
from django.utils.translation import gettext_lazy as _
from rest_framework.utils.formatting import lazy_format
from application.chat_pipeline.I_base_chat_pipeline import ParagraphPipelineModel
from application.chat_pipeline.step.search_dataset_step.i_search_dataset_step import ISearchDatasetStep
from common.config.embedding_config import VectorStore, ModelManage
from common.config.embedding_config import VectorStore, EmbeddingModel
from common.db.search import native_search
from common.util.file_util import get_file_content
from dataset.models import Paragraph, DataSet
from embedding.models import SearchMode
from setting.models import Model
from setting.models_provider import get_model
from dataset.models import Paragraph
from smartdoc.conf import PROJECT_DIR
def get_model_by_id(_id, user_id):
model = QuerySet(Model).filter(id=_id).first()
if model is None:
raise Exception(_("Model does not exist"))
if model.permission_type == 'PRIVATE' and str(model.user_id) != str(user_id):
message = lazy_format(_('No permission to use this model {model_name}'), model_name=model.name)
raise Exception(message)
return model
def get_embedding_id(dataset_id_list):
dataset_list = QuerySet(DataSet).filter(id__in=dataset_id_list)
if len(set([dataset.embedding_mode_id for dataset in dataset_list])) > 1:
raise Exception(_("The vector model of the associated knowledge base is inconsistent and the segmentation cannot be recalled."))
if len(dataset_list) == 0:
raise Exception(_("The knowledge base setting is wrong, please reset the knowledge base"))
return dataset_list[0].embedding_mode_id
class BaseSearchDatasetStep(ISearchDatasetStep):
def execute(self, problem_text: str, dataset_id_list: list[str], exclude_document_id_list: list[str],
exclude_paragraph_id_list: list[str], top_n: int, similarity: float, padding_problem_text: str = None,
search_mode: str = None,
user_id=None,
**kwargs) -> List[ParagraphPipelineModel]:
if len(dataset_id_list) == 0:
return []
exec_problem_text = padding_problem_text if padding_problem_text is not None else problem_text
model_id = get_embedding_id(dataset_id_list)
model = get_model_by_id(model_id, user_id)
self.context['model_name'] = model.name
embedding_model = ModelManage.get_model(model_id, lambda _id: get_model(model))
embedding_model = EmbeddingModel.get_embedding_model()
embedding_value = embedding_model.embed_query(exec_problem_text)
vector = VectorStore.get_embedding_vector()
embedding_list = vector.query(exec_problem_text, embedding_value, dataset_id_list, exclude_document_id_list,
exclude_paragraph_id_list, True, top_n, similarity, SearchMode(search_mode))
embedding_list = vector.query(embedding_value, dataset_id_list, exclude_document_id_list,
exclude_paragraph_id_list, True, top_n, similarity)
if embedding_list is None:
return []
paragraph_list = self.list_paragraph(embedding_list, vector)
result = [self.reset_paragraph(paragraph, embedding_list) for paragraph in paragraph_list]
return result
paragraph_list = self.list_paragraph([row.get('paragraph_id') for row in embedding_list], vector)
return [self.reset_paragraph(paragraph, embedding_list) for paragraph in paragraph_list]
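# Side note (sketch, not part of this file): ModelManage is not shown in this diff.
# Its call above, ModelManage.get_model(model_id, lambda _id: get_model(model)),
# implies a cache-or-build helper roughly like:
#     class ModelManage:
#         _cache = {}
#         @classmethod
#         def get_model(cls, model_id, builder):
#             if model_id not in cls._cache:
#                 cls._cache[model_id] = builder(model_id)   # build once, reuse after
#             return cls._cache[model_id]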
@staticmethod
def reset_paragraph(paragraph: Dict, embedding_list: List) -> ParagraphPipelineModel:
@@ -80,23 +48,10 @@ class BaseSearchDatasetStep(ISearchDatasetStep):
.add_comprehensive_score(find_embedding.get('comprehensive_score'))
.add_dataset_name(paragraph.get('dataset_name'))
.add_document_name(paragraph.get('document_name'))
.add_hit_handling_method(paragraph.get('hit_handling_method'))
.add_directly_return_similarity(paragraph.get('directly_return_similarity'))
.add_meta(paragraph.get('meta'))
.build())
@staticmethod
def get_similarity(paragraph, embedding_list: List):
filter_embedding_list = [embedding for embedding in embedding_list if
str(embedding.get('paragraph_id')) == str(paragraph.get('id'))]
if filter_embedding_list is not None and len(filter_embedding_list) > 0:
find_embedding = filter_embedding_list[-1]
return find_embedding.get('comprehensive_score')
return 0
@staticmethod
def list_paragraph(embedding_list: List, vector):
paragraph_id_list = [row.get('paragraph_id') for row in embedding_list]
def list_paragraph(paragraph_id_list: List, vector):
if paragraph_id_list is None or len(paragraph_id_list) == 0:
return []
paragraph_list = native_search(QuerySet(Paragraph).filter(id__in=paragraph_id_list),
@@ -110,16 +65,6 @@ class BaseSearchDatasetStep(ISearchDatasetStep):
for paragraph_id in paragraph_id_list:
if not exist_paragraph_list.__contains__(paragraph_id):
vector.delete_by_paragraph_id(paragraph_id)
# If any paragraph qualifies for direct return, use only the directly returned paragraphs
hit_handling_method_paragraph = [paragraph for paragraph in paragraph_list if
(paragraph.get(
'hit_handling_method') == 'directly_return' and BaseSearchDatasetStep.get_similarity(
paragraph, embedding_list) >= paragraph.get(
'directly_return_similarity'))]
if len(hit_handling_method_paragraph) > 0:
# Pick the one with the highest score
return [sorted(hit_handling_method_paragraph,
key=lambda p: BaseSearchDatasetStep.get_similarity(p, embedding_list))[-1]]
return paragraph_list
def get_details(self, manage, **kwargs):
@@ -131,7 +76,7 @@ class BaseSearchDatasetStep(ISearchDatasetStep):
'run_time': self.context['run_time'],
'problem_text': step_args.get(
'padding_problem_text') if 'padding_problem_text' in step_args else step_args.get('problem_text'),
'model_name': self.context.get('model_name'),
'model_name': EmbeddingModel.get_embedding_model().model_name,
'message_tokens': 0,
'answer_tokens': 0,
'cost': 0

View File

@@ -1,8 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file __init__.py.py
@date:2024/6/7 14:43
@desc:
"""

View File

@@ -1,44 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file common.py
@date:2024/12/11 17:57
@desc:
"""
class Answer:
def __init__(self, content, view_type, runtime_node_id, chat_record_id, child_node, real_node_id,
reasoning_content):
self.view_type = view_type
self.content = content
self.reasoning_content = reasoning_content
self.runtime_node_id = runtime_node_id
self.chat_record_id = chat_record_id
self.child_node = child_node
self.real_node_id = real_node_id
def to_dict(self):
return {'view_type': self.view_type, 'content': self.content, 'runtime_node_id': self.runtime_node_id,
'chat_record_id': self.chat_record_id,
'child_node': self.child_node,
'reasoning_content': self.reasoning_content,
'real_node_id': self.real_node_id}
class NodeChunk:
def __init__(self):
self.status = 0
self.chunk_list = []
def add_chunk(self, chunk):
self.chunk_list.append(chunk)
def end(self, chunk=None):
if chunk is not None:
self.add_chunk(chunk)
self.status = 200
def is_end(self):
return self.status == 200
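A minimal producer/consumer sketch of how NodeChunk appears intended to be used; the usage below is inferred from the class shape, not shown in this diff:

node_chunk = NodeChunk()
# Producer: append streamed fragments, then mark the node as finished.
for fragment in ('Hel', 'lo'):
    node_chunk.add_chunk(fragment)
node_chunk.end()
# Consumer: drain the buffer once the node reports completion.
if node_chunk.is_end():
    print(''.join(node_chunk.chunk_list))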

View File

@@ -1,451 +0,0 @@
{
"nodes": [
{
"id": "base-node",
"type": "base-node",
"x": 360,
"y": 2810,
"properties": {
"config": {
},
"height": 825.6,
"stepName": "基本信息",
"node_data": {
"desc": "",
"name": "maxkbapplication",
"prologue": "您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?"
},
"input_field_list": [
]
}
},
{
"id": "start-node",
"type": "start-node",
"x": 430,
"y": 3660,
"properties": {
"config": {
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
},
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"height": 276,
"stepName": "开始",
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
}
},
{
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"type": "search-dataset-node",
"x": 840,
"y": 3210,
"properties": {
"config": {
"fields": [
{
"label": "检索结果的分段列表",
"value": "paragraph_list"
},
{
"label": "满足直接回答的分段列表",
"value": "is_hit_handling_method_list"
},
{
"label": "检索结果",
"value": "data"
},
{
"label": "满足直接回答的分段内容",
"value": "directly_return"
}
]
},
"height": 794,
"stepName": "知识库检索",
"node_data": {
"dataset_id_list": [
],
"dataset_setting": {
"top_n": 3,
"similarity": 0.6,
"search_mode": "embedding",
"max_paragraph_char_number": 5000
},
"question_reference_address": [
"start-node",
"question"
],
"source_dataset_id_list": [
]
}
}
},
{
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"type": "condition-node",
"x": 1490,
"y": 3210,
"properties": {
"width": 600,
"config": {
"fields": [
{
"label": "分支名称",
"value": "branch_name"
}
]
},
"height": 543.675,
"stepName": "判断器",
"node_data": {
"branch": [
{
"id": "1009",
"type": "IF",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"is_hit_handling_method_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "4908",
"type": "ELSE IF 1",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"paragraph_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "161",
"type": "ELSE",
"condition": "and",
"conditions": [
]
}
]
},
"branch_condition_list": [
{
"index": 0,
"height": 121.225,
"id": "1009"
},
{
"index": 1,
"height": 121.225,
"id": "4908"
},
{
"index": 2,
"height": 44,
"id": "161"
}
]
}
},
{
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"type": "reply-node",
"x": 2170,
"y": 2480,
"properties": {
"config": {
"fields": [
{
"label": "内容",
"value": "answer"
}
]
},
"height": 378,
"stepName": "指定回复",
"node_data": {
"fields": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"directly_return"
],
"content": "",
"reply_type": "referencing",
"is_result": true
}
}
},
{
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"type": "ai-chat-node",
"x": 2160,
"y": 3200,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI 对话",
"node_data": {
"prompt": "已知信息:\n{{知识库检索.data}}\n问题\n{{开始.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
},
{
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"type": "ai-chat-node",
"x": 2160,
"y": 3970,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI 对话1",
"node_data": {
"prompt": "{{开始.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
}
],
"edges": [
{
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
"type": "app-edge",
"sourceNodeId": "start-node",
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"startPoint": {
"x": 590,
"y": 3660
},
"endPoint": {
"x": 680,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 590,
"y": 3660
},
{
"x": 700,
"y": 3660
},
{
"x": 570,
"y": 3210
},
{
"x": 680,
"y": 3210
}
],
"sourceAnchorId": "start-node_right",
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
},
{
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
"type": "app-edge",
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"startPoint": {
"x": 1000,
"y": 3210
},
"endPoint": {
"x": 1200,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 1000,
"y": 3210
},
{
"x": 1110,
"y": 3210
},
{
"x": 1090,
"y": 3210
},
{
"x": 1200,
"y": 3210
}
],
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
},
{
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"startPoint": {
"x": 1780,
"y": 3073.775
},
"endPoint": {
"x": 2010,
"y": 2480
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3073.775
},
{
"x": 1890,
"y": 3073.775
},
{
"x": 1900,
"y": 2480
},
{
"x": 2010,
"y": 2480
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
},
{
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"startPoint": {
"x": 1780,
"y": 3203
},
"endPoint": {
"x": 2000,
"y": 3200
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3203
},
{
"x": 1890,
"y": 3203
},
{
"x": 1890,
"y": 3200
},
{
"x": 2000,
"y": 3200
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
},
{
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"startPoint": {
"x": 1780,
"y": 3293.6124999999997
},
"endPoint": {
"x": 2000,
"y": 3970
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3970
},
{
"x": 2000,
"y": 3970
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
}
]
}

View File

@@ -1,451 +0,0 @@
{
"nodes": [
{
"id": "base-node",
"type": "base-node",
"x": 360,
"y": 2810,
"properties": {
"config": {
},
"height": 825.6,
"stepName": "Base",
"node_data": {
"desc": "",
"name": "maxkbapplication",
"prologue": "Hello, I am the MaxKB assistant. You can ask me about MaxKB usage issues.\n-What are the main functions of MaxKB?\n-What major language models does MaxKB support?\n-What document types does MaxKB support?"
},
"input_field_list": [
]
}
},
{
"id": "start-node",
"type": "start-node",
"x": 430,
"y": 3660,
"properties": {
"config": {
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
},
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"height": 276,
"stepName": "Start",
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
}
},
{
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"type": "search-dataset-node",
"x": 840,
"y": 3210,
"properties": {
"config": {
"fields": [
{
"label": "检索结果的分段列表",
"value": "paragraph_list"
},
{
"label": "满足直接回答的分段列表",
"value": "is_hit_handling_method_list"
},
{
"label": "检索结果",
"value": "data"
},
{
"label": "满足直接回答的分段内容",
"value": "directly_return"
}
]
},
"height": 794,
"stepName": "Knowledge Search",
"node_data": {
"dataset_id_list": [
],
"dataset_setting": {
"top_n": 3,
"similarity": 0.6,
"search_mode": "embedding",
"max_paragraph_char_number": 5000
},
"question_reference_address": [
"start-node",
"question"
],
"source_dataset_id_list": [
]
}
}
},
{
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"type": "condition-node",
"x": 1490,
"y": 3210,
"properties": {
"width": 600,
"config": {
"fields": [
{
"label": "分支名称",
"value": "branch_name"
}
]
},
"height": 543.675,
"stepName": "Conditional Branch",
"node_data": {
"branch": [
{
"id": "1009",
"type": "IF",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"is_hit_handling_method_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "4908",
"type": "ELSE IF 1",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"paragraph_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "161",
"type": "ELSE",
"condition": "and",
"conditions": [
]
}
]
},
"branch_condition_list": [
{
"index": 0,
"height": 121.225,
"id": "1009"
},
{
"index": 1,
"height": 121.225,
"id": "4908"
},
{
"index": 2,
"height": 44,
"id": "161"
}
]
}
},
{
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"type": "reply-node",
"x": 2170,
"y": 2480,
"properties": {
"config": {
"fields": [
{
"label": "内容",
"value": "answer"
}
]
},
"height": 378,
"stepName": "Specified Reply",
"node_data": {
"fields": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"directly_return"
],
"content": "",
"reply_type": "referencing",
"is_result": true
}
}
},
{
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"type": "ai-chat-node",
"x": 2160,
"y": 3200,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI Chat",
"node_data": {
"prompt": "Known information:\n{{Knowledge Search.data}}\nQuestion:\n{{Start.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
},
{
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"type": "ai-chat-node",
"x": 2160,
"y": 3970,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI Chat1",
"node_data": {
"prompt": "{{Start.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
}
],
"edges": [
{
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
"type": "app-edge",
"sourceNodeId": "start-node",
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"startPoint": {
"x": 590,
"y": 3660
},
"endPoint": {
"x": 680,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 590,
"y": 3660
},
{
"x": 700,
"y": 3660
},
{
"x": 570,
"y": 3210
},
{
"x": 680,
"y": 3210
}
],
"sourceAnchorId": "start-node_right",
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
},
{
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
"type": "app-edge",
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"startPoint": {
"x": 1000,
"y": 3210
},
"endPoint": {
"x": 1200,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 1000,
"y": 3210
},
{
"x": 1110,
"y": 3210
},
{
"x": 1090,
"y": 3210
},
{
"x": 1200,
"y": 3210
}
],
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
},
{
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"startPoint": {
"x": 1780,
"y": 3073.775
},
"endPoint": {
"x": 2010,
"y": 2480
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3073.775
},
{
"x": 1890,
"y": 3073.775
},
{
"x": 1900,
"y": 2480
},
{
"x": 2010,
"y": 2480
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
},
{
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"startPoint": {
"x": 1780,
"y": 3203
},
"endPoint": {
"x": 2000,
"y": 3200
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3203
},
{
"x": 1890,
"y": 3203
},
{
"x": 1890,
"y": 3200
},
{
"x": 2000,
"y": 3200
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
},
{
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"startPoint": {
"x": 1780,
"y": 3293.6124999999997
},
"endPoint": {
"x": 2000,
"y": 3970
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3970
},
{
"x": 2000,
"y": 3970
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
}
]
}

View File

@@ -1,451 +0,0 @@
{
"nodes": [
{
"id": "base-node",
"type": "base-node",
"x": 360,
"y": 2810,
"properties": {
"config": {
},
"height": 825.6,
"stepName": "基本信息",
"node_data": {
"desc": "",
"name": "maxkbapplication",
"prologue": "您好,我是 MaxKB 小助手,您可以向我提出 MaxKB 使用问题。\n- MaxKB 主要功能有什么?\n- MaxKB 支持哪些大语言模型?\n- MaxKB 支持哪些文档类型?"
},
"input_field_list": [
]
}
},
{
"id": "start-node",
"type": "start-node",
"x": 430,
"y": 3660,
"properties": {
"config": {
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
},
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"height": 276,
"stepName": "开始",
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
}
},
{
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"type": "search-dataset-node",
"x": 840,
"y": 3210,
"properties": {
"config": {
"fields": [
{
"label": "检索结果的分段列表",
"value": "paragraph_list"
},
{
"label": "满足直接回答的分段列表",
"value": "is_hit_handling_method_list"
},
{
"label": "检索结果",
"value": "data"
},
{
"label": "满足直接回答的分段内容",
"value": "directly_return"
}
]
},
"height": 794,
"stepName": "知识库检索",
"node_data": {
"dataset_id_list": [
],
"dataset_setting": {
"top_n": 3,
"similarity": 0.6,
"search_mode": "embedding",
"max_paragraph_char_number": 5000
},
"question_reference_address": [
"start-node",
"question"
],
"source_dataset_id_list": [
]
}
}
},
{
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"type": "condition-node",
"x": 1490,
"y": 3210,
"properties": {
"width": 600,
"config": {
"fields": [
{
"label": "分支名称",
"value": "branch_name"
}
]
},
"height": 543.675,
"stepName": "判断器",
"node_data": {
"branch": [
{
"id": "1009",
"type": "IF",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"is_hit_handling_method_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "4908",
"type": "ELSE IF 1",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"paragraph_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "161",
"type": "ELSE",
"condition": "and",
"conditions": [
]
}
]
},
"branch_condition_list": [
{
"index": 0,
"height": 121.225,
"id": "1009"
},
{
"index": 1,
"height": 121.225,
"id": "4908"
},
{
"index": 2,
"height": 44,
"id": "161"
}
]
}
},
{
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"type": "reply-node",
"x": 2170,
"y": 2480,
"properties": {
"config": {
"fields": [
{
"label": "内容",
"value": "answer"
}
]
},
"height": 378,
"stepName": "指定回复",
"node_data": {
"fields": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"directly_return"
],
"content": "",
"reply_type": "referencing",
"is_result": true
}
}
},
{
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"type": "ai-chat-node",
"x": 2160,
"y": 3200,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI 对话",
"node_data": {
"prompt": "已知信息:\n{{知识库检索.data}}\n问题\n{{开始.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
},
{
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"type": "ai-chat-node",
"x": 2160,
"y": 3970,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI 对话1",
"node_data": {
"prompt": "{{开始.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
}
],
"edges": [
{
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
"type": "app-edge",
"sourceNodeId": "start-node",
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"startPoint": {
"x": 590,
"y": 3660
},
"endPoint": {
"x": 680,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 590,
"y": 3660
},
{
"x": 700,
"y": 3660
},
{
"x": 570,
"y": 3210
},
{
"x": 680,
"y": 3210
}
],
"sourceAnchorId": "start-node_right",
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
},
{
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
"type": "app-edge",
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"startPoint": {
"x": 1000,
"y": 3210
},
"endPoint": {
"x": 1200,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 1000,
"y": 3210
},
{
"x": 1110,
"y": 3210
},
{
"x": 1090,
"y": 3210
},
{
"x": 1200,
"y": 3210
}
],
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
},
{
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"startPoint": {
"x": 1780,
"y": 3073.775
},
"endPoint": {
"x": 2010,
"y": 2480
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3073.775
},
{
"x": 1890,
"y": 3073.775
},
{
"x": 1900,
"y": 2480
},
{
"x": 2010,
"y": 2480
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
},
{
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"startPoint": {
"x": 1780,
"y": 3203
},
"endPoint": {
"x": 2000,
"y": 3200
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3203
},
{
"x": 1890,
"y": 3203
},
{
"x": 1890,
"y": 3200
},
{
"x": 2000,
"y": 3200
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
},
{
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"startPoint": {
"x": 1780,
"y": 3293.6124999999997
},
"endPoint": {
"x": 2000,
"y": 3970
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3970
},
{
"x": 2000,
"y": 3970
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
}
]
}

View File

@@ -1,451 +0,0 @@
{
"nodes": [
{
"id": "base-node",
"type": "base-node",
"x": 360,
"y": 2810,
"properties": {
"config": {
},
"height": 825.6,
"stepName": "基本資訊",
"node_data": {
"desc": "",
"name": "maxkbapplication",
"prologue": "您好我是MaxKB小助手您可以向我提出MaxKB使用問題。\n- MaxKB主要功能有什麼\n- MaxKB支持哪些大語言模型\n- MaxKB支持哪些文檔類型"
},
"input_field_list": [
]
}
},
{
"id": "start-node",
"type": "start-node",
"x": 430,
"y": 3660,
"properties": {
"config": {
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
},
"fields": [
{
"label": "用户问题",
"value": "question"
}
],
"height": 276,
"stepName": "開始",
"globalFields": [
{
"label": "当前时间",
"value": "time"
}
]
}
},
{
"id": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"type": "search-dataset-node",
"x": 840,
"y": 3210,
"properties": {
"config": {
"fields": [
{
"label": "检索结果的分段列表",
"value": "paragraph_list"
},
{
"label": "满足直接回答的分段列表",
"value": "is_hit_handling_method_list"
},
{
"label": "检索结果",
"value": "data"
},
{
"label": "满足直接回答的分段内容",
"value": "directly_return"
}
]
},
"height": 794,
"stepName": "知識庫檢索",
"node_data": {
"dataset_id_list": [
],
"dataset_setting": {
"top_n": 3,
"similarity": 0.6,
"search_mode": "embedding",
"max_paragraph_char_number": 5000
},
"question_reference_address": [
"start-node",
"question"
],
"source_dataset_id_list": [
]
}
}
},
{
"id": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"type": "condition-node",
"x": 1490,
"y": 3210,
"properties": {
"width": 600,
"config": {
"fields": [
{
"label": "分支名称",
"value": "branch_name"
}
]
},
"height": 543.675,
"stepName": "判斷器",
"node_data": {
"branch": [
{
"id": "1009",
"type": "IF",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"is_hit_handling_method_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "4908",
"type": "ELSE IF 1",
"condition": "and",
"conditions": [
{
"field": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"paragraph_list"
],
"value": "1",
"compare": "len_ge"
}
]
},
{
"id": "161",
"type": "ELSE",
"condition": "and",
"conditions": [
]
}
]
},
"branch_condition_list": [
{
"index": 0,
"height": 121.225,
"id": "1009"
},
{
"index": 1,
"height": 121.225,
"id": "4908"
},
{
"index": 2,
"height": 44,
"id": "161"
}
]
}
},
{
"id": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"type": "reply-node",
"x": 2170,
"y": 2480,
"properties": {
"config": {
"fields": [
{
"label": "内容",
"value": "answer"
}
]
},
"height": 378,
"stepName": "指定回覆",
"node_data": {
"fields": [
"b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"directly_return"
],
"content": "",
"reply_type": "referencing",
"is_result": true
}
}
},
{
"id": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"type": "ai-chat-node",
"x": 2160,
"y": 3200,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI 對話",
"node_data": {
"prompt": "已知資訊:\n{{知識庫檢索.data}}\n問題\n{{開始.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
},
{
"id": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"type": "ai-chat-node",
"x": 2160,
"y": 3970,
"properties": {
"config": {
"fields": [
{
"label": "AI 回答内容",
"value": "answer"
}
]
},
"height": 763,
"stepName": "AI 對話1",
"node_data": {
"prompt": "{{開始.question}}",
"system": "",
"model_id": "",
"dialogue_number": 0,
"is_result": true
}
}
}
],
"edges": [
{
"id": "7d0f166f-c472-41b2-b9a2-c294f4c83d73",
"type": "app-edge",
"sourceNodeId": "start-node",
"targetNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"startPoint": {
"x": 590,
"y": 3660
},
"endPoint": {
"x": 680,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 590,
"y": 3660
},
{
"x": 700,
"y": 3660
},
{
"x": 570,
"y": 3210
},
{
"x": 680,
"y": 3210
}
],
"sourceAnchorId": "start-node_right",
"targetAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_left"
},
{
"id": "35cb86dd-f328-429e-a973-12fd7218b696",
"type": "app-edge",
"sourceNodeId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5",
"targetNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"startPoint": {
"x": 1000,
"y": 3210
},
"endPoint": {
"x": 1200,
"y": 3210
},
"properties": {
},
"pointsList": [
{
"x": 1000,
"y": 3210
},
{
"x": 1110,
"y": 3210
},
{
"x": 1090,
"y": 3210
},
{
"x": 1200,
"y": 3210
}
],
"sourceAnchorId": "b931efe5-5b66-46e0-ae3b-0160cb18eeb5_right",
"targetAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_left"
},
{
"id": "e8f6cfe6-7e48-41cd-abd3-abfb5304d0d8",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "4ffe1086-25df-4c85-b168-979b5bbf0a26",
"startPoint": {
"x": 1780,
"y": 3073.775
},
"endPoint": {
"x": 2010,
"y": 2480
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3073.775
},
{
"x": 1890,
"y": 3073.775
},
{
"x": 1900,
"y": 2480
},
{
"x": 2010,
"y": 2480
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_1009_right",
"targetAnchorId": "4ffe1086-25df-4c85-b168-979b5bbf0a26_left"
},
{
"id": "994ff325-6f7a-4ebc-b61b-10e15519d6d2",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb",
"startPoint": {
"x": 1780,
"y": 3203
},
"endPoint": {
"x": 2000,
"y": 3200
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3203
},
{
"x": 1890,
"y": 3203
},
{
"x": 1890,
"y": 3200
},
{
"x": 2000,
"y": 3200
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_4908_right",
"targetAnchorId": "f1f1ee18-5a02-46f6-b4e6-226253cdffbb_left"
},
{
"id": "19270caf-bb9f-4ba7-9bf8-200aa70fecd5",
"type": "app-edge",
"sourceNodeId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b",
"targetNodeId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7",
"startPoint": {
"x": 1780,
"y": 3293.6124999999997
},
"endPoint": {
"x": 2000,
"y": 3970
},
"properties": {
},
"pointsList": [
{
"x": 1780,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3293.6124999999997
},
{
"x": 1890,
"y": 3970
},
{
"x": 2000,
"y": 3970
}
],
"sourceAnchorId": "fc60863a-dec2-4854-9e5a-7a44b7187a2b_161_right",
"targetAnchorId": "309d0eef-c597-46b5-8d51-b9a28aaef4c7_left"
}
]
}

View File

@@ -1,256 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file i_step_node.py
@date:2024/6/3 14:57
@desc:
"""
import time
import uuid
from abc import abstractmethod
from hashlib import sha1
from typing import Type, Dict, List
from django.core import cache
from django.db.models import QuerySet
from rest_framework import serializers
from rest_framework.exceptions import ValidationError, ErrorDetail
from application.flow.common import Answer, NodeChunk
from application.models import ChatRecord
from application.models.api_key_model import ApplicationPublicAccessClient
from common.constants.authentication_type import AuthenticationType
from common.field.common import InstanceField
from common.util.field_message import ErrMessage
chat_cache = cache.caches['chat_cache']
def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if step_variable is not None:
for key in step_variable:
node.context[key] = step_variable[key]
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'answer' in step_variable:
answer = step_variable['answer']
yield answer
node.answer_text = answer
if global_variable is not None:
for key in global_variable:
workflow.context[key] = global_variable[key]
node.context['run_time'] = time.time() - node.context['start_time']
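# Note (sketch, not part of this file): write_context is a generator. A caller must
# iterate it so the step/global variables are applied and any 'answer' (yielded only
# for result nodes) is streamed, for example (push_to_stream is hypothetical):
#     for answer in node_result.write_context(node, workflow):
#         push_to_stream(answer)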
def is_interrupt(node, step_variable: Dict, global_variable: Dict):
return node.type == 'form-node' and not node.context.get('is_submit', False)
class WorkFlowPostHandler:
def __init__(self, chat_info, client_id, client_type):
self.chat_info = chat_info
self.client_id = client_id
self.client_type = client_type
def handler(self, chat_id,
chat_record_id,
answer,
workflow):
question = workflow.params['question']
details = workflow.get_runtime_details()
message_tokens = sum([row.get('message_tokens') for row in details.values() if
'message_tokens' in row and row.get('message_tokens') is not None])
answer_tokens = sum([row.get('answer_tokens') for row in details.values() if
'answer_tokens' in row and row.get('answer_tokens') is not None])
answer_text_list = workflow.get_answer_text_list()
answer_text = '\n\n'.join(
'\n\n'.join([a.get('content') for a in answer]) for answer in
answer_text_list)
if workflow.chat_record is not None:
chat_record = workflow.chat_record
chat_record.answer_text = answer_text
chat_record.details = details
chat_record.message_tokens = message_tokens
chat_record.answer_tokens = answer_tokens
chat_record.answer_text_list = answer_text_list
chat_record.run_time = time.time() - workflow.context['start_time']
else:
chat_record = ChatRecord(id=chat_record_id,
chat_id=chat_id,
problem_text=question,
answer_text=answer_text,
details=details,
message_tokens=message_tokens,
answer_tokens=answer_tokens,
answer_text_list=answer_text_list,
run_time=time.time() - workflow.context['start_time'],
index=0)
asker = workflow.context.get('asker', None)
self.chat_info.append_chat_record(chat_record, self.client_id, asker)
# Refresh the cache entry
chat_cache.set(chat_id,
self.chat_info, timeout=60 * 30)
if self.client_type == AuthenticationType.APPLICATION_ACCESS_TOKEN.value:
application_public_access_client = (QuerySet(ApplicationPublicAccessClient)
.filter(client_id=self.client_id,
application_id=self.chat_info.application.id).first())
if application_public_access_client is not None:
application_public_access_client.access_num = application_public_access_client.access_num + 1
application_public_access_client.intraday_access_num = application_public_access_client.intraday_access_num + 1
application_public_access_client.save()
class NodeResult:
def __init__(self, node_variable: Dict, workflow_variable: Dict,
_write_context=write_context, _is_interrupt=is_interrupt):
self._write_context = _write_context
self.node_variable = node_variable
self.workflow_variable = workflow_variable
self._is_interrupt = _is_interrupt
def write_context(self, node, workflow):
return self._write_context(self.node_variable, self.workflow_variable, node, workflow)
def is_assertion_result(self):
return 'branch_id' in self.node_variable
def is_interrupt_exec(self, current_node):
"""
Whether to interrupt execution
@param current_node:
@return:
"""
return self._is_interrupt(current_node, self.node_variable, self.workflow_variable)
class ReferenceAddressSerializer(serializers.Serializer):
node_id = serializers.CharField(required=True, error_messages=ErrMessage.char("节点id"))
fields = serializers.ListField(
child=serializers.CharField(required=True, error_messages=ErrMessage.char("节点字段")), required=True,
error_messages=ErrMessage.list("节点字段数组"))
class FlowParamsSerializer(serializers.Serializer):
# Chat history
history_chat_record = serializers.ListField(child=InstanceField(model_type=ChatRecord, required=True),
error_messages=ErrMessage.list("历史对答"))
question = serializers.CharField(required=True, error_messages=ErrMessage.list("用户问题"))
chat_id = serializers.CharField(required=True, error_messages=ErrMessage.list("对话id"))
chat_record_id = serializers.CharField(required=True, error_messages=ErrMessage.char("对话记录id"))
stream = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("流式输出"))
client_id = serializers.CharField(required=False, error_messages=ErrMessage.char("客户端id"))
client_type = serializers.CharField(required=False, error_messages=ErrMessage.char("客户端类型"))
user_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("用户id"))
re_chat = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean("换个答案"))
class INode:
view_type = 'many_view'
@abstractmethod
def save_context(self, details, workflow_manage):
pass
def get_answer_list(self) -> List[Answer] | None:
if self.answer_text is None:
return None
reasoning_content_enable = self.context.get('model_setting', {}).get('reasoning_content_enable', False)
return [
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], {},
self.runtime_node_id, self.context.get('reasoning_content', '') if reasoning_content_enable else '')]
def __init__(self, node, workflow_params, workflow_manage, up_node_id_list=None,
get_node_params=lambda node: node.properties.get('node_data')):
# Context of the current step, used to store per-step information
self.status = 200
self.err_message = ''
self.node = node
self.node_params = get_node_params(node)
self.workflow_params = workflow_params
self.workflow_manage = workflow_manage
self.node_params_serializer = None
self.flow_params_serializer = None
self.context = {}
self.answer_text = None
self.id = node.id
if up_node_id_list is None:
up_node_id_list = []
self.up_node_id_list = up_node_id_list
self.node_chunk = NodeChunk()
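# The runtime node id below is deterministic: the sorted upstream node ids plus this
# node's id are hashed (uuid5, then sha1 with a fixed namespace prefix), so the same
# position in the flow always maps to the same id across executions.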
self.runtime_node_id = sha1(uuid.NAMESPACE_DNS.bytes + bytes(str(uuid.uuid5(uuid.NAMESPACE_DNS,
"".join([*sorted(up_node_id_list),
node.id]))),
"utf-8")).hexdigest()
def valid_args(self, node_params, flow_params):
flow_params_serializer_class = self.get_flow_params_serializer_class()
node_params_serializer_class = self.get_node_params_serializer_class()
if flow_params_serializer_class is not None and flow_params is not None:
self.flow_params_serializer = flow_params_serializer_class(data=flow_params)
self.flow_params_serializer.is_valid(raise_exception=True)
if node_params_serializer_class is not None:
self.node_params_serializer = node_params_serializer_class(data=node_params)
self.node_params_serializer.is_valid(raise_exception=True)
if self.node.properties.get('status', 200) != 200:
raise ValidationError(ErrorDetail(f'节点{self.node.properties.get("stepName")} 不可用'))
def get_reference_field(self, fields: List[str]):
return self.get_field(self.context, fields)
@staticmethod
def get_field(obj, fields: List[str]):
for field in fields:
value = obj.get(field)
if value is None:
return None
else:
obj = value
return obj
@abstractmethod
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
pass
def get_flow_params_serializer_class(self) -> Type[serializers.Serializer]:
return FlowParamsSerializer
def get_write_error_context(self, e):
self.status = 500
self.answer_text = str(e)
self.err_message = str(e)
self.context['run_time'] = time.time() - self.context['start_time']
def write_error_context(answer, status=200):
pass
return write_error_context
def run(self) -> NodeResult:
"""
:return: execution result
"""
start_time = time.time()
self.context['start_time'] = start_time
result = self._run()
self.context['run_time'] = time.time() - start_time
return result
def _run(self):
result = self.execute()
return result
def execute(self, **kwargs) -> NodeResult:
pass
def get_details(self, index: int, **kwargs):
"""
Run details
:return: step details
"""
return {}
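A minimal standalone sketch of the get_field traversal above, which is the core of reference resolution; the sample context dict and the type hints are illustrative assumptions, not MaxKB code:

from typing import List, Optional

def get_field(obj: dict, fields: List[str]) -> Optional[object]:
    # Walk one path segment at a time; return None on the first missing segment.
    for field in fields:
        value = obj.get(field)
        if value is None:
            return None
        obj = value
    return obj

context = {'start-node': {'question': 'hello'}}
print(get_field(context, ['start-node', 'question']))  # -> hello
print(get_field(context, ['start-node', 'missing']))   # -> None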

View File

@ -1,42 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py.py
@date: 2024/6/7 14:43
@desc:
"""
from .ai_chat_step_node import *
from .application_node import BaseApplicationNode
from .condition_node import *
from .direct_reply_node import *
from .form_node import *
from .function_lib_node import *
from .function_node import *
from .question_node import *
from .reranker_node import *
from .document_extract_node import *
from .image_understand_step_node import *
from .image_generate_step_node import *
from .search_dataset_node import *
from .speech_to_text_step_node import BaseSpeechToTextNode
from .start_node import *
from .text_to_speech_step_node.impl.base_text_to_speech_node import BaseTextToSpeechNode
from .variable_assign_node import BaseVariableAssignNode
from .mcp_node import BaseMcpNode
node_list = [BaseStartStepNode, BaseChatNode, BaseSearchDatasetNode, BaseQuestionNode,
BaseConditionNode, BaseReplyNode,
BaseFunctionNodeNode, BaseFunctionLibNodeNode, BaseRerankerNode, BaseApplicationNode,
BaseDocumentExtractNode,
BaseImageUnderstandNode, BaseFormNode, BaseSpeechToTextNode, BaseTextToSpeechNode,
BaseImageGenerateNode, BaseVariableAssignNode, BaseMcpNode]
def get_node(node_type):
find_list = [node for node in node_list if node.type == node_type]
if len(find_list) > 0:
return find_list[0]
return None
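get_node above is a plain type-keyed registry lookup over node_list. A hedged standalone sketch with stub classes (the stub names are assumptions, not real MaxKB node classes):

class StubStartNode:
    type = 'start-node'

class StubChatNode:
    type = 'ai-chat-node'

stub_node_list = [StubStartNode, StubChatNode]

def lookup_node(node_type):
    # The first class whose `type` attribute matches wins; None if nothing matches.
    matches = [node for node in stub_node_list if node.type == node_type]
    return matches[0] if matches else None

assert lookup_node('ai-chat-node') is StubChatNode
assert lookup_node('unknown-node') is None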

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py
@date: 2024/6/11 15:29
@desc:
"""
from .impl import *

View File

@ -1,58 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: i_chat_node.py
@date: 2024/6/4 13:58
@desc:
"""
from typing import Type
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
class ChatNodeSerializer(serializers.Serializer):
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
system = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char(_("Role Setting")))
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
# Number of multi-round dialogue turns
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(
_("Number of multi-round conversations")))
is_result = serializers.BooleanField(required=False,
error_messages=ErrMessage.boolean(_('Whether to return content')))
model_params_setting = serializers.DictField(required=False,
error_messages=ErrMessage.dict(_("Model parameter settings")))
model_setting = serializers.DictField(required=False,
error_messages=ErrMessage.dict(_('Model settings')))
dialogue_type = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char(_("Context Type")))
mcp_enable = serializers.BooleanField(required=False,
error_messages=ErrMessage.boolean(_("Whether to enable MCP")))
mcp_servers = serializers.JSONField(required=False, error_messages=ErrMessage.list(_("MCP Server")))
class IChatNode(INode):
type = 'ai-chat-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return ChatNodeSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id,
chat_record_id,
model_params_setting=None,
dialogue_type=None,
model_setting=None,
mcp_enable=False,
mcp_servers=None,
**kwargs) -> NodeResult:
pass

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py
@date: 2024/6/11 15:34
@desc:
"""
from .base_chat_node import BaseChatNode

View File

@ -1,288 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: base_question_node.py
@date: 2024/6/4 14:30
@desc:
"""
import asyncio
import json
import re
import time
from functools import reduce
from types import AsyncGeneratorType
from typing import List, Dict
from django.db.models import QuerySet
from langchain.schema import HumanMessage, SystemMessage
from langchain_core.messages import BaseMessage, AIMessage, AIMessageChunk, ToolMessage
from langchain_mcp_adapters.client import MultiServerMCPClient
from langgraph.prebuilt import create_react_agent
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.ai_chat_step_node.i_chat_node import IChatNode
from application.flow.tools import Reasoning
from setting.models import Model
from setting.models_provider import get_model_credential
from setting.models_provider.tools import get_model_instance_by_model_user_id
tool_message_template = """
<details>
<summary>
<strong>Called MCP Tool: <em>%s</em></strong>
</summary>
```json
%s
```
</details>
"""
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
reasoning_content: str):
chat_model = node_variable.get('chat_model')
message_tokens = chat_model.get_num_tokens_from_messages(node_variable.get('message_list'))
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
node.context['reasoning_content'] = reasoning_content
if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
node.answer_text = answer
def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
Write context data (streaming)
@param node_variable: node data
@param workflow_variable: global data
@param node: node
@param workflow: workflow manager
"""
response = node_variable.get('result')
answer = ''
reasoning_content = ''
model_setting = node.context.get('model_setting',
{'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
'reasoning_content_start': '<think>'})
reasoning = Reasoning(model_setting.get('reasoning_content_start', '<think>'),
model_setting.get('reasoning_content_end', '</think>'))
response_reasoning_content = False
for chunk in response:
reasoning_chunk = reasoning.get_reasoning_content(chunk)
content_chunk = reasoning_chunk.get('content')
if 'reasoning_content' in chunk.additional_kwargs:
response_reasoning_content = True
reasoning_content_chunk = chunk.additional_kwargs.get('reasoning_content', '')
else:
reasoning_content_chunk = reasoning_chunk.get('reasoning_content')
answer += content_chunk
if reasoning_content_chunk is None:
reasoning_content_chunk = ''
reasoning_content += reasoning_content_chunk
yield {'content': content_chunk,
'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable',
False) else ''}
reasoning_chunk = reasoning.get_end_reasoning_content()
answer += reasoning_chunk.get('content')
reasoning_content_chunk = ""
if not response_reasoning_content:
reasoning_content_chunk = reasoning_chunk.get(
'reasoning_content')
yield {'content': reasoning_chunk.get('content'),
'reasoning_content': reasoning_content_chunk if model_setting.get('reasoning_content_enable',
False) else ''}
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
async def _yield_mcp_response(chat_model, message_list, mcp_servers):
async with MultiServerMCPClient(json.loads(mcp_servers)) as client:
agent = create_react_agent(chat_model, client.get_tools())
response = agent.astream({"messages": message_list}, stream_mode='messages')
async for chunk in response:
if isinstance(chunk[0], ToolMessage):
content = tool_message_template % (chunk[0].name, chunk[0].content)
chunk[0].content = content
yield chunk[0]
if isinstance(chunk[0], AIMessageChunk):
yield chunk[0]
def mcp_response_generator(chat_model, message_list, mcp_servers):
loop = asyncio.new_event_loop()
try:
async_gen = _yield_mcp_response(chat_model, message_list, mcp_servers)
while True:
try:
chunk = loop.run_until_complete(anext_async(async_gen))
yield chunk
except StopAsyncIteration:
break
except Exception as e:
print(f'exception: {e}')
finally:
loop.close()
async def anext_async(agen):
return await agen.__anext__()
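mcp_response_generator and anext_async together drive an async generator from synchronous code on a private event loop. A standalone sketch of the same bridge, with a dummy async source standing in for the MCP stream:

import asyncio

async def _numbers():
    for i in range(3):
        await asyncio.sleep(0)  # stands in for awaiting an MCP chunk
        yield i

def sync_iter(agen):
    # Pull items from the async generator one at a time for a synchronous caller.
    loop = asyncio.new_event_loop()
    try:
        while True:
            try:
                yield loop.run_until_complete(agen.__anext__())
            except StopAsyncIteration:
                break
    finally:
        loop.close()

print(list(sync_iter(_numbers())))  # -> [0, 1, 2]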
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
Write context data
@param node_variable: node data
@param workflow_variable: global data
@param node: node instance object
@param workflow: workflow manager
"""
response = node_variable.get('result')
model_setting = node.context.get('model_setting',
{'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
'reasoning_content_start': '<think>'})
reasoning = Reasoning(model_setting.get('reasoning_content_start'), model_setting.get('reasoning_content_end'))
reasoning_result = reasoning.get_reasoning_content(response)
reasoning_result_end = reasoning.get_end_reasoning_content()
content = reasoning_result.get('content') + reasoning_result_end.get('content')
if 'reasoning_content' in response.response_metadata:
reasoning_content = response.response_metadata.get('reasoning_content', '')
else:
reasoning_content = reasoning_result.get('reasoning_content') + reasoning_result_end.get('reasoning_content')
_write_context(node_variable, workflow_variable, node, workflow, content, reasoning_content)
def get_default_model_params_setting(model_id):
model = QuerySet(Model).filter(id=model_id).first()
credential = get_model_credential(model.provider, model.model_type, model.model_name)
model_params_setting = credential.get_model_params_setting_form(
model.model_name).get_default_form_data()
return model_params_setting
def get_node_message(chat_record, runtime_node_id):
node_details = chat_record.get_node_details_runtime_node_id(runtime_node_id)
if node_details is None:
return []
return [HumanMessage(node_details.get('question')), AIMessage(node_details.get('answer'))]
def get_workflow_message(chat_record):
return [chat_record.get_human_message(), chat_record.get_ai_message()]
def get_message(chat_record, dialogue_type, runtime_node_id):
return get_node_message(chat_record, runtime_node_id) if dialogue_type == 'NODE' else get_workflow_message(
chat_record)
class BaseChatNode(IChatNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
self.context['reasoning_content'] = details.get('reasoning_content')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, history_chat_record, stream, chat_id, chat_record_id,
model_params_setting=None,
dialogue_type=None,
model_setting=None,
mcp_enable=False,
mcp_servers=None,
**kwargs) -> NodeResult:
if dialogue_type is None:
dialogue_type = 'WORKFLOW'
if model_params_setting is None:
model_params_setting = get_default_model_params_setting(model_id)
if model_setting is None:
model_setting = {'reasoning_content_enable': False, 'reasoning_content_end': '</think>',
'reasoning_content_start': '<think>'}
self.context['model_setting'] = model_setting
chat_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number, dialogue_type,
self.runtime_node_id)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
self.context['question'] = question.content
system = self.workflow_manage.generate_prompt(system)
self.context['system'] = system
message_list = self.generate_message_list(system, prompt, history_message)
self.context['message_list'] = message_list
if mcp_enable and mcp_servers is not None and '"stdio"' not in mcp_servers:
r = mcp_response_generator(chat_model, message_list, mcp_servers)
return NodeResult(
{'result': r, 'chat_model': chat_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context_stream)
if stream:
r = chat_model.stream(message_list)
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context_stream)
else:
r = chat_model.invoke(message_list)
return NodeResult({'result': r, 'chat_model': chat_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context)
@staticmethod
def get_history_message(history_chat_record, dialogue_number, dialogue_type, runtime_node_id):
start_index = len(history_chat_record) - dialogue_number
history_message = reduce(lambda x, y: [*x, *y], [
get_message(history_chat_record[index], dialogue_type, runtime_node_id)
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
for message in history_message:
if isinstance(message.content, str):
message.content = re.sub(r'<form_rander>[\d\D]*?</form_rander>', '', message.content)
return history_message
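get_history_message keeps only the last dialogue_number records, flattens each record's message pair, and strips embedded form markup. A standalone sketch of that windowing, where DummyRecord is an assumption standing in for ChatRecord:

import re
from functools import reduce

class DummyRecord:
    def __init__(self, question, answer):
        self.pair = [question, answer]

records = [DummyRecord('q1', 'a1'), DummyRecord('q2', 'a2'), DummyRecord('q3', 'a3')]
dialogue_number = 2
start_index = len(records) - dialogue_number
window = reduce(lambda x, y: [*x, *y],
                [records[i].pair
                 for i in range(start_index if start_index > 0 else 0, len(records))], [])
# Strip embedded form markup the same way the method above does.
window = [re.sub(r'<form_rander>[\d\D]*?</form_rander>', '', m) for m in window]
print(window)  # -> ['q2', 'a2', 'q3', 'a3']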
def generate_prompt_question(self, prompt):
return HumanMessage(self.workflow_manage.generate_prompt(prompt))
def generate_message_list(self, system: str, prompt: str, history_message):
if system is not None and len(system) > 0:
return [SystemMessage(self.workflow_manage.generate_prompt(system)), *history_message,
HumanMessage(self.workflow_manage.generate_prompt(prompt))]
else:
return [*history_message, HumanMessage(self.workflow_manage.generate_prompt(prompt))]
@staticmethod
def reset_message_list(message_list: List[BaseMessage], answer_text):
result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
message
in
message_list]
result.append({'role': 'ai', 'content': answer_text})
return result
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'system': self.context.get('system'),
'history_message': [{'content': message.content, 'role': message.type} for message in
(self.context.get('history_message') if self.context.get(
'history_message') is not None else [])],
'question': self.context.get('question'),
'answer': self.context.get('answer'),
'reasoning_content': self.context.get('reasoning_content'),
'type': self.node.type,
'message_tokens': self.context.get('message_tokens'),
'answer_tokens': self.context.get('answer_tokens'),
'status': self.status,
'err_message': self.err_message
}

View File

@ -1,2 +0,0 @@
# coding=utf-8
from .impl import *

View File

@ -1,86 +0,0 @@
# coding=utf-8
from typing import Type
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
class ApplicationNodeSerializer(serializers.Serializer):
application_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Application ID")))
question_reference_address = serializers.ListField(required=True,
error_messages=ErrMessage.list(_("User Questions")))
api_input_field_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("API Input Fields")))
user_input_field_list = serializers.ListField(required=False,
error_messages=ErrMessage.list(_("User Input Fields")))
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
child_node = serializers.DictField(required=False, allow_null=True,
error_messages=ErrMessage.dict(_("Child Nodes")))
node_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Form Data")))
class IApplicationNode(INode):
type = 'application-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return ApplicationNodeSerializer
def _run(self):
question = self.workflow_manage.get_reference_field(
self.node_params_serializer.data.get('question_reference_address')[0],
self.node_params_serializer.data.get('question_reference_address')[1:])
kwargs = {}
for api_input_field in self.node_params_serializer.data.get('api_input_field_list', []):
value = api_input_field.get('value', [''])[0] if api_input_field.get('value') else ''
kwargs[api_input_field['variable']] = self.workflow_manage.get_reference_field(value,
api_input_field['value'][
1:]) if value != '' else ''
for user_input_field in self.node_params_serializer.data.get('user_input_field_list', []):
value = user_input_field.get('value', [''])[0] if user_input_field.get('value') else ''
kwargs[user_input_field['field']] = self.workflow_manage.get_reference_field(value,
user_input_field['value'][
1:]) if value != '' else ''
# Check whether this attribute is present
app_document_list = self.node_params_serializer.data.get('document_list', [])
if app_document_list and len(app_document_list) > 0:
app_document_list = self.workflow_manage.get_reference_field(
app_document_list[0],
app_document_list[1:])
for document in app_document_list:
if 'file_id' not in document:
raise ValueError(
_("Parameter value error: The uploaded document lacks file_id, and the document upload fails"))
app_image_list = self.node_params_serializer.data.get('image_list', [])
if app_image_list and len(app_image_list) > 0:
app_image_list = self.workflow_manage.get_reference_field(
app_image_list[0],
app_image_list[1:])
for image in app_image_list:
if 'file_id' not in image:
raise ValueError(
_("Parameter value error: The uploaded image lacks file_id, and the image upload fails"))
app_audio_list = self.node_params_serializer.data.get('audio_list', [])
if app_audio_list and len(app_audio_list) > 0:
app_audio_list = self.workflow_manage.get_reference_field(
app_audio_list[0],
app_audio_list[1:])
for audio in app_audio_list:
if 'file_id' not in audio:
raise ValueError(
_("Parameter value error: The uploaded audio lacks file_id, and the audio upload fails."))
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data,
app_document_list=app_document_list, app_image_list=app_image_list,
app_audio_list=app_audio_list,
message=str(question), **kwargs)
def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
**kwargs) -> NodeResult:
pass

View File

@ -1,2 +0,0 @@
# coding=utf-8
from .base_application_node import BaseApplicationNode

View File

@ -1,267 +0,0 @@
# coding=utf-8
import json
import re
import time
import uuid
from typing import Dict, List
from application.flow.common import Answer
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.application_node.i_application_node import IApplicationNode
from application.models import Chat
def string_to_uuid(input_str):
return str(uuid.uuid5(uuid.NAMESPACE_DNS, input_str))
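string_to_uuid derives a deterministic UUID (uuid5), so the same parent chat and application always map to the same embedded chat id. A quick standalone check:

import uuid

def _string_to_uuid(input_str):
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, input_str))

# Same inputs always yield the same id; different inputs do not.
assert _string_to_uuid('chat1' + 'app1') == _string_to_uuid('chat1' + 'app1')
assert _string_to_uuid('chat2' + 'app1') != _string_to_uuid('chat1' + 'app1')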
def _is_interrupt_exec(node, node_variable: Dict, workflow_variable: Dict):
return node_variable.get('is_interrupt_exec', False)
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str,
reasoning_content: str):
result = node_variable.get('result')
node.context['application_node_dict'] = node_variable.get('application_node_dict')
node.context['node_dict'] = node_variable.get('node_dict', {})
node.context['is_interrupt_exec'] = node_variable.get('is_interrupt_exec')
node.context['message_tokens'] = result.get('usage', {}).get('prompt_tokens', 0)
node.context['answer_tokens'] = result.get('usage', {}).get('completion_tokens', 0)
node.context['answer'] = answer
node.context['result'] = answer
node.context['reasoning_content'] = reasoning_content
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
node.answer_text = answer
def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
Write context data (streaming)
@param node_variable: node data
@param workflow_variable: global data
@param node: node
@param workflow: workflow manager
"""
response = node_variable.get('result')
answer = ''
reasoning_content = ''
usage = {}
node_child_node = {}
application_node_dict = node.context.get('application_node_dict', {})
is_interrupt_exec = False
for chunk in response:
# First convert the streamed chunk to a string
response_content = chunk.decode('utf-8')[6:]
response_content = json.loads(response_content)
content = response_content.get('content', '')
runtime_node_id = response_content.get('runtime_node_id', '')
chat_record_id = response_content.get('chat_record_id', '')
child_node = response_content.get('child_node')
view_type = response_content.get('view_type')
node_type = response_content.get('node_type')
real_node_id = response_content.get('real_node_id')
node_is_end = response_content.get('node_is_end', False)
_reasoning_content = response_content.get('reasoning_content', '')
if node_type == 'form-node':
is_interrupt_exec = True
answer += content
reasoning_content += _reasoning_content
node_child_node = {'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
'child_node': child_node}
if real_node_id is not None:
application_node = application_node_dict.get(real_node_id, None)
if application_node is None:
application_node_dict[real_node_id] = {'content': content,
'runtime_node_id': runtime_node_id,
'chat_record_id': chat_record_id,
'child_node': child_node,
'index': len(application_node_dict),
'view_type': view_type,
'reasoning_content': _reasoning_content}
else:
application_node['content'] += content
application_node['reasoning_content'] += _reasoning_content
yield {'content': content,
'node_type': node_type,
'runtime_node_id': runtime_node_id, 'chat_record_id': chat_record_id,
'reasoning_content': _reasoning_content,
'child_node': child_node,
'real_node_id': real_node_id,
'node_is_end': node_is_end,
'view_type': view_type}
usage = response_content.get('usage', {})
node_variable['result'] = {'usage': usage}
node_variable['is_interrupt_exec'] = is_interrupt_exec
node_variable['child_node'] = node_child_node
node_variable['application_node_dict'] = application_node_dict
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
Write context data
@param node_variable: node data
@param workflow_variable: global data
@param node: node instance object
@param workflow: workflow manager
"""
response = node_variable.get('result', {}).get('data', {})
node_variable['result'] = {'usage': {'completion_tokens': response.get('completion_tokens'),
'prompt_tokens': response.get('prompt_tokens')}}
answer = response.get('content', '') or "Sorry, no relevant content was found. Please rephrase your question or provide more information."
reasoning_content = response.get('reasoning_content', '')
answer_list = response.get('answer_list', [])
node_variable['application_node_dict'] = {answer.get('real_node_id'): {**answer, 'index': index} for answer, index
in
zip(answer_list, range(len(answer_list)))}
_write_context(node_variable, workflow_variable, node, workflow, answer, reasoning_content)
def reset_application_node_dict(application_node_dict, runtime_node_id, node_data):
try:
if application_node_dict is None:
return
for key in application_node_dict:
application_node = application_node_dict[key]
if application_node.get('runtime_node_id') == runtime_node_id:
content: str = application_node.get('content')
match = re.search('<form_rander>.*?</form_rander>', content)
if match:
form_setting_str = match.group().replace('<form_rander>', '').replace('</form_rander>', '')
form_setting = json.loads(form_setting_str)
form_setting['is_submit'] = True
form_setting['form_data'] = node_data
value = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
res = re.sub('<form_rander>.*?</form_rander>',
'${value}', content)
application_node['content'] = res.replace('${value}', value)
except Exception as e:
pass
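reset_application_node_dict marks an embedded form as submitted and splices the updated payload back into the stored answer. A standalone sketch; the sample content and form data are assumptions. The '${value}' placeholder mirrors the original design: substituting in two steps keeps re.sub from interpreting backslashes or group references inside the JSON payload.

import json
import re

content = 'before <form_rander>{"is_submit": false}</form_rander> after'
match = re.search('<form_rander>.*?</form_rander>', content)
if match:
    raw = match.group().replace('<form_rander>', '').replace('</form_rander>', '')
    form_setting = json.loads(raw)
    form_setting['is_submit'] = True
    form_setting['form_data'] = {'name': 'demo'}  # hypothetical submitted data
    value = f'<form_rander>{json.dumps(form_setting)}</form_rander>'
    content = re.sub('<form_rander>.*?</form_rander>', '${value}', content).replace('${value}', value)
print(content)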
class BaseApplicationNode(IApplicationNode):
def get_answer_list(self) -> List[Answer] | None:
if self.answer_text is None:
return None
application_node_dict = self.context.get('application_node_dict')
if application_node_dict is None or len(application_node_dict) == 0:
return [
Answer(self.answer_text, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'],
self.context.get('child_node'), self.runtime_node_id, '')]
else:
return [Answer(n.get('content'), n.get('view_type'), self.runtime_node_id,
self.workflow_params['chat_record_id'], {'runtime_node_id': n.get('runtime_node_id'),
'chat_record_id': n.get('chat_record_id')
, 'child_node': n.get('child_node')}, n.get('real_node_id'),
n.get('reasoning_content', ''))
for n in
sorted(application_node_dict.values(), key=lambda item: item.get('index'))]
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['result'] = details.get('answer')
self.context['question'] = details.get('question')
self.context['type'] = details.get('type')
self.context['reasoning_content'] = details.get('reasoning_content')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
def execute(self, application_id, message, chat_id, chat_record_id, stream, re_chat, client_id, client_type,
app_document_list=None, app_image_list=None, app_audio_list=None, child_node=None, node_data=None,
**kwargs) -> NodeResult:
from application.serializers.chat_message_serializers import ChatMessageSerializer
# Generate the chat_id for the embedded application
current_chat_id = string_to_uuid(chat_id + application_id)
Chat.objects.get_or_create(id=current_chat_id, defaults={
'application_id': application_id,
'abstract': message[0:1024],
'client_id': client_id,
})
if app_document_list is None:
app_document_list = []
if app_image_list is None:
app_image_list = []
if app_audio_list is None:
app_audio_list = []
runtime_node_id = None
record_id = None
child_node_value = None
if child_node is not None:
runtime_node_id = child_node.get('runtime_node_id')
record_id = child_node.get('chat_record_id')
child_node_value = child_node.get('child_node')
application_node_dict = self.context.get('application_node_dict')
reset_application_node_dict(application_node_dict, runtime_node_id, node_data)
response = ChatMessageSerializer(
data={'chat_id': current_chat_id, 'message': message,
're_chat': re_chat,
'stream': stream,
'application_id': application_id,
'client_id': client_id,
'client_type': client_type,
'document_list': app_document_list,
'image_list': app_image_list,
'audio_list': app_audio_list,
'runtime_node_id': runtime_node_id,
'chat_record_id': record_id,
'child_node': child_node_value,
'node_data': node_data,
'form_data': kwargs}).chat()
if response.status_code == 200:
if stream:
content_generator = response.streaming_content
return NodeResult({'result': content_generator, 'question': message}, {},
_write_context=write_context_stream, _is_interrupt=_is_interrupt_exec)
else:
data = json.loads(response.content)
return NodeResult({'result': data, 'question': message}, {},
_write_context=write_context, _is_interrupt=_is_interrupt_exec)
def get_details(self, index: int, **kwargs):
global_fields = []
for api_input_field in self.node_params_serializer.data.get('api_input_field_list', []):
value = api_input_field.get('value', [''])[0] if api_input_field.get('value') else ''
global_fields.append({
'label': api_input_field['variable'],
'key': api_input_field['variable'],
'value': self.workflow_manage.get_reference_field(
value,
api_input_field['value'][1:]
) if value != '' else ''
})
for user_input_field in self.node_params_serializer.data.get('user_input_field_list', []):
value = user_input_field.get('value', [''])[0] if user_input_field.get('value') else ''
global_fields.append({
'label': user_input_field['label'],
'key': user_input_field['field'],
'value': self.workflow_manage.get_reference_field(
value,
user_input_field['value'][1:]
) if value != '' else ''
})
return {
'name': self.node.properties.get('stepName'),
"index": index,
"info": self.node.properties.get('node_data'),
'run_time': self.context.get('run_time'),
'question': self.context.get('question'),
'answer': self.context.get('answer'),
'reasoning_content': self.context.get('reasoning_content'),
'type': self.node.type,
'message_tokens': self.context.get('message_tokens'),
'answer_tokens': self.context.get('answer_tokens'),
'status': self.status,
'err_message': self.err_message,
'global_fields': global_fields,
'document_list': self.workflow_manage.document_list,
'image_list': self.workflow_manage.image_list,
'audio_list': self.workflow_manage.audio_list,
'application_node_dict': self.context.get('application_node_dict')
}

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py.py
@date: 2024/6/7 14:43
@desc:
"""
from .impl import *

View File

@ -1,30 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py.py
@date: 2024/6/7 14:43
@desc:
"""
from .contain_compare import *
from .equal_compare import *
from .ge_compare import *
from .gt_compare import *
from .is_not_null_compare import *
from .is_not_true import IsNotTrueCompare
from .is_null_compare import *
from .is_true import IsTrueCompare
from .le_compare import *
from .len_equal_compare import *
from .len_ge_compare import *
from .len_gt_compare import *
from .len_le_compare import *
from .len_lt_compare import *
from .lt_compare import *
from .not_contain_compare import *
compare_handle_list = [GECompare(), GTCompare(), ContainCompare(), EqualCompare(), LTCompare(), LECompare(),
LenLECompare(), LenGECompare(), LenEqualCompare(), LenGTCompare(), LenLTCompare(),
IsNullCompare(),
IsNotNullCompare(), NotContainCompare(), IsTrueCompare(), IsNotTrueCompare()]
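Callers consult this chain by asking each handler whether it supports the compare key and letting the first match evaluate the pair, roughly as below (the evaluate helper is a sketch, not the actual call site):

def evaluate(handlers, node_id, fields, source_value, compare, target_value):
    # First handler whose support() accepts the compare key performs the test.
    for handler in handlers:
        if handler.support(node_id, fields, source_value, compare, target_value):
            return handler.compare(source_value, compare, target_value)
    return None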

View File

@ -1,20 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: compare.py
@date: 2024/6/7 14:37
@desc:
"""
from abc import abstractmethod
from typing import List
class Compare:
@abstractmethod
def support(self, node_id, fields: List[str], source_value, compare, target_value):
pass
@abstractmethod
def compare(self, source_value, compare, target_value):
pass

View File

@ -1,23 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: contain_compare.py
@date: 2024/6/11 10:02
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class ContainCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'contain':
return True
def compare(self, source_value, compare, target_value):
if isinstance(source_value, str):
return str(target_value) in source_value
return any([str(item) == str(target_value) for item in source_value])

View File

@ -1,21 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: equal_compare.py
@date: 2024/6/7 14:44
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class EqualCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'eq':
return True
def compare(self, source_value, compare, target_value):
return str(source_value) == str(target_value)

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: greater-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class GECompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'ge':
return True
def compare(self, source_value, compare, target_value):
try:
return float(source_value) >= float(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: greater-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class GTCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'gt':
return True
def compare(self, source_value, compare, target_value):
try:
return float(source_value) > float(target_value)
except Exception as e:
return False

View File

@ -1,21 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: is_not_null_compare.py
@date: 2024/6/28 10:45
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare import Compare
class IsNotNullCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'is_not_null':
return True
def compare(self, source_value, compare, target_value):
return source_value is not None and len(source_value) > 0

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: is_not_true.py
@date: 2025/4/7 13:44
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare import Compare
class IsNotTrueCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'is_not_true':
return True
def compare(self, source_value, compare, target_value):
try:
return source_value is False
except Exception as e:
return False

View File

@ -1,21 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: is_null_compare.py
@date: 2024/6/28 10:45
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare import Compare
class IsNullCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'is_null':
return True
def compare(self, source_value, compare, target_value):
return source_value is None or len(source_value) == 0

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: IsTrue.py
@date: 2025/4/7 13:38
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare import Compare
class IsTrueCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'is_true':
return True
def compare(self, source_value, compare, target_value):
try:
return source_value is True
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: less-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LECompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'le':
return True
def compare(self, source_value, compare, target_value):
try:
return float(source_value) <= float(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: equal_compare.py
@date: 2024/6/7 14:44
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LenEqualCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'len_eq':
return True
def compare(self, source_value, compare, target_value):
try:
return len(source_value) == int(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: greater-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LenGECompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'len_ge':
return True
def compare(self, source_value, compare, target_value):
try:
return len(source_value) >= int(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: greater-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LenGTCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'len_gt':
return True
def compare(self, source_value, compare, target_value):
try:
return len(source_value) > int(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: less-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LenLECompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'len_le':
return True
def compare(self, source_value, compare, target_value):
try:
return len(source_value) <= int(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: less-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LenLTCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'len_lt':
return True
def compare(self, source_value, compare, target_value):
try:
return len(source_value) < int(target_value)
except Exception as e:
return False

View File

@ -1,24 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: lt_compare.py
@date: 2024/6/11 9:52
@desc: less-than comparator
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class LTCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'lt':
return True
def compare(self, source_value, compare, target_value):
try:
return float(source_value) < float(target_value)
except Exception as e:
return False

View File

@ -1,23 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: contain_compare.py
@date: 2024/6/11 10:02
@desc:
"""
from typing import List
from application.flow.step_node.condition_node.compare.compare import Compare
class NotContainCompare(Compare):
def support(self, node_id, fields: List[str], source_value, compare, target_value):
if compare == 'not_contain':
return True
def compare(self, source_value, compare, target_value):
if isinstance(source_value, str):
return str(target_value) not in source_value
return not any([str(item) == str(target_value) for item in source_value])

View File

@ -1,39 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: i_condition_node.py
@date: 2024/6/7 9:54
@desc:
"""
from typing import Type
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from application.flow.i_step_node import INode
from common.util.field_message import ErrMessage
class ConditionSerializer(serializers.Serializer):
compare = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Comparator")))
value = serializers.CharField(required=True, error_messages=ErrMessage.char(_("value")))
field = serializers.ListField(required=True, error_messages=ErrMessage.char(_("Fields")))
class ConditionBranchSerializer(serializers.Serializer):
id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Branch id")))
type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Branch Type")))
condition = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Condition or|and")))
conditions = ConditionSerializer(many=True)
class ConditionNodeParamsSerializer(serializers.Serializer):
branch = ConditionBranchSerializer(many=True)
class IConditionNode(INode):
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return ConditionNodeParamsSerializer
type = 'condition-node'

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py
@date: 2024/6/11 15:35
@desc:
"""
from .base_condition_node import BaseConditionNode

View File

@ -1,62 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: base_condition_node.py
@date: 2024/6/7 11:29
@desc:
"""
from typing import List
from application.flow.i_step_node import NodeResult
from application.flow.step_node.condition_node.compare import compare_handle_list
from application.flow.step_node.condition_node.i_condition_node import IConditionNode
class BaseConditionNode(IConditionNode):
def save_context(self, details, workflow_manage):
self.context['branch_id'] = details.get('branch_id')
self.context['branch_name'] = details.get('branch_name')
def execute(self, **kwargs) -> NodeResult:
branch_list = self.node_params_serializer.data['branch']
branch = self._execute(branch_list)
r = NodeResult({'branch_id': branch.get('id'), 'branch_name': branch.get('type')}, {})
return r
def _execute(self, branch_list: List):
for branch in branch_list:
if self.branch_assertion(branch):
return branch
def branch_assertion(self, branch):
condition_list = [self.assertion(row.get('field'), row.get('compare'), row.get('value')) for row in
branch.get('conditions')]
condition = branch.get('condition')
return all(condition_list) if condition == 'and' else any(condition_list)
def assertion(self, field_list: List[str], compare: str, value):
try:
value = self.workflow_manage.generate_prompt(value)
except Exception as e:
pass
field_value = None
try:
field_value = self.workflow_manage.get_reference_field(field_list[0], field_list[1:])
except Exception as e:
pass
for compare_handler in compare_handle_list:
if compare_handler.support(field_list[0], field_list[1:], field_value, compare, value):
return compare_handler.compare(field_value, compare, value)
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'branch_id': self.context.get('branch_id'),
'branch_name': self.context.get('branch_name'),
'type': self.node.type,
'status': self.status,
'err_message': self.err_message
}
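A standalone sketch of the branch evaluation above, with a toy assertion in place of the comparator chain (the sample branch and context are assumptions):

branch = {'condition': 'and',
          'conditions': [{'field': ['node1', 'x'], 'compare': 'eq', 'value': '1'},
                         {'field': ['node1', 'y'], 'compare': 'gt', 'value': '0'}]}
context = {'node1': {'x': '1', 'y': 2}}

def assertion(field, compare, value):
    source = context[field[0]][field[1]]
    return str(source) == str(value) if compare == 'eq' else float(source) > float(value)

def branch_matches(branch, assertion):
    # Combine row-level assertions with the branch's 'and'/'or' connector.
    results = [assertion(row['field'], row['compare'], row['value'])
               for row in branch['conditions']]
    return all(results) if branch['condition'] == 'and' else any(results)

print(branch_matches(branch, assertion))  # -> True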

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py
@date: 2024/6/11 17:50
@desc:
"""
from .impl import *

View File

@ -1,48 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: i_reply_node.py
@date: 2024/6/11 16:25
@desc:
"""
from typing import Type
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.exception.app_exception import AppApiException
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
class ReplyNodeParamsSerializer(serializers.Serializer):
reply_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Response Type")))
fields = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Reference Field")))
content = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char(_("Direct answer content")))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
if self.data.get('reply_type') == 'referencing':
if 'fields' not in self.data:
raise AppApiException(500, _("Reference field cannot be empty"))
if len(self.data.get('fields')) < 2:
raise AppApiException(500, _("Reference field error"))
else:
if 'content' not in self.data or self.data.get('content') is None:
raise AppApiException(500, _("Content cannot be empty"))
class IReplyNode(INode):
type = 'reply-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return ReplyNodeParamsSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
pass

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py
@date: 2024/6/11 17:49
@desc:
"""
from .base_reply_node import *

View File

@ -1,45 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: base_reply_node.py
@date: 2024/6/11 17:25
@desc:
"""
from typing import List
from application.flow.i_step_node import NodeResult
from application.flow.step_node.direct_reply_node.i_reply_node import IReplyNode
class BaseReplyNode(IReplyNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
def execute(self, reply_type, stream, fields=None, content=None, **kwargs) -> NodeResult:
if reply_type == 'referencing':
result = self.get_reference_content(fields)
else:
result = self.generate_reply_content(content)
return NodeResult({'answer': result}, {})
def generate_reply_content(self, prompt):
return self.workflow_manage.generate_prompt(prompt)
def get_reference_content(self, fields: List[str]):
return str(self.workflow_manage.get_reference_field(
fields[0],
fields[1:]))
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'type': self.node.type,
'answer': self.context.get('answer'),
'status': self.status,
'err_message': self.err_message
}

View File

@ -1 +0,0 @@
from .impl import *

View File

@ -1,28 +0,0 @@
# coding=utf-8
from typing import Type
from django.utils.translation import gettext_lazy as _
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
class DocumentExtractNodeSerializer(serializers.Serializer):
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
class IDocumentExtractNode(INode):
type = 'document-extract-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return DocumentExtractNodeSerializer
def _run(self):
res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('document_list')[0],
self.node_params_serializer.data.get('document_list')[1:])
return self.execute(document=res, **self.flow_params_serializer.data)
def execute(self, document, chat_id, **kwargs) -> NodeResult:
pass

View File

@ -1 +0,0 @@
from .base_document_extract_node import BaseDocumentExtractNode

View File

@ -1,94 +0,0 @@
# coding=utf-8
import io
import mimetypes
from django.core.files.uploadedfile import InMemoryUploadedFile
from django.db.models import QuerySet
from application.flow.i_step_node import NodeResult
from application.flow.step_node.document_extract_node.i_document_extract_node import IDocumentExtractNode
from dataset.models import File
from dataset.serializers.document_serializers import split_handles, parse_table_handle_list, FileBufferHandle
from dataset.serializers.file_serializers import FileSerializer
def bytes_to_uploaded_file(file_bytes, file_name="file.txt"):
content_type, _ = mimetypes.guess_type(file_name)
if content_type is None:
# If the type cannot be recognized, fall back to the default binary file type
content_type = "application/octet-stream"
# Create an in-memory byte stream object
file_stream = io.BytesIO(file_bytes)
# Get the file size
file_size = len(file_bytes)
# Create the InMemoryUploadedFile object
uploaded_file = InMemoryUploadedFile(
file=file_stream,
field_name=None,
name=file_name,
content_type=content_type,
size=file_size,
charset=None,
)
return uploaded_file
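The content-type fallback in bytes_to_uploaded_file, shown standalone; unknown extensions fall back to the generic binary type:

import mimetypes

for name in ('report.pdf', 'notes.txt', 'blob.unknownext'):
    content_type, _ = mimetypes.guess_type(name)
    print(name, content_type or 'application/octet-stream')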
splitter = '\n`-----------------------------------`\n'
class BaseDocumentExtractNode(IDocumentExtractNode):
def save_context(self, details, workflow_manage):
self.context['content'] = details.get('content')
def execute(self, document, chat_id, **kwargs):
get_buffer = FileBufferHandle().get_buffer
self.context['document_list'] = document
content = []
if document is None or not isinstance(document, list):
return NodeResult({'content': ''}, {})
application = self.workflow_manage.work_flow_post_handler.chat_info.application
# Save images extracted from doc files
def save_image(image_list):
for image in image_list:
meta = {
'debug': False if application.id else True,
'chat_id': chat_id,
'application_id': str(application.id) if application.id else None,
'file_id': str(image.id)
}
file = bytes_to_uploaded_file(image.image, image.image_name)
FileSerializer(data={'file': file, 'meta': meta}).upload()
for doc in document:
file = QuerySet(File).filter(id=doc['file_id']).first()
buffer = io.BytesIO(file.get_byte().tobytes())
buffer.name = doc['name'] # this is the important line
for split_handle in (parse_table_handle_list + split_handles):
if split_handle.support(buffer, get_buffer):
# Seek back to the start of the file
buffer.seek(0)
file_content = split_handle.get_content(buffer, save_image)
content.append('### ' + doc['name'] + '\n' + file_content)
break
return NodeResult({'content': splitter.join(content)}, {})
def get_details(self, index: int, **kwargs):
content = self.context.get('content', '').split(splitter)
# Do not store the full content, because it can be very large
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'type': self.node.type,
'content': [file_content[:500] for file_content in content],
'status': self.status,
'err_message': self.err_message,
'document_list': self.context.get('document_list')
}
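A standalone sketch of how extracted file contents are joined with the splitter and later truncated to 500 characters per file for run details (the sample docs dict is an assumption):

splitter = '\n`-----------------------------------`\n'
docs = {'a.txt': 'alpha ' * 200, 'b.txt': 'beta'}
content = splitter.join('### ' + name + '\n' + text for name, text in docs.items())
preview = [part[:500] for part in content.split(splitter)]
print([len(p) for p in preview])  # each stored preview is capped at 500 chars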

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: __init__.py.py
@date: 2024/11/4 14:48
@desc:
"""
from .impl import *

View File

@ -1,35 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: i_form_node.py
@date: 2024/11/4 14:48
@desc:
"""
from typing import Type
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
class FormNodeParamsSerializer(serializers.Serializer):
form_field_list = serializers.ListField(required=True, error_messages=ErrMessage.list(_("Form Configuration")))
form_content_format = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Form output content')))
form_data = serializers.DictField(required=False, allow_null=True, error_messages=ErrMessage.dict(_("Form Data")))
class IFormNode(INode):
type = 'form-node'
view_type = 'single_view'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return FormNodeParamsSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, form_field_list, form_content_format, form_data, **kwargs) -> NodeResult:
pass

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: __init__.py.py
@date: 2024/11/4 14:49
@desc:
"""
from .base_form_node import BaseFormNode

View File

@ -1,107 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: base_form_node.py
@date: 2024/11/4 14:52
@desc:
"""
import json
import time
from typing import Dict, List
from langchain_core.prompts import PromptTemplate
from application.flow.common import Answer
from application.flow.i_step_node import NodeResult
from application.flow.step_node.form_node.i_form_node import IFormNode
def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if step_variable is not None:
for key in step_variable:
node.context[key] = step_variable[key]
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
result = step_variable['result']
yield result
node.answer_text = result
node.context['run_time'] = time.time() - node.context['start_time']
class BaseFormNode(IFormNode):
def save_context(self, details, workflow_manage):
form_data = details.get('form_data', None)
self.context['result'] = details.get('result')
self.context['form_content_format'] = details.get('form_content_format')
self.context['form_field_list'] = details.get('form_field_list')
self.context['run_time'] = details.get('run_time')
self.context['start_time'] = details.get('start_time')
self.context['form_data'] = form_data
self.context['is_submit'] = details.get('is_submit')
if self.node_params.get('is_result', False):
self.answer_text = details.get('result')
if form_data is not None:
for key in form_data:
self.context[key] = form_data[key]
def execute(self, form_field_list, form_content_format, form_data, **kwargs) -> NodeResult:
if form_data is not None:
self.context['is_submit'] = True
self.context['form_data'] = form_data
for key in form_data:
self.context[key] = form_data.get(key)
else:
self.context['is_submit'] = False
form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id,
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
"is_submit": self.context.get("is_submit", False)}
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
value = prompt_template.format(form=form, context=context)
return NodeResult(
{'result': value, 'form_field_list': form_field_list, 'form_content_format': form_content_format}, {},
_write_context=write_context)
def get_answer_list(self) -> List[Answer] | None:
form_content_format = self.context.get('form_content_format')
form_field_list = self.context.get('form_field_list')
form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id,
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
'form_data': self.context.get('form_data', {}),
"is_submit": self.context.get("is_submit", False)}
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
value = prompt_template.format(form=form, context=context)
return [Answer(value, self.view_type, self.runtime_node_id, self.workflow_params['chat_record_id'], None,
self.runtime_node_id, '')]
def get_details(self, index: int, **kwargs):
form_content_format = self.context.get('form_content_format')
form_field_list = self.context.get('form_field_list')
form_setting = {"form_field_list": form_field_list, "runtime_node_id": self.runtime_node_id,
"chat_record_id": self.flow_params_serializer.data.get("chat_record_id"),
'form_data': self.context.get('form_data', {}),
"is_submit": self.context.get("is_submit", False)}
form = f'<form_rander>{json.dumps(form_setting, ensure_ascii=False)}</form_rander>'
context = self.workflow_manage.get_workflow_content()
form_content_format = self.workflow_manage.reset_prompt(form_content_format)
prompt_template = PromptTemplate.from_template(form_content_format, template_format='jinja2')
value = prompt_template.format(form=form, context=context)
return {
'name': self.node.properties.get('stepName'),
"index": index,
"result": value,
"form_content_format": self.context.get('form_content_format'),
"form_field_list": self.context.get('form_field_list'),
'form_data': self.context.get('form_data'),
'start_time': self.context.get('start_time'),
'is_submit': self.context.get('is_submit'),
'run_time': self.context.get('run_time'),
'type': self.node.type,
'status': self.status,
'err_message': self.err_message
}
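The form node renders its output through a jinja2 PromptTemplate. A minimal standalone sketch, assuming langchain-core and jinja2 are installed; the template string here is made up for illustration:

from langchain_core.prompts import PromptTemplate

template = PromptTemplate.from_template('{{ context }}\n{{ form }}', template_format='jinja2')
print(template.format(context='Please fill in the form:',
                      form='<form_rander>{"form_field_list": []}</form_rander>'))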

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: __init__.py
@date: 2024/8/8 17:45
@desc:
"""
from .impl import *

View File

@ -1,48 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: i_function_lib_node.py
@date: 2024/8/8 16:21
@desc:
"""
from typing import Type
from django.db.models import QuerySet
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.field.common import ObjectField
from common.util.field_message import ErrMessage
from function_lib.models.function import FunctionLib
from django.utils.translation import gettext_lazy as _
class InputField(serializers.Serializer):
name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Variable Name')))
value = ObjectField(required=True, error_messages=ErrMessage.char(_("Variable Value")), model_type_list=[str, list])
class FunctionLibNodeParamsSerializer(serializers.Serializer):
function_lib_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid(_('Library ID')))
input_field_list = InputField(required=True, many=True)
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
f_lib = QuerySet(FunctionLib).filter(id=self.data.get('function_lib_id')).first()
if f_lib is None:
raise Exception(_('The function has been deleted'))
class IFunctionLibNode(INode):
type = 'function-lib-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return FunctionLibNodeParamsSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
pass
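
For orientation, the parameter shape this serializer validates looks roughly like the following sketch; the UUID and field values are placeholders, and a `value` may be a literal string or a reference path expressed as a list:

node_params = {
    'function_lib_id': '7d2f2f60-0000-4000-8000-000000000000',  # placeholder UUID
    'input_field_list': [
        {'name': 'city', 'value': 'Beijing'},                   # literal value
        {'name': 'days', 'value': ['start-node', 'question']},  # reference path
    ],
    'is_result': True,
}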

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: __init__.py
@date: 2024/8/8 17:48
@desc:
"""
from .base_function_lib_node import BaseFunctionLibNodeNode

View File

@ -1,150 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: base_function_lib_node.py
@date: 2024/8/8 17:49
@desc:
"""
import json
import time
from typing import Dict
from django.db.models import QuerySet
from django.utils.translation import gettext as _
from application.flow.i_step_node import NodeResult
from application.flow.step_node.function_lib_node.i_function_lib_node import IFunctionLibNode
from common.exception.app_exception import AppApiException
from common.util.function_code import FunctionExecutor
from common.util.rsa_util import rsa_long_decrypt
from function_lib.models.function import FunctionLib
from smartdoc.const import CONFIG
function_executor = FunctionExecutor(CONFIG.get('SANDBOX'))
def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if step_variable is not None:
for key in step_variable:
node.context[key] = step_variable[key]
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
result = str(step_variable['result']) + '\n'
yield result
node.answer_text = result
node.context['run_time'] = time.time() - node.context['start_time']
def get_field_value(debug_field_list, name, is_required):
result = [field for field in debug_field_list if field.get('name') == name]
if len(result) > 0:
return result[-1]['value']
if is_required:
raise AppApiException(500, _('Field: {name} No value set').format(name=name))
return None
def valid_reference_value(_type, value, name):
if _type == 'int':
instance_type = int | float
elif _type == 'float':
instance_type = float | int
elif _type == 'dict':
instance_type = dict
elif _type == 'array':
instance_type = list
elif _type == 'string':
instance_type = str
else:
raise Exception(_('Field: {name} Type: {_type} Value: {value} Unsupported types').format(name=name,
_type=_type))
if not isinstance(value, instance_type):
raise Exception(
_('Field: {name} Type: {_type} Value: {value} Type error').format(name=name, _type=_type,
value=value))
def convert_value(name: str, value, _type, is_required, source, node):
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
return None
if not is_required and source == 'reference' and (value is None or len(value) == 0):
return None
if source == 'reference':
value = node.workflow_manage.get_reference_field(
value[0],
value[1:])
valid_reference_value(_type, value, name)
if _type == 'int':
return int(value)
if _type == 'float':
return float(value)
return value
try:
if _type == 'int':
return int(value)
if _type == 'float':
return float(value)
if _type == 'dict':
v = json.loads(value)
if isinstance(v, dict):
return v
raise Exception(_('type error'))
if _type == 'array':
v = json.loads(value)
if isinstance(v, list):
return v
raise Exception(_('type error'))
return value
except Exception as e:
raise Exception(
_('Field: {name} Type: {_type} Value: {value} Type error').format(name=name, _type=_type,
value=value))
def valid_function(function_lib, user_id):
if function_lib is None:
raise Exception(_('Function does not exist'))
if function_lib.permission_type == 'PRIVATE' and str(function_lib.user_id) != str(user_id):
raise Exception(_('No permission to use this function {name}').format(name=function_lib.name))
if not function_lib.is_active:
raise Exception(_('Function {name} is unavailable').format(name=function_lib.name))
class BaseFunctionLibNodeNode(IFunctionLibNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
if self.node_params.get('is_result'):
self.answer_text = str(details.get('result'))
def execute(self, function_lib_id, input_field_list, **kwargs) -> NodeResult:
function_lib = QuerySet(FunctionLib).filter(id=function_lib_id).first()
valid_function(function_lib, self.flow_params_serializer.data.get('user_id'))
params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
field.get('is_required'),
field.get('source'), self)
for field in
[{'value': get_field_value(input_field_list, field.get('name'), field.get('is_required'),
), **field}
for field in
function_lib.input_field_list]}
self.context['params'] = params
        # Merge the function library's initialization parameters with the runtime parameters
if function_lib.init_params is not None:
all_params = json.loads(rsa_long_decrypt(function_lib.init_params)) | params
else:
all_params = params
result = function_executor.exec_code(function_lib.code, all_params)
return NodeResult({'result': result}, {}, _write_context=write_context)
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
"result": self.context.get('result'),
"params": self.context.get('params'),
'run_time': self.context.get('run_time'),
'type': self.node.type,
'status': self.status,
'err_message': self.err_message
}
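
A few illustrative calls against convert_value above, all on the 'custom' source path (which never dereferences anything, so the node argument can be None in this sketch):

assert convert_value('n', '3', 'int', True, 'custom', None) == 3
assert convert_value('ratio', '0.5', 'float', True, 'custom', None) == 0.5
assert convert_value('cfg', '{"a": 1}', 'dict', True, 'custom', None) == {'a': 1}
assert convert_value('ids', '[1, 2]', 'array', True, 'custom', None) == [1, 2]
assert convert_value('note', '', 'string', False, 'custom', None) is None  # optional + empty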

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: __init__.py
@date: 2024/8/13 10:43
@desc:
"""
from .impl import *

View File

@ -1,63 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: i_function_node.py
@date: 2024/8/8 16:21
@desc:
"""
import re
from typing import Type
from django.core import validators
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.exception.app_exception import AppApiException
from common.field.common import ObjectField
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
from rest_framework.utils.formatting import lazy_format
class InputField(serializers.Serializer):
name = serializers.CharField(required=True, error_messages=ErrMessage.char(_('Variable Name')))
is_required = serializers.BooleanField(required=True, error_messages=ErrMessage.boolean(_("Is this field required")))
type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("type")), validators=[
validators.RegexValidator(regex=re.compile("^string|int|dict|array|float$"),
message=_("The field only supports string|int|dict|array|float"), code=500)
])
source = serializers.CharField(required=True, error_messages=ErrMessage.char(_("source")), validators=[
validators.RegexValidator(regex=re.compile("^custom|reference$"),
message=_("The field only supports custom|reference"), code=500)
])
value = ObjectField(required=True, error_messages=ErrMessage.char(_("Variable Value")), model_type_list=[str, list])
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
is_required = self.data.get('is_required')
if is_required and self.data.get('value') is None:
message = lazy_format(_('{field}, this field is required.'), field=self.data.get("name"))
raise AppApiException(500, message)
class FunctionNodeParamsSerializer(serializers.Serializer):
input_field_list = InputField(required=True, many=True)
code = serializers.CharField(required=True, error_messages=ErrMessage.char(_("function")))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
def is_valid(self, *, raise_exception=False):
super().is_valid(raise_exception=True)
class IFunctionNode(INode):
type = 'function-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return FunctionNodeParamsSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, input_field_list, code, **kwargs) -> NodeResult:
pass
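
The matching parameter shape for this node, again with placeholders; the `main` entry point below is only an example of user code, not a contract defined in this file:

node_params = {
    'code': 'def main(text):\n    return text[::-1]',  # example user code only
    'input_field_list': [
        {'name': 'text', 'is_required': True, 'type': 'string',
         'source': 'custom', 'value': 'hello'},
    ],
    'is_result': True,
}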

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: __init__.py
@date: 2024/8/13 11:19
@desc:
"""
from .base_function_node import BaseFunctionNodeNode

View File

@ -1,108 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author:
@file: base_function_node.py
@date: 2024/8/8 17:49
@desc:
"""
import json
import time
from typing import Dict
from application.flow.i_step_node import NodeResult
from application.flow.step_node.function_node.i_function_node import IFunctionNode
from common.exception.app_exception import AppApiException
from common.util.function_code import FunctionExecutor
from smartdoc.const import CONFIG
function_executor = FunctionExecutor(CONFIG.get('SANDBOX'))
def write_context(step_variable: Dict, global_variable: Dict, node, workflow):
if step_variable is not None:
for key in step_variable:
node.context[key] = step_variable[key]
if workflow.is_result(node, NodeResult(step_variable, global_variable)) and 'result' in step_variable:
result = str(step_variable['result']) + '\n'
yield result
node.answer_text = result
node.context['run_time'] = time.time() - node.context['start_time']
def valid_reference_value(_type, value, name):
if _type == 'int':
instance_type = int | float
elif _type == 'float':
instance_type = float | int
elif _type == 'dict':
instance_type = dict
elif _type == 'array':
instance_type = list
elif _type == 'string':
instance_type = str
else:
        raise Exception(500, f'Field: {name} Type: {_type} Unsupported type')
    if not isinstance(value, instance_type):
        raise Exception(f'Field: {name} Type: {_type} Value: {value} Type error')
def convert_value(name: str, value, _type, is_required, source, node):
if not is_required and (value is None or (isinstance(value, str) and len(value) == 0)):
return None
if source == 'reference':
value = node.workflow_manage.get_reference_field(
value[0],
value[1:])
valid_reference_value(_type, value, name)
if _type == 'int':
return int(value)
if _type == 'float':
return float(value)
return value
try:
if _type == 'int':
return int(value)
if _type == 'float':
return float(value)
if _type == 'dict':
v = json.loads(value)
if isinstance(v, dict):
return v
raise Exception("类型错误")
if _type == 'array':
v = json.loads(value)
if isinstance(v, list):
return v
raise Exception("类型错误")
return value
except Exception as e:
        raise Exception(f'Field: {name} Type: {_type} Value: {value} Type error')
class BaseFunctionNodeNode(IFunctionNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
if self.node_params.get('is_result', False):
self.answer_text = str(details.get('result'))
def execute(self, input_field_list, code, **kwargs) -> NodeResult:
params = {field.get('name'): convert_value(field.get('name'), field.get('value'), field.get('type'),
field.get('is_required'), field.get('source'), self)
for field in input_field_list}
result = function_executor.exec_code(code, params)
self.context['params'] = params
return NodeResult({'result': result}, {}, _write_context=write_context)
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
"result": self.context.get('result'),
"params": self.context.get('params'),
'run_time': self.context.get('run_time'),
'type': self.node.type,
'status': self.status,
'err_message': self.err_message
}
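
FunctionExecutor lives in common.util.function_code and runs user code inside the configured sandbox; its real behavior is not shown in this diff. Purely as a mental model — assuming the user code exposes a `main` entry point, which is an assumption of this sketch, not something this file guarantees:

def exec_code_sketch(code_str: str, params: dict):
    # Assumption: the snippet defines main(**params); the real sandbox is stricter.
    scope: dict = {}
    exec(code_str, scope)
    return scope['main'](**params)

assert exec_code_sketch('def main(a, b):\n    return a + b', {'a': 1, 'b': 2}) == 3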

View File

@ -1,3 +0,0 @@
# coding=utf-8
from .impl import *

View File

@ -1,45 +0,0 @@
# coding=utf-8
from typing import Type
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
class ImageGenerateNodeSerializer(serializers.Serializer):
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word (positive)")))
negative_prompt = serializers.CharField(required=False, error_messages=ErrMessage.char(_("Prompt word (negative)")),
allow_null=True, allow_blank=True, )
    # Number of multi-round conversation turns
dialogue_number = serializers.IntegerField(required=False, default=0,
error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
dialogue_type = serializers.CharField(required=False, default='NODE',
error_messages=ErrMessage.char(_("Conversation storage type")))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
model_params_setting = serializers.JSONField(required=False, default=dict,
error_messages=ErrMessage.json(_("Model parameter settings")))
class IImageGenerateNode(INode):
type = 'image-generate-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return ImageGenerateNodeSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
model_params_setting,
chat_record_id,
**kwargs) -> NodeResult:
pass

View File

@ -1,3 +0,0 @@
# coding=utf-8
from .base_image_generate_node import BaseImageGenerateNode

View File

@ -1,122 +0,0 @@
# coding=utf-8
from functools import reduce
from typing import List
import requests
from langchain_core.messages import BaseMessage, HumanMessage, AIMessage
from application.flow.i_step_node import NodeResult
from application.flow.step_node.image_generate_step_node.i_image_generate_node import IImageGenerateNode
from common.util.common import bytes_to_uploaded_file
from dataset.serializers.file_serializers import FileSerializer
from setting.models_provider.tools import get_model_instance_by_model_user_id
class BaseImageGenerateNode(IImageGenerateNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
def execute(self, model_id, prompt, negative_prompt, dialogue_number, dialogue_type, history_chat_record, chat_id,
model_params_setting,
chat_record_id,
**kwargs) -> NodeResult:
application = self.workflow_manage.work_flow_post_handler.chat_info.application
tti_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'),
**model_params_setting)
history_message = self.get_history_message(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
self.context['question'] = question
message_list = self.generate_message_list(question, history_message)
self.context['message_list'] = message_list
self.context['dialogue_type'] = dialogue_type
        image_urls = tti_model.generate_image(question, negative_prompt)
        # Save the generated images
file_urls = []
for image_url in image_urls:
file_name = 'generated_image.png'
file = bytes_to_uploaded_file(requests.get(image_url).content, file_name)
meta = {
'debug': False if application.id else True,
'chat_id': chat_id,
'application_id': str(application.id) if application.id else None,
}
file_url = FileSerializer(data={'file': file, 'meta': meta}).upload()
file_urls.append(file_url)
self.context['image_list'] = [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls]
answer = ' '.join([f"![Image]({path})" for path in file_urls])
return NodeResult({'answer': answer, 'chat_model': tti_model, 'message_list': message_list,
'image': [{'file_id': path.split('/')[-1], 'url': path} for path in file_urls],
'history_message': history_message, 'question': question}, {})
def generate_history_ai_message(self, chat_record):
for val in chat_record.details.values():
if self.node.id == val['node_id'] and 'image_list' in val:
if val['dialogue_type'] == 'WORKFLOW':
return chat_record.get_ai_message()
image_list = val['image_list']
return AIMessage(content=[
*[{'type': 'image_url', 'image_url': {'url': f'{file_url}'}} for file_url in image_list]
])
return chat_record.get_ai_message()
def get_history_message(self, history_chat_record, dialogue_number):
start_index = len(history_chat_record) - dialogue_number
history_message = reduce(lambda x, y: [*x, *y], [
[self.generate_history_human_message(history_chat_record[index]),
self.generate_history_ai_message(history_chat_record[index])]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
return history_message
def generate_history_human_message(self, chat_record):
for data in chat_record.details.values():
if self.node.id == data['node_id'] and 'image_list' in data:
image_list = data['image_list']
if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
return HumanMessage(content=chat_record.problem_text)
return HumanMessage(content=data['question'])
return HumanMessage(content=chat_record.problem_text)
def generate_prompt_question(self, prompt):
return self.workflow_manage.generate_prompt(prompt)
def generate_message_list(self, question: str, history_message):
return [
*history_message,
question
]
@staticmethod
def reset_message_list(message_list: List[BaseMessage], answer_text):
result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
message
in
message_list]
result.append({'role': 'ai', 'content': answer_text})
return result
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'history_message': [{'content': message.content, 'role': message.type} for message in
(self.context.get('history_message') if self.context.get(
'history_message') is not None else [])],
'question': self.context.get('question'),
'answer': self.context.get('answer'),
'type': self.node.type,
'message_tokens': self.context.get('message_tokens'),
'answer_tokens': self.context.get('answer_tokens'),
'status': self.status,
'err_message': self.err_message,
'image_list': self.context.get('image_list'),
'dialogue_type': self.context.get('dialogue_type')
}
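
The windowing logic in get_history_message flattens the last dialogue_number chat records into a [human, ai, human, ai, ...] list; a toy version with strings standing in for messages:

from functools import reduce

records = ['r1', 'r2', 'r3', 'r4']
dialogue_number = 2
start = max(len(records) - dialogue_number, 0)
history = reduce(lambda x, y: [*x, *y],
                 [[f'human({r})', f'ai({r})'] for r in records[start:]], [])
assert history == ['human(r3)', 'ai(r3)', 'human(r4)', 'ai(r4)']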

View File

@ -1,3 +0,0 @@
# coding=utf-8
from .impl import *

View File

@ -1,46 +0,0 @@
# coding=utf-8
from typing import Type
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
class ImageUnderstandNodeSerializer(serializers.Serializer):
model_id = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Model id")))
system = serializers.CharField(required=False, allow_blank=True, allow_null=True,
error_messages=ErrMessage.char(_("Role Setting")))
prompt = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Prompt word")))
    # Number of multi-round conversation turns
dialogue_number = serializers.IntegerField(required=True, error_messages=ErrMessage.integer(_("Number of multi-round conversations")))
dialogue_type = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Conversation storage type")))
is_result = serializers.BooleanField(required=False, error_messages=ErrMessage.boolean(_('Whether to return content')))
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
model_params_setting = serializers.JSONField(required=False, default=dict,
error_messages=ErrMessage.json(_("Model parameter settings")))
class IImageUnderstandNode(INode):
type = 'image-understand-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return ImageUnderstandNodeSerializer
def _run(self):
res = self.workflow_manage.get_reference_field(self.node_params_serializer.data.get('image_list')[0],
self.node_params_serializer.data.get('image_list')[1:])
return self.execute(image=res, **self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
model_params_setting,
chat_record_id,
image,
**kwargs) -> NodeResult:
pass

View File

@ -1,3 +0,0 @@
# coding=utf-8
from .base_image_understand_node import BaseImageUnderstandNode

View File

@ -1,224 +0,0 @@
# coding=utf-8
import base64
import os
import time
from functools import reduce
from typing import List, Dict
from django.db.models import QuerySet
from langchain_core.messages import BaseMessage, HumanMessage, SystemMessage, AIMessage
from application.flow.i_step_node import NodeResult, INode
from application.flow.step_node.image_understand_step_node.i_image_understand_node import IImageUnderstandNode
from dataset.models import File
from setting.models_provider.tools import get_model_instance_by_model_user_id
from imghdr import what
def _write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow, answer: str):
chat_model = node_variable.get('chat_model')
message_tokens = node_variable['usage_metadata']['output_tokens'] if 'usage_metadata' in node_variable else 0
answer_tokens = chat_model.get_num_tokens(answer)
node.context['message_tokens'] = message_tokens
node.context['answer_tokens'] = answer_tokens
node.context['answer'] = answer
node.context['history_message'] = node_variable['history_message']
node.context['question'] = node_variable['question']
node.context['run_time'] = time.time() - node.context['start_time']
if workflow.is_result(node, NodeResult(node_variable, workflow_variable)):
node.answer_text = answer
def write_context_stream(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
    Write context data (streaming)
    @param node_variable: node data
    @param workflow_variable: global data
    @param node: node instance
    @param workflow: workflow manager
"""
response = node_variable.get('result')
answer = ''
for chunk in response:
answer += chunk.content
yield chunk.content
_write_context(node_variable, workflow_variable, node, workflow, answer)
def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, workflow):
"""
    Write context data
    @param node_variable: node data
    @param workflow_variable: global data
    @param node: node instance
    @param workflow: workflow manager
"""
response = node_variable.get('result')
answer = response.content
_write_context(node_variable, workflow_variable, node, workflow, answer)
def file_id_to_base64(file_id: str):
file = QuerySet(File).filter(id=file_id).first()
file_bytes = file.get_byte()
base64_image = base64.b64encode(file_bytes).decode("utf-8")
return [base64_image, what(None, file_bytes.tobytes())]
class BaseImageUnderstandNode(IImageUnderstandNode):
def save_context(self, details, workflow_manage):
self.context['answer'] = details.get('answer')
self.context['question'] = details.get('question')
if self.node_params.get('is_result', False):
self.answer_text = details.get('answer')
def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
model_params_setting,
chat_record_id,
image,
**kwargs) -> NodeResult:
        # Normalize invalid parameters
        if image is None or not isinstance(image, list):
            image = []
        image_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'), **model_params_setting)
        # History messages stored in execution details do not need image content
history_message = self.get_history_message_for_details(history_chat_record, dialogue_number)
self.context['history_message'] = history_message
question = self.generate_prompt_question(prompt)
self.context['question'] = question.content
        # Generate the message list with the real history_message
message_list = self.generate_message_list(image_model, system, prompt,
self.get_history_message(history_chat_record, dialogue_number), image)
self.context['message_list'] = message_list
self.context['image_list'] = image
self.context['dialogue_type'] = dialogue_type
if stream:
r = image_model.stream(message_list)
return NodeResult({'result': r, 'chat_model': image_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context_stream)
else:
r = image_model.invoke(message_list)
return NodeResult({'result': r, 'chat_model': image_model, 'message_list': message_list,
'history_message': history_message, 'question': question.content}, {},
_write_context=write_context)
def get_history_message_for_details(self, history_chat_record, dialogue_number):
start_index = len(history_chat_record) - dialogue_number
history_message = reduce(lambda x, y: [*x, *y], [
[self.generate_history_human_message_for_details(history_chat_record[index]),
self.generate_history_ai_message(history_chat_record[index])]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
return history_message
def generate_history_ai_message(self, chat_record):
for val in chat_record.details.values():
if self.node.id == val['node_id'] and 'image_list' in val:
if val['dialogue_type'] == 'WORKFLOW':
return chat_record.get_ai_message()
return AIMessage(content=val['answer'])
return chat_record.get_ai_message()
def generate_history_human_message_for_details(self, chat_record):
for data in chat_record.details.values():
if self.node.id == data['node_id'] and 'image_list' in data:
image_list = data['image_list']
if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
return HumanMessage(content=chat_record.problem_text)
file_id_list = [image.get('file_id') for image in image_list]
return HumanMessage(content=[
{'type': 'text', 'text': data['question']},
*[{'type': 'image_url', 'image_url': {'url': f'/api/file/{file_id}'}} for file_id in file_id_list]
])
return HumanMessage(content=chat_record.problem_text)
def get_history_message(self, history_chat_record, dialogue_number):
start_index = len(history_chat_record) - dialogue_number
history_message = reduce(lambda x, y: [*x, *y], [
[self.generate_history_human_message(history_chat_record[index]),
self.generate_history_ai_message(history_chat_record[index])]
for index in
range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
return history_message
def generate_history_human_message(self, chat_record):
for data in chat_record.details.values():
if self.node.id == data['node_id'] and 'image_list' in data:
image_list = data['image_list']
if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
return HumanMessage(content=chat_record.problem_text)
image_base64_list = [file_id_to_base64(image.get('file_id')) for image in image_list]
return HumanMessage(
content=[
{'type': 'text', 'text': data['question']},
*[{'type': 'image_url', 'image_url': {'url': f'data:image/{base64_image[1]};base64,{base64_image[0]}'}} for
base64_image in image_base64_list]
])
return HumanMessage(content=chat_record.problem_text)
def generate_prompt_question(self, prompt):
return HumanMessage(self.workflow_manage.generate_prompt(prompt))
def generate_message_list(self, image_model, system: str, prompt: str, history_message, image):
if image is not None and len(image) > 0:
            # Handle multiple images
images = []
for img in image:
file_id = img['file_id']
file = QuerySet(File).filter(id=file_id).first()
image_bytes = file.get_byte()
base64_image = base64.b64encode(image_bytes).decode("utf-8")
image_format = what(None, image_bytes.tobytes())
images.append({'type': 'image_url', 'image_url': {'url': f'data:image/{image_format};base64,{base64_image}'}})
messages = [HumanMessage(
content=[
{'type': 'text', 'text': self.workflow_manage.generate_prompt(prompt)},
*images
])]
else:
messages = [HumanMessage(self.workflow_manage.generate_prompt(prompt))]
if system is not None and len(system) > 0:
return [
SystemMessage(self.workflow_manage.generate_prompt(system)),
*history_message,
*messages
]
else:
return [
*history_message,
*messages
]
@staticmethod
def reset_message_list(message_list: List[BaseMessage], answer_text):
result = [{'role': 'user' if isinstance(message, HumanMessage) else 'ai', 'content': message.content} for
message
in
message_list]
result.append({'role': 'ai', 'content': answer_text})
return result
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'system': self.node_params.get('system'),
'history_message': [{'content': message.content, 'role': message.type} for message in
(self.context.get('history_message') if self.context.get(
'history_message') is not None else [])],
'question': self.context.get('question'),
'answer': self.context.get('answer'),
'type': self.node.type,
'message_tokens': self.context.get('message_tokens'),
'answer_tokens': self.context.get('answer_tokens'),
'status': self.status,
'err_message': self.err_message,
'image_list': self.context.get('image_list'),
'dialogue_type': self.context.get('dialogue_type')
}
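
The data-URL packaging used by generate_message_list and generate_history_human_message is standard langchain_core usage; in isolation it looks like this (the bytes and question are placeholders):

import base64
from langchain_core.messages import HumanMessage

image_bytes = b'\x89PNG\r\n\x1a\n'  # placeholder, not a real image
b64 = base64.b64encode(image_bytes).decode('utf-8')
message = HumanMessage(content=[
    {'type': 'text', 'text': 'What is in this picture?'},
    {'type': 'image_url', 'image_url': {'url': f'data:image/png;base64,{b64}'}},
])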

View File

@ -1,3 +0,0 @@
# coding=utf-8
from .impl import *

View File

@ -1,35 +0,0 @@
# coding=utf-8
from typing import Type
from rest_framework import serializers
from application.flow.i_step_node import INode, NodeResult
from common.util.field_message import ErrMessage
from django.utils.translation import gettext_lazy as _
class McpNodeSerializer(serializers.Serializer):
mcp_servers = serializers.JSONField(required=True,
error_messages=ErrMessage.char(_("Mcp servers")))
mcp_server = serializers.CharField(required=True,
error_messages=ErrMessage.char(_("Mcp server")))
mcp_tool = serializers.CharField(required=True, error_messages=ErrMessage.char(_("Mcp tool")))
tool_params = serializers.DictField(required=True,
error_messages=ErrMessage.char(_("Tool parameters")))
class IMcpNode(INode):
type = 'mcp-node'
def get_node_params_serializer_class(self) -> Type[serializers.Serializer]:
return McpNodeSerializer
def _run(self):
return self.execute(**self.node_params_serializer.data, **self.flow_params_serializer.data)
def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
pass
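
The mcp_servers field is a JSON string keyed by server name; the exact connection keys come from langchain_mcp_adapters and vary by version, so treat this shape as an illustrative assumption:

mcp_servers = '''{
  "weather": {"url": "http://localhost:8000/sse", "transport": "sse"}
}'''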

View File

@ -1,3 +0,0 @@
# coding=utf-8
from .base_mcp_node import BaseMcpNode

View File

@ -1,61 +0,0 @@
# coding=utf-8
import asyncio
import json
from typing import List
from langchain_mcp_adapters.client import MultiServerMCPClient
from application.flow.i_step_node import NodeResult
from application.flow.step_node.mcp_node.i_mcp_node import IMcpNode
class BaseMcpNode(IMcpNode):
def save_context(self, details, workflow_manage):
self.context['result'] = details.get('result')
self.context['tool_params'] = details.get('tool_params')
self.context['mcp_tool'] = details.get('mcp_tool')
if self.node_params.get('is_result', False):
self.answer_text = details.get('result')
def execute(self, mcp_servers, mcp_server, mcp_tool, tool_params, **kwargs) -> NodeResult:
servers = json.loads(mcp_servers)
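        # JSON round-trip yields a deep copy, so variable substitution below cannot mutate the caller's tool_params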
params = json.loads(json.dumps(tool_params))
params = self.handle_variables(params)
        async def call_tool(server_config, session_name, tool_name, tool_args):
            async with MultiServerMCPClient(server_config) as client:
                result = await client.sessions[session_name].call_tool(tool_name, tool_args)
                return result
res = asyncio.run(call_tool(servers, mcp_server, mcp_tool, params))
return NodeResult(
{'result': [content.text for content in res.content], 'tool_params': params, 'mcp_tool': mcp_tool}, {})
def handle_variables(self, tool_params):
        # Resolve variables in the tool parameters
for k, v in tool_params.items():
            if isinstance(v, str):
                tool_params[k] = self.workflow_manage.generate_prompt(v)
            if isinstance(v, dict):
                self.handle_variables(v)
            if isinstance(v, list) and len(v) > 0 and isinstance(v[0], str):
                tool_params[k] = self.get_reference_content(v)
return tool_params
def get_reference_content(self, fields: List[str]):
return str(self.workflow_manage.get_reference_field(
fields[0],
fields[1:]))
def get_details(self, index: int, **kwargs):
return {
'name': self.node.properties.get('stepName'),
"index": index,
'run_time': self.context.get('run_time'),
'status': self.status,
'err_message': self.err_message,
'type': self.node.type,
'mcp_tool': self.context.get('mcp_tool'),
'tool_params': self.context.get('tool_params'),
'result': self.context.get('result'),
}
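
A toy version of the handle_variables walk above, with stand-in resolver functions (generate_prompt and get_reference_field are not available outside a workflow):

def resolve(params, render, deref):
    for k, v in params.items():
        if isinstance(v, str):
            params[k] = render(v)
        if isinstance(v, dict):
            resolve(v, render, deref)
        if isinstance(v, list) and v and isinstance(v[0], str):
            params[k] = deref(v)
    return params

out = resolve({'city': 'beijing', 'ref': ['start-node', 'question']},
              render=str.upper, deref='/'.join)
assert out == {'city': 'BEIJING', 'ref': 'start-node/question'}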

View File

@ -1,9 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author:
@file: __init__.py
@date: 2024/6/11 15:30
@desc:
"""
from .impl import *

Some files were not shown because too many files have changed in this diff.