diff --git a/README.md b/README.md
index f4fa0d621..4c5021ad7 100644
--- a/README.md
+++ b/README.md
@@ -59,6 +59,14 @@ docker run -d --name=maxkb -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data 1pa
[](https://star-history.com/#1Panel-dev/MaxKB&Date)
+## 我们的其他开源产品
+
+- [JumpServer](https://github.com/jumpserver/jumpserver/) - 广受欢迎的开源堡垒机
+- [DataEase](https://github.com/dataease/dataease/) - 人人可用的开源数据可视化分析工具
+- [MeterSphere](https://github.com/metersphere/metersphere/) - 一站式开源自动化测试平台
+- [1Panel](https://github.com/1panel-dev/1panel/) - 现代化、开源的 Linux 服务器运维管理面板
+- [Halo](https://github.com/halo-dev/halo/) - 强大易用的开源建站工具
+
## License
Copyright (c) 2014-2024 飞致云 FIT2CLOUD, All rights reserved.
diff --git a/apps/application/serializers/application_serializers.py b/apps/application/serializers/application_serializers.py
index 6ae7cc463..3b20a1b39 100644
--- a/apps/application/serializers/application_serializers.py
+++ b/apps/application/serializers/application_serializers.py
@@ -196,7 +196,7 @@ class ApplicationSerializer(serializers.Serializer):
access_token = serializers.CharField(required=True, error_messages=ErrMessage.char("access_token"))
def auth(self, request, with_valid=True):
- token = request.META.get('HTTP_AUTHORIZATION', None)
+ token = request.META.get('HTTP_AUTHORIZATION')
token_details = None
try:
# 校验token
diff --git a/apps/application/template/embed.js b/apps/application/template/embed.js
index eff570e40..33545af47 100644
--- a/apps/application/template/embed.js
+++ b/apps/application/template/embed.js
@@ -247,7 +247,7 @@ function initMaxkbStyle(root){
#maxkb #maxkb-chat-container{
z-index:10000;position: relative;
border-radius: 8px;
- border: 1px solid var(--N300, #DEE0E3);
+ border: 1px solid #ffffff;
background: linear-gradient(188deg, rgba(235, 241, 255, 0.20) 39.6%, rgba(231, 249, 255, 0.20) 94.3%), #EFF0F1;
box-shadow: 0px 4px 8px 0px rgba(31, 35, 41, 0.10);
position: fixed;bottom: 20px;right: 45px;overflow: hidden;
diff --git a/apps/common/auth/authenticate.py b/apps/common/auth/authenticate.py
index de1499e70..3d2a2258e 100644
--- a/apps/common/auth/authenticate.py
+++ b/apps/common/auth/authenticate.py
@@ -47,8 +47,7 @@ class TokenDetails:
class TokenAuth(TokenAuthentication):
# 重新 authenticate 方法,自定义认证规则
def authenticate(self, request):
- auth = request.META.get('HTTP_AUTHORIZATION', None
- )
+ auth = request.META.get('HTTP_AUTHORIZATION')
# 未认证
if auth is None:
raise AppAuthenticationFailed(1003, '未登录,请先登录')
diff --git a/apps/common/util/split_model.py b/apps/common/util/split_model.py
index 16945e0ec..19b265fc6 100644
--- a/apps/common/util/split_model.py
+++ b/apps/common/util/split_model.py
@@ -336,6 +336,7 @@ class SplitModel:
:return: 解析后数据 {content:段落数据,keywords:[‘段落关键词’],parent_chain:['段落父级链路']}
"""
text = text.replace('\r', '\n')
+ text = text.replace("\0", '')
result_tree = self.parse_to_tree(text, 0)
result = result_tree_to_paragraph(result_tree, [], [])
return [item for item in [self.post_reset_paragraph(row) for row in result] if
diff --git a/apps/setting/models_provider/constants/model_provider_constants.py b/apps/setting/models_provider/constants/model_provider_constants.py
index dad4f89fc..3816795e5 100644
--- a/apps/setting/models_provider/constants/model_provider_constants.py
+++ b/apps/setting/models_provider/constants/model_provider_constants.py
@@ -11,7 +11,9 @@ from enum import Enum
from setting.models_provider.impl.azure_model_provider.azure_model_provider import AzureModelProvider
from setting.models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider
from setting.models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider
+from setting.models_provider.impl.qwen_model_provider.qwen_model_provider import QwenModelProvider
from setting.models_provider.impl.wenxin_model_provider.wenxin_model_provider import WenxinModelProvider
+from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider
class ModelProvideConstants(Enum):
@@ -19,3 +21,5 @@ class ModelProvideConstants(Enum):
model_wenxin_provider = WenxinModelProvider()
model_ollama_provider = OllamaModelProvider()
model_openai_provider = OpenAIModelProvider()
+ model_kimi_provider = KimiModelProvider()
+ model_qwen_provider = QwenModelProvider()
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/__init__.py b/apps/setting/models_provider/impl/kimi_model_provider/__init__.py
new file mode 100644
index 000000000..53b7001e5
--- /dev/null
+++ b/apps/setting/models_provider/impl/kimi_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py
+ @date:2023/10/31 17:16
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg b/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg
new file mode 100644
index 000000000..80bfcabff
--- /dev/null
+++ b/apps/setting/models_provider/impl/kimi_model_provider/icon/kimi_icon_svg
@@ -0,0 +1,9 @@
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py b/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py
new file mode 100644
index 000000000..6394e5902
--- /dev/null
+++ b/apps/setting/models_provider/impl/kimi_model_provider/kimi_model_provider.py
@@ -0,0 +1,109 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: kimi_model_provider.py
+ @date:2024/3/28 16:26
+ @desc:
+"""
+import os
+from typing import Dict
+
+from langchain.schema import HumanMessage
+from langchain.chat_models.base import BaseChatModel
+
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
+ ModelInfo, \
+ ModelTypeConst, ValidCode
+from smartdoc.conf import PROJECT_DIR
+from setting.models_provider.impl.kimi_model_provider.model.kimi_chat_model import KimiChatModel
+
+
+
+
+class KimiLLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
+ model_type_list = KimiModelProvider().get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
+
+ for key in ['api_base', 'api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
+ else:
+ return False
+ try:
+ # llm_kimi = Moonshot(
+ # model_name=model_name,
+ # base_url=model_credential['api_base'],
+ # moonshot_api_key=model_credential['api_key']
+ # )
+
+ model = KimiModelProvider().get_model(model_type, model_name, model_credential)
+ model.invoke([HumanMessage(content='你好')])
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_base = forms.TextInputField('API 域名', required=True)
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+
+kimi_llm_model_credential = KimiLLMModelCredential()
+
+model_dict = {
+ 'moonshot-v1-8k': ModelInfo('moonshot-v1-8k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
+ ),
+ 'moonshot-v1-32k': ModelInfo('moonshot-v1-32k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
+ ),
+ 'moonshot-v1-128k': ModelInfo('moonshot-v1-128k', '', ModelTypeConst.LLM, kimi_llm_model_credential,
+ )
+}
+
+
+class KimiModelProvider(IModelProvider):
+
+ def get_dialogue_number(self):
+ return 3
+
+ def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel:
+ kimi_chat_open_ai = KimiChatModel(
+ openai_api_base=model_credential['api_base'],
+ openai_api_key=model_credential['api_key'],
+ model_name=model_name,
+ )
+ return kimi_chat_open_ai
+
+ def get_model_credential(self, model_type, model_name):
+ if model_name in model_dict:
+ return model_dict.get(model_name).model_credential
+ return kimi_llm_model_credential
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_kimi_provider', name='Kimi', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'kimi_model_provider', 'icon',
+ 'kimi_icon_svg')))
+
+ def get_model_list(self, model_type: str):
+ if model_type is None:
+ raise AppApiException(500, '模型类型不能为空')
+ return [model_dict.get(key).to_dict() for key in
+ list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
+
+ def get_model_type_list(self):
+ return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py b/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py
new file mode 100644
index 000000000..c69cae48d
--- /dev/null
+++ b/apps/setting/models_provider/impl/kimi_model_provider/model/kimi_chat_model.py
@@ -0,0 +1,36 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: kimi_chat_model.py
+ @date:2023/11/10 17:45
+ @desc:
+"""
+from typing import List
+
+from langchain_community.chat_models import ChatOpenAI
+from langchain_core.messages import BaseMessage, get_buffer_string
+
+
+class TokenizerManage:
+ tokenizer = None
+
+ @staticmethod
+ def get_tokenizer():
+ from transformers import GPT2TokenizerFast
+ if TokenizerManage.tokenizer is None:
+ TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained('gpt2',
+ cache_dir="/opt/maxkb/model/tokenizer",
+ resume_download=False,
+ force_download=False)
+ return TokenizerManage.tokenizer
+
+
+class KimiChatModel(ChatOpenAI):
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py b/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py
new file mode 100644
index 000000000..1cdfa2aff
--- /dev/null
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/openai_chat_model.py
@@ -0,0 +1,42 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: openai_chat_model.py
+ @date:2024/4/18 15:28
+ @desc:
+"""
+from typing import List
+
+from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_openai import ChatOpenAI
+
+
+class TokenizerManage:
+ tokenizer = None
+
+ @staticmethod
+ def get_tokenizer():
+ from transformers import GPT2TokenizerFast
+ if TokenizerManage.tokenizer is None:
+ TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained('gpt2',
+ cache_dir="/opt/maxkb/model/tokenizer",
+ resume_download=False,
+ force_download=False)
+ return TokenizerManage.tokenizer
+
+
+class OpenAIChatModel(ChatOpenAI):
+ def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ try:
+ return super().get_num_tokens_from_messages(messages)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+ def get_num_tokens(self, text: str) -> int:
+ try:
+ return super().get_num_tokens(text)
+ except Exception as e:
+ tokenizer = TokenizerManage.get_tokenizer()
+ return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py
index 6529d1784..aab6ac08c 100644
--- a/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py
+++ b/apps/setting/models_provider/impl/openai_model_provider/openai_model_provider.py
@@ -10,7 +10,6 @@ import os
from typing import Dict
from langchain.schema import HumanMessage
-from langchain_openai import ChatOpenAI
from common import forms
from common.exception.app_exception import AppApiException
@@ -19,6 +18,7 @@ from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
ModelInfo, \
ModelTypeConst, ValidCode
+from setting.models_provider.impl.openai_model_provider.model.openai_chat_model import OpenAIChatModel
from smartdoc.conf import PROJECT_DIR
@@ -71,8 +71,9 @@ class OpenAIModelProvider(IModelProvider):
def get_dialogue_number(self):
return 3
- def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatOpenAI:
- azure_chat_open_ai = ChatOpenAI(
+ def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> OpenAIChatModel:
+ azure_chat_open_ai = OpenAIChatModel(
+ model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key')
)
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/__init__.py b/apps/setting/models_provider/impl/qwen_model_provider/__init__.py
new file mode 100644
index 000000000..53b7001e5
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/__init__.py
@@ -0,0 +1,8 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: __init__.py
+ @date:2023/10/31 17:16
+ @desc:
+"""
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/icon/qwen_icon_svg b/apps/setting/models_provider/impl/qwen_model_provider/icon/qwen_icon_svg
new file mode 100644
index 000000000..cb9a718af
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/icon/qwen_icon_svg
@@ -0,0 +1 @@
+
\ No newline at end of file
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py
new file mode 100644
index 000000000..46ad1c6ec
--- /dev/null
+++ b/apps/setting/models_provider/impl/qwen_model_provider/qwen_model_provider.py
@@ -0,0 +1,92 @@
+# coding=utf-8
+"""
+ @project: maxkb
+ @Author:虎
+ @file: qwen_model_provider.py
+ @date:2023/10/31 16:19
+ @desc:
+"""
+import os
+from typing import Dict
+
+from langchain.schema import HumanMessage
+from langchain_community.chat_models.tongyi import ChatTongyi
+
+from common import forms
+from common.exception.app_exception import AppApiException
+from common.forms import BaseForm
+from common.util.file_util import get_file_content
+from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
+ ModelInfo, IModelProvider, ValidCode
+from smartdoc.conf import PROJECT_DIR
+
+
+class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
+
+ def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
+ model_type_list = QwenModelProvider().get_model_type_list()
+ if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
+ raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
+ for key in ['api_key']:
+ if key not in model_credential:
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
+ else:
+ return False
+ try:
+ model = QwenModelProvider().get_model(model_type, model_name, model_credential)
+ model.invoke([HumanMessage(content='你好')])
+ except Exception as e:
+ if isinstance(e, AppApiException):
+ raise e
+ if raise_exception:
+ raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
+ else:
+ return False
+ return True
+
+ def encryption_dict(self, model: Dict[str, object]):
+ return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
+
+ api_key = forms.PasswordInputField('API Key', required=True)
+
+
+qwen_model_credential = OpenAILLMModelCredential()
+
+model_dict = {
+ 'qwen-turbo': ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential),
+ 'qwen-plus': ModelInfo('qwen-plus', '', ModelTypeConst.LLM, qwen_model_credential),
+ 'qwen-max': ModelInfo('qwen-max', '', ModelTypeConst.LLM, qwen_model_credential)
+}
+
+
+class QwenModelProvider(IModelProvider):
+
+ def get_dialogue_number(self):
+ return 3
+
+ def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatTongyi:
+ chat_tong_yi = ChatTongyi(
+ model_name=model_name,
+ dashscope_api_key=model_credential.get('api_key')
+ )
+ return chat_tong_yi
+
+ def get_model_credential(self, model_type, model_name):
+ if model_name in model_dict:
+ return model_dict.get(model_name).model_credential
+ return qwen_model_credential
+
+ def get_model_provide_info(self):
+ return ModelProvideInfo(provider='model_qwen_provider', name='通义千问', icon=get_file_content(
+ os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl', 'qwen_model_provider', 'icon',
+ 'qwen_icon_svg')))
+
+ def get_model_list(self, model_type: str):
+ if model_type is None:
+ raise AppApiException(500, '模型类型不能为空')
+ return [model_dict.get(key).to_dict() for key in
+ list(filter(lambda key: model_dict.get(key).model_type == model_type, model_dict.keys()))]
+
+ def get_model_type_list(self):
+ return [{'key': "大语言模型", 'value': "LLM"}]
diff --git a/apps/users/views/user.py b/apps/users/views/user.py
index a6fa856f1..e691ff4b9 100644
--- a/apps/users/views/user.py
+++ b/apps/users/views/user.py
@@ -88,8 +88,7 @@ class ResetCurrentUserPasswordView(APIView):
data.update(request.data)
serializer_obj = RePasswordSerializer(data=data)
if serializer_obj.reset_password():
- token_cache.delete(request.META.get('HTTP_AUTHORIZATION', None
- ))
+ token_cache.delete(request.META.get('HTTP_AUTHORIZATION'))
return result.success(True)
return result.error("修改密码失败")
@@ -119,8 +118,7 @@ class Logout(APIView):
responses=SendEmailSerializer().get_response_body_api(),
tags=['用户'])
def post(self, request: Request):
- token_cache.delete(request.META.get('HTTP_AUTHORIZATION', None
- ))
+ token_cache.delete(request.META.get('HTTP_AUTHORIZATION'))
return result.success(True)
diff --git a/pyproject.toml b/pyproject.toml
index cbe0e5dc2..35d4a42f0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,9 +29,10 @@ html2text = "^2024.2.26"
langchain-openai = "^0.0.8"
django-ipware = "^6.0.4"
django-apscheduler = "^0.6.2"
-pymupdf = "^1.24.0"
+pymupdf = "1.24.1"
python-docx = "^1.1.0"
xlwt = "^1.3.0"
+dashscope = "^1.17.0"
[build-system]
requires = ["poetry-core"]
diff --git a/ui/src/api/type/application.ts b/ui/src/api/type/application.ts
index 8cd8d5437..eefd8f31f 100644
--- a/ui/src/api/type/application.ts
+++ b/ui/src/api/type/application.ts
@@ -51,19 +51,23 @@ export class ChatRecordManage {
this.loading.value = true
}
this.id = setInterval(() => {
- const s = this.chat.buffer.shift()
- if (s !== undefined) {
- this.chat.answer_text = this.chat.answer_text + s
+ if (this.chat.buffer.length > 20) {
+ this.chat.answer_text =
+ this.chat.answer_text + this.chat.buffer.splice(0, this.chat.buffer.length - 20).join('')
+ } else if (this.is_close) {
+ this.chat.answer_text = this.chat.answer_text + this.chat.buffer.join('')
+ this.chat.write_ed = true
+ this.write_ed = true
+ if (this.loading) {
+ this.loading.value = false
+ }
+ if (this.id) {
+ clearInterval(this.id)
+ }
} else {
- if (this.is_close) {
- this.chat.write_ed = true
- this.write_ed = true
- if (this.loading) {
- this.loading.value = false
- }
- if (this.id) {
- clearInterval(this.id)
- }
+ const s = this.chat.buffer.shift()
+ if (s !== undefined) {
+ this.chat.answer_text = this.chat.answer_text + s
}
}
}, this.ms)
diff --git a/ui/src/assets/hit-test-empty.png b/ui/src/assets/hit-test-empty.png
new file mode 100644
index 000000000..83a2c9a06
Binary files /dev/null and b/ui/src/assets/hit-test-empty.png differ
diff --git a/ui/src/components/ai-chat/index.vue b/ui/src/components/ai-chat/index.vue
index 4ace004bc..a76a3ab12 100644
--- a/ui/src/components/ai-chat/index.vue
+++ b/ui/src/components/ai-chat/index.vue
@@ -62,6 +62,9 @@
>
抱歉,没有查找到相关内容,请重新描述您的问题或提供更多信息。
+