This commit is contained in:
liqiang-fit2cloud 2024-04-18 17:43:22 +08:00
commit 0892a0eb72
35 changed files with 445 additions and 51 deletions

View File

@ -59,6 +59,14 @@ docker run -d --name=maxkb -p 8080:8080 -v ~/.maxkb:/var/lib/postgresql/data 1pa
[![Star History Chart](https://api.star-history.com/svg?repos=1Panel-dev/MaxKB&type=Date)](https://star-history.com/#1Panel-dev/MaxKB&Date)
## 我们的其他开源产品
- [JumpServer](https://github.com/jumpserver/jumpserver/) - 广受欢迎的开源堡垒机
- [DataEase](https://github.com/dataease/dataease/) - 人人可用的开源数据可视化分析工具
- [MeterSphere](https://github.com/metersphere/metersphere/) - 一站式开源自动化测试平台
- [1Panel](https://github.com/1panel-dev/1panel/) - 现代化、开源的 Linux 服务器运维管理面板
- [Halo](https://github.com/halo-dev/halo/) - 强大易用的开源建站工具
## License
Copyright (c) 2014-2024 飞致云 FIT2CLOUD, All rights reserved.

View File

@ -196,7 +196,7 @@ class ApplicationSerializer(serializers.Serializer):
access_token = serializers.CharField(required=True, error_messages=ErrMessage.char("access_token"))
def auth(self, request, with_valid=True):
token = request.META.get('HTTP_AUTHORIZATION', None)
token = request.META.get('HTTP_AUTHORIZATION')
token_details = None
try:
# 校验token

View File

@ -247,7 +247,7 @@ function initMaxkbStyle(root){
#maxkb #maxkb-chat-container{
z-index:10000;position: relative;
border-radius: 8px;
border: 1px solid var(--N300, #DEE0E3);
border: 1px solid #ffffff;
background: linear-gradient(188deg, rgba(235, 241, 255, 0.20) 39.6%, rgba(231, 249, 255, 0.20) 94.3%), #EFF0F1;
box-shadow: 0px 4px 8px 0px rgba(31, 35, 41, 0.10);
position: fixed;bottom: 20px;right: 45px;overflow: hidden;

View File

@ -47,8 +47,7 @@ class TokenDetails:
class TokenAuth(TokenAuthentication):
# 重新 authenticate 方法,自定义认证规则
def authenticate(self, request):
auth = request.META.get('HTTP_AUTHORIZATION', None
)
auth = request.META.get('HTTP_AUTHORIZATION')
# 未认证
if auth is None:
raise AppAuthenticationFailed(1003, '未登录,请先登录')

View File

@ -336,6 +336,7 @@ class SplitModel:
:return: 解析后数据 {content:段落数据,keywords:[段落关键词],parent_chain:['段落父级链路']}
"""
text = text.replace('\r', '\n')
text = text.replace("\0", '')
result_tree = self.parse_to_tree(text, 0)
result = result_tree_to_paragraph(result_tree, [], [])
return [item for item in [self.post_reset_paragraph(row) for row in result] if

View File

@ -11,7 +11,9 @@ from enum import Enum
from setting.models_provider.impl.azure_model_provider.azure_model_provider import AzureModelProvider
from setting.models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider
from setting.models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider
from setting.models_provider.impl.qwen_model_provider.qwen_model_provider import QwenModelProvider
from setting.models_provider.impl.wenxin_model_provider.wenxin_model_provider import WenxinModelProvider
from setting.models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider
class ModelProvideConstants(Enum):
@ -19,3 +21,5 @@ class ModelProvideConstants(Enum):
model_wenxin_provider = WenxinModelProvider()
model_ollama_provider = OllamaModelProvider()
model_openai_provider = OpenAIModelProvider()
model_kimi_provider = KimiModelProvider()
model_qwen_provider = QwenModelProvider()

View File

@ -0,0 +1,8 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file __init__.py
@date 2023/10/31 17:16
@desc:
"""

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 23 KiB

View File

@ -0,0 +1,109 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file kimi_model_provider.py
@date 2024/3/28 16:26
@desc:
"""
import os
from typing import Dict
from langchain.schema import HumanMessage
from langchain.chat_models.base import BaseChatModel
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm
from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
ModelInfo, \
ModelTypeConst, ValidCode
from smartdoc.conf import PROJECT_DIR
from setting.models_provider.impl.kimi_model_provider.model.kimi_chat_model import KimiChatModel
class KimiLLMModelCredential(BaseForm, BaseModelCredential):
    """Credential form for Kimi (Moonshot) LLM models.

    Declares the required ``api_base``/``api_key`` fields and validates a
    credential dict by issuing a one-shot chat completion against the model.
    """

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
        """Check that ``model_credential`` can actually drive ``model_name``.

        :param model_type:       model category; must be supported by this provider
        :param model_name:       concrete model identifier (e.g. ``moonshot-v1-8k``)
        :param model_credential: dict expected to contain ``api_base`` and ``api_key``
        :param raise_exception:  when True, failures raise AppApiException instead of returning False
        :return: True when a trial invocation succeeds, False otherwise
        """
        model_type_list = KimiModelProvider().get_model_type_list()
        if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
            # NOTE(review): raises even when raise_exception is False, unlike the
            # missing-key branch below — confirm this asymmetry is intended.
            raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
        for key in ['api_base', 'api_key']:
            if key not in model_credential:
                if raise_exception:
                    raise AppApiException(ValidCode.valid_error.value, f'{key} 字段为必填字段')
                else:
                    return False
        try:
            # Probe the credential with a minimal real request.
            # (Fixed: removed the dead commented-out Moonshot(...) construction.)
            model = KimiModelProvider().get_model(model_type, model_name, model_credential)
            model.invoke([HumanMessage(content='你好')])
        except Exception as e:
            if isinstance(e, AppApiException):
                raise e
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
            else:
                return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        """Return a copy of ``model`` with the API key masked for display/storage."""
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    api_base = forms.TextInputField('API 域名', required=True)
    api_key = forms.PasswordInputField('API Key', required=True)
# Single credential form instance shared by every Kimi model entry below.
kimi_llm_model_credential = KimiLLMModelCredential()

# Registry of the Moonshot models exposed by this provider, keyed by model name.
model_dict = {
    name: ModelInfo(name, '', ModelTypeConst.LLM, kimi_llm_model_credential)
    for name in ('moonshot-v1-8k', 'moonshot-v1-32k', 'moonshot-v1-128k')
}
class KimiModelProvider(IModelProvider):
    """Provider wiring for Kimi (Moonshot) chat models."""

    def get_dialogue_number(self):
        """Number of history dialogue rounds carried into each request."""
        return 3

    def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> BaseChatModel:
        """Build a KimiChatModel client from the stored credential dict."""
        return KimiChatModel(
            openai_api_base=model_credential['api_base'],
            openai_api_key=model_credential['api_key'],
            model_name=model_name,
        )

    def get_model_credential(self, model_type, model_name):
        """Credential form for ``model_name``; unknown names share the default form."""
        info = model_dict.get(model_name)
        return info.model_credential if info is not None else kimi_llm_model_credential

    def get_model_provide_info(self):
        """Static provider metadata: identifier, display name and inline SVG icon."""
        icon_path = os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl',
                                 'kimi_model_provider', 'icon', 'kimi_icon_svg')
        return ModelProvideInfo(provider='model_kimi_provider', name='Kimi',
                                icon=get_file_content(icon_path))

    def get_model_list(self, model_type: str):
        """Registered models matching ``model_type``; raises when the type is missing."""
        if model_type is None:
            raise AppApiException(500, '模型类型不能为空')
        return [info.to_dict() for info in model_dict.values() if info.model_type == model_type]

    def get_model_type_list(self):
        """Model categories supported by this provider."""
        return [{'key': "大语言模型", 'value': "LLM"}]

View File

@ -0,0 +1,36 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file kimi_chat_model.py
@date 2023/11/10 17:45
@desc:
"""
from typing import List
from langchain_community.chat_models import ChatOpenAI
from langchain_core.messages import BaseMessage, get_buffer_string
class TokenizerManage:
    """Lazily-initialised, process-wide GPT-2 tokenizer for local token counting."""

    # Cached singleton; populated on the first get_tokenizer() call.
    tokenizer = None

    @staticmethod
    def get_tokenizer():
        """Return the shared GPT2TokenizerFast, loading it on first use."""
        from transformers import GPT2TokenizerFast
        if TokenizerManage.tokenizer is not None:
            return TokenizerManage.tokenizer
        TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained(
            'gpt2',
            cache_dir="/opt/maxkb/model/tokenizer",
            resume_download=False,
            force_download=False)
        return TokenizerManage.tokenizer
class KimiChatModel(ChatOpenAI):
    """ChatOpenAI variant whose token counting uses a locally cached GPT-2 tokenizer."""

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Sum the GPT-2 token counts of every message's buffer string."""
        tokenizer = TokenizerManage.get_tokenizer()
        total = 0
        for message in messages:
            total += len(tokenizer.encode(get_buffer_string([message])))
        return total

    def get_num_tokens(self, text: str) -> int:
        """GPT-2 token count of a raw string."""
        return len(TokenizerManage.get_tokenizer().encode(text))

View File

@ -0,0 +1,42 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file openai_chat_model.py
@date 2024/4/18 15:28
@desc:
"""
from typing import List
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai import ChatOpenAI
class TokenizerManage:
    """Lazily-initialised, process-wide GPT-2 tokenizer for fallback token counting."""

    # Cached singleton; populated on the first get_tokenizer() call.
    tokenizer = None

    @staticmethod
    def get_tokenizer():
        """Return the shared GPT2TokenizerFast, loading it on first use."""
        from transformers import GPT2TokenizerFast
        if TokenizerManage.tokenizer is not None:
            return TokenizerManage.tokenizer
        TokenizerManage.tokenizer = GPT2TokenizerFast.from_pretrained(
            'gpt2',
            cache_dir="/opt/maxkb/model/tokenizer",
            resume_download=False,
            force_download=False)
        return TokenizerManage.tokenizer
class OpenAIChatModel(ChatOpenAI):
    """ChatOpenAI that degrades gracefully when built-in token counting fails.

    The parent implementation may raise (presumably for model names unknown to
    its bundled tokenizer — TODO confirm); in that case we approximate with a
    locally cached GPT-2 tokenizer instead of propagating the error.
    """

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Token count of a message list, with GPT-2 fallback on any error."""
        try:
            return super().get_num_tokens_from_messages(messages)
        except Exception:
            # Deliberate broad catch: counting is best-effort and must not break
            # the chat flow. (Fixed: dropped the unused ``as e`` binding and the
            # intermediate list inside sum().)
            tokenizer = TokenizerManage.get_tokenizer()
            return sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages)

    def get_num_tokens(self, text: str) -> int:
        """Token count of a raw string, with GPT-2 fallback on any error."""
        try:
            return super().get_num_tokens(text)
        except Exception:
            tokenizer = TokenizerManage.get_tokenizer()
            return len(tokenizer.encode(text))

View File

@ -10,7 +10,6 @@ import os
from typing import Dict
from langchain.schema import HumanMessage
from langchain_openai import ChatOpenAI
from common import forms
from common.exception.app_exception import AppApiException
@ -19,6 +18,7 @@ from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import IModelProvider, ModelProvideInfo, BaseModelCredential, \
ModelInfo, \
ModelTypeConst, ValidCode
from setting.models_provider.impl.openai_model_provider.model.openai_chat_model import OpenAIChatModel
from smartdoc.conf import PROJECT_DIR
@ -71,8 +71,9 @@ class OpenAIModelProvider(IModelProvider):
def get_dialogue_number(self):
return 3
def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatOpenAI:
azure_chat_open_ai = ChatOpenAI(
def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> OpenAIChatModel:
azure_chat_open_ai = OpenAIChatModel(
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key')
)

View File

@ -0,0 +1,8 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file __init__.py
@date 2023/10/31 17:16
@desc:
"""

File diff suppressed because one or more lines are too long

After

Width:  |  Height:  |  Size: 14 KiB

View File

@ -0,0 +1,92 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file qwen_model_provider.py
@date 2023/10/31 16:19
@desc:
"""
import os
from typing import Dict
from langchain.schema import HumanMessage
from langchain_community.chat_models.tongyi import ChatTongyi
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm
from common.util.file_util import get_file_content
from setting.models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, BaseModelCredential, \
ModelInfo, IModelProvider, ValidCode
from smartdoc.conf import PROJECT_DIR
class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
    """Credential form for Tongyi Qianwen (qwen) models.

    NOTE(review): the class name says "OpenAI" but this lives in the qwen
    provider — presumably copied from the OpenAI provider; consider renaming.

    Only an ``api_key`` is required; validation performs a one-shot chat call.
    """

    def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], raise_exception=False):
        """Return True when ``model_credential`` can actually drive ``model_name``.

        Raises AppApiException for an unsupported model type, and (when
        ``raise_exception`` is set) for a missing key or a failed trial call;
        otherwise failures return False.
        """
        supported = QwenModelProvider().get_model_type_list()
        if not any(mt.get('value') == model_type for mt in supported):
            raise AppApiException(ValidCode.valid_error.value, f'{model_type} 模型类型不支持')
        if 'api_key' not in model_credential:
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value, 'api_key 字段为必填字段')
            return False
        try:
            # Probe the credential with a minimal real request.
            probe = QwenModelProvider().get_model(model_type, model_name, model_credential)
            probe.invoke([HumanMessage(content='你好')])
        except AppApiException:
            raise
        except Exception as e:
            if raise_exception:
                raise AppApiException(ValidCode.valid_error.value, f'校验失败,请检查参数是否正确: {str(e)}')
            return False
        return True

    def encryption_dict(self, model: Dict[str, object]):
        """Return a copy of ``model`` with the API key masked for display/storage."""
        return {**model, 'api_key': super().encryption(model.get('api_key', ''))}

    api_key = forms.PasswordInputField('API Key', required=True)
# Single credential form shared by every qwen model entry.
qwen_model_credential = OpenAILLMModelCredential()

# Supported Tongyi Qianwen models, keyed by model name.
model_dict = {
    name: ModelInfo(name, '', ModelTypeConst.LLM, qwen_model_credential)
    for name in ('qwen-turbo', 'qwen-plus', 'qwen-max')
}
class QwenModelProvider(IModelProvider):
    """Provider wiring for Alibaba Tongyi Qianwen (qwen) chat models."""

    def get_dialogue_number(self):
        """Number of history dialogue rounds carried into each request."""
        return 3

    def get_model(self, model_type, model_name, model_credential: Dict[str, object], **model_kwargs) -> ChatTongyi:
        """Instantiate a ChatTongyi client from the stored credential dict."""
        return ChatTongyi(
            model_name=model_name,
            dashscope_api_key=model_credential.get('api_key')
        )

    def get_model_credential(self, model_type, model_name):
        """Credential form for ``model_name``; unknown names share the default form."""
        info = model_dict.get(model_name)
        return info.model_credential if info is not None else qwen_model_credential

    def get_model_provide_info(self):
        """Static provider metadata: identifier, display name and inline SVG icon."""
        icon_path = os.path.join(PROJECT_DIR, "apps", "setting", 'models_provider', 'impl',
                                 'qwen_model_provider', 'icon', 'qwen_icon_svg')
        return ModelProvideInfo(provider='model_qwen_provider', name='通义千问',
                                icon=get_file_content(icon_path))

    def get_model_list(self, model_type: str):
        """Registered models matching ``model_type``; raises when the type is missing."""
        if model_type is None:
            raise AppApiException(500, '模型类型不能为空')
        return [info.to_dict() for info in model_dict.values() if info.model_type == model_type]

    def get_model_type_list(self):
        """Model categories supported by this provider."""
        return [{'key': "大语言模型", 'value': "LLM"}]

View File

@ -88,8 +88,7 @@ class ResetCurrentUserPasswordView(APIView):
data.update(request.data)
serializer_obj = RePasswordSerializer(data=data)
if serializer_obj.reset_password():
token_cache.delete(request.META.get('HTTP_AUTHORIZATION', None
))
token_cache.delete(request.META.get('HTTP_AUTHORIZATION'))
return result.success(True)
return result.error("修改密码失败")
@ -119,8 +118,7 @@ class Logout(APIView):
responses=SendEmailSerializer().get_response_body_api(),
tags=['用户'])
def post(self, request: Request):
token_cache.delete(request.META.get('HTTP_AUTHORIZATION', None
))
token_cache.delete(request.META.get('HTTP_AUTHORIZATION'))
return result.success(True)

View File

@ -29,9 +29,10 @@ html2text = "^2024.2.26"
langchain-openai = "^0.0.8"
django-ipware = "^6.0.4"
django-apscheduler = "^0.6.2"
pymupdf = "^1.24.0"
pymupdf = "1.24.1"
python-docx = "^1.1.0"
xlwt = "^1.3.0"
dashscope = "^1.17.0"
[build-system]
requires = ["poetry-core"]

View File

@ -51,19 +51,23 @@ export class ChatRecordManage {
this.loading.value = true
}
this.id = setInterval(() => {
const s = this.chat.buffer.shift()
if (s !== undefined) {
this.chat.answer_text = this.chat.answer_text + s
if (this.chat.buffer.length > 20) {
this.chat.answer_text =
this.chat.answer_text + this.chat.buffer.splice(0, this.chat.buffer.length - 20).join('')
} else if (this.is_close) {
this.chat.answer_text = this.chat.answer_text + this.chat.buffer.join('')
this.chat.write_ed = true
this.write_ed = true
if (this.loading) {
this.loading.value = false
}
if (this.id) {
clearInterval(this.id)
}
} else {
if (this.is_close) {
this.chat.write_ed = true
this.write_ed = true
if (this.loading) {
this.loading.value = false
}
if (this.id) {
clearInterval(this.id)
}
const s = this.chat.buffer.shift()
if (s !== undefined) {
this.chat.answer_text = this.chat.answer_text + s
}
}
}, this.ms)

Binary file not shown.

After

Width:  |  Height:  |  Size: 4.7 KiB

View File

@ -62,6 +62,9 @@
>
抱歉没有查找到相关内容请重新描述您的问题或提供更多信息
</el-card>
<el-card v-else-if="item.is_stop" shadow="always" class="dialog-card">
已停止回答
</el-card>
<el-card v-else shadow="always" class="dialog-card">
回答中 <span class="dotting"></span>
</el-card>
@ -144,6 +147,7 @@
placeholder="请输入"
:rows="1"
type="textarea"
:maxlength="1024"
@keydown.enter="sendChatHandle($event)"
/>
<div class="operate">
@ -217,7 +221,7 @@ const chartOpenId = ref('')
const chatList = ref<any[]>([])
const isDisabledChart = computed(
() => !(inputValue.value && (props.appId || (props.data?.name && props.data?.model_id)))
() => !(inputValue.value.trim() && (props.appId || (props.data?.name && props.data?.model_id)))
)
const isMdArray = (val: string) => val.match(/^-\s.*/m)
const prologueList = computed(() => {
@ -286,7 +290,9 @@ function sendChatHandle(event: any) {
// ctrl
event.preventDefault()
if (!isDisabledChart.value && !loading.value && !event.isComposing) {
chatMessage()
if (inputValue.value.trim()) {
chatMessage()
}
}
} else {
// ctrl+
@ -423,7 +429,7 @@ function chatMessage(chat?: any, problem?: string, re_chat?: boolean) {
if (!chat) {
chat = reactive({
id: randomId(),
problem_text: problem ? problem : inputValue.value,
problem_text: problem ? problem : inputValue.value.trim(),
answer_text: '',
buffer: [],
write_ed: false,
@ -432,6 +438,8 @@ function chatMessage(chat?: any, problem?: string, re_chat?: boolean) {
vote_status: '-1'
})
chatList.value.push(chat)
ChatManagement.addChatRecord(chat, 50, loading)
ChatManagement.write(chat.id)
inputValue.value = ''
nextTick(() => {
//
@ -469,8 +477,6 @@ function chatMessage(chat?: any, problem?: string, re_chat?: boolean) {
//
scrollDiv.value.setScrollTop(getMaxHeight())
})
ChatManagement.addChatRecord(chat, 50, loading)
ChatManagement.write(chat.id)
const reader = response.body.getReader()
//
const write = getWrite(

View File

@ -4,6 +4,7 @@
:disabled="!(containerWeight > contentWeight)"
effect="dark"
placement="bottom"
popper-class="auto-tooltip-popper"
>
<div ref="tagLabel" :class="['auto-tooltip', className]" :style="style">
<slot></slot>

View File

@ -1,3 +1,4 @@
·
<template>
<div class="top-bar-container border-b flex-between">
<div class="flex-center h-full">
@ -9,7 +10,31 @@
</div>
<TopMenu></TopMenu>
</div>
<div class="avatar">
<div class="flex-center avatar">
<el-tooltip effect="dark" content="项目地址" placement="top">
<AppIcon
iconName="app-github"
class="cursor color-secondary mr-8 ml-8"
style="font-size: 20px"
@click="toUrl('https://github.com/1Panel-dev/MaxKB')"
></AppIcon>
</el-tooltip>
<el-tooltip effect="dark" content="用户手册" placement="top">
<AppIcon
iconName="app-reading"
class="cursor color-secondary mr-8 ml-8"
style="font-size: 20px"
@click="toUrl('https://github.com/1Panel-dev/MaxKB/wiki')"
></AppIcon>
</el-tooltip>
<el-tooltip effect="dark" content="论坛求助" placement="top">
<AppIcon
iconName="app-help"
class="cursor color-secondary mr-16 ml-8"
style="font-size: 20px"
@click="toUrl('https://bbs.fit2cloud.com/c/mk/11')"
></AppIcon>
</el-tooltip>
<Avatar></Avatar>
</div>
</div>
@ -20,6 +45,10 @@ import Avatar from './avatar/index.vue'
import { useRouter } from 'vue-router'
const router = useRouter()
const defaultTitle = import.meta.env.VITE_APP_TITLE
function toUrl(url: string) {
window.open(url, '_blank')
}
</script>
<style lang="scss">
.top-bar-container {

View File

@ -319,3 +319,15 @@
padding-left: 12px !important;
padding-right: 12px !important;
}
// select下拉框
.select-popper {
max-width: 300px;
.el-select-dropdown__wrap {
max-width: 300px;
}
}
.auto-tooltip-popper {
max-width: 500px;
}

View File

@ -70,7 +70,7 @@ onMounted(() => {
height: var(--app-header-height);
line-height: var(--app-header-height);
box-sizing: border-box;
border-bottom: 1px solid rgba(31, 35, 41, 0.15);
border-bottom: 1px solid var(--el-border-color);
}
&__main {
padding-top: calc(var(--app-header-height) + 24px);

View File

@ -9,6 +9,7 @@
>
<el-form-item prop="fileList">
<el-upload
:webkitdirectory="false"
class="w-full"
drag
multiple
@ -19,18 +20,18 @@
accept=".txt, .md, .csv, .log, .docx, .pdf"
:limit="50"
:on-exceed="onExceed"
:on-change="filehandleChange"
:on-change="fileHandleChange"
@click.prevent="handlePreview(false)"
>
<img src="@/assets/upload-icon.svg" alt="" />
<div class="el-upload__text">
<p>
将文件拖拽至此区域或
<em> 选择文件上传 </em>
拖拽文件至此上传或
<em class="hover" @click.prevent="handlePreview(false)"> 选择文件 </em>
<em class="hover" @click.prevent="handlePreview(true)"> 选择文件夹 </em>
</p>
<div class="upload__decoration">
<p>
支持格式TXTMarkdownPDFDOCX每次最多上传50个文件每个文件不超过 100MB
</p>
<p>支持格式TXTMarkdownPDFDOCX每次最多上传50个文件每个文件不超过 100MB</p>
<p>若使用高级分段建议上传前规范文件的分段标识</p>
</div>
</div>
@ -59,7 +60,7 @@
</el-row>
</template>
<script setup lang="ts">
import { ref, reactive, onUnmounted, onMounted, computed, watch } from 'vue'
import { ref, reactive, onUnmounted, onMounted, computed, watch, nextTick } from 'vue'
import type { UploadFile, UploadFiles } from 'element-plus'
import { filesize, getImgUrl, isRightType } from '@/utils/utils'
import { MsgError } from '@/utils/message'
@ -83,7 +84,7 @@ function deleteFile(index: number) {
}
// on-change
const filehandleChange = (file: any, fileList: UploadFiles) => {
const fileHandleChange = (file: any, fileList: UploadFiles) => {
//110M
const isLimit = file?.size / 1024 / 1024 < 100
if (!isLimit) {
@ -101,6 +102,17 @@ const filehandleChange = (file: any, fileList: UploadFiles) => {
const onExceed = () => {
MsgError('每次最多上传50个文件')
}
const handlePreview = (bool: boolean) => {
let inputDom: any = null
nextTick(() => {
if (document.querySelector('.el-upload__input') != null) {
inputDom = document.querySelector('.el-upload__input')
inputDom.webkitdirectory = bool
}
})
}
/*
表单校验
*/
@ -133,4 +145,9 @@ defineExpose({
line-height: 20px;
color: var(--el-text-color-secondary);
}
.el-upload__text {
.hover:hover {
color: var(--el-color-primary-light-5);
}
}
</style>

View File

@ -20,7 +20,8 @@
</div>
<el-scrollbar>
<div class="hit-test-height">
<el-empty v-if="paragraphDetail.length == 0" description="暂无数据" />
<el-empty v-if="first" :image="emptyImg" description="命中段落显示在这里" />
<el-empty v-else-if="paragraphDetail.length == 0" description="没有命中的分段" />
<el-row v-else>
<el-col
:xs="24"
@ -43,7 +44,8 @@
>
<template #icon>
<AppAvatar class="mr-12 avatar-light" :size="22">
{{ index + 1 + '' }}</AppAvatar>
{{ index + 1 + '' }}</AppAvatar
>
</template>
<div class="active-button primary">{{ item.similarity?.toFixed(3) }}</div>
<template #footer>
@ -145,6 +147,7 @@ import datasetApi from '@/api/dataset'
import applicationApi from '@/api/application'
import ParagraphDialog from '@/views/paragraph/component/ParagraphDialog.vue'
import { arraySort } from '@/utils/utils'
import emptyImg from '@/assets/hit-test-empty.png'
const route = useRoute()
const {
@ -161,6 +164,9 @@ const formInline = ref({
top_number: 5
})
//
const first = ref(true)
const cloneForm = ref<any>({})
const popoverVisible = ref(false)
@ -215,12 +221,14 @@ function getHitTestList() {
paragraphDetail.value = res.data && arraySort(res.data, 'comprehensive_score', true)
questionTitle.value = inputValue.value
inputValue.value = ''
first.value = false
})
} else if (isApplication.value) {
applicationApi.getApplicationHitTest(id, obj, loading).then((res) => {
paragraphDetail.value = res.data && arraySort(res.data, 'comprehensive_score', true)
questionTitle.value = inputValue.value
inputValue.value = ''
first.value = false
})
}
}

View File

@ -25,13 +25,17 @@
@change="addProblemHandle"
@blur="isAddProblem = false"
class="mb-16"
popper-class="select-popper"
:popper-append-to-body="false"
>
<el-option
v-for="item in problemOptions"
:key="item.id"
:label="item.content"
:value="item.id"
/>
>
{{ item.content }}
</el-option>
</el-select>
<template v-for="(item, index) in problemList" :key="index">
<TagEllipsis

View File

@ -157,7 +157,7 @@ function associationClick(item: any) {
function searchHandle() {
paginationConfig.current_page = 1
paragraphList.value = []
getParagraphList(currentDocument.value)
currentDocument.value && getParagraphList(currentDocument.value)
}
function clickDocumentHandle(item: any) {

View File

@ -44,9 +44,8 @@
<el-select
v-loading="model_type_loading"
@change="list_base_model($event)"
style="width: 100%"
v-model="base_form_data.model_type"
class="m-2"
class="w-full m-2"
placeholder="请选择模型类型"
>
<el-option
@ -61,9 +60,8 @@
<el-select
@change="getModelForm($event)"
v-loading="base_model_loading"
style="width: 100%"
v-model="base_form_data.model_name"
class="m-2"
class="w-full m-2"
placeholder="请选择基础模型"
filterable
allow-create

View File

@ -39,9 +39,8 @@
<el-select
v-loading="model_type_loading"
@change="list_base_model($event)"
style="width: 100%"
v-model="base_form_data.model_type"
class="m-2"
class="w-full m-2"
placeholder="请选择模型类型"
>
<el-option
@ -56,9 +55,8 @@
<el-select
@change="getModelForm($event)"
v-loading="base_model_loading"
style="width: 100%"
v-model="base_form_data.model_name"
class="m-2"
class="w-full m-2"
placeholder="请选择基础模型"
filterable
allow-create