fix: correct the wrong max output tokens description

--bug=1046202 --user=王孝刚 [github#1144][Application] Parameter settings: the max output tokens description is wrong https://www.tapd.cn/57709429/s/1579168
Authored by wxg0103 on 2024-09-14 18:36:37 +08:00; committed by wxg0103
parent 080ccd11c4
commit a873719a49
19 changed files with 97 additions and 55 deletions

View File

@ -23,6 +23,7 @@ urlpatterns = [
path('dataset/<str:dataset_id>/document/_bach', views.Document.Batch.as_view()),
path('dataset/<str:dataset_id>/document/batch_hit_handling', views.Document.BatchEditHitHandling.as_view()),
path('dataset/<str:dataset_id>/document/<int:current_page>/<int:page_size>', views.Document.Page.as_view()),
path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>', views.Document.Operate.as_view(),
name="document_operate"),
path('dataset/document/split', views.Document.Split.as_view(),
@ -34,7 +35,6 @@ urlpatterns = [
name="document_export"),
path('dataset/<str:dataset_id>/document/<str:document_id>/sync', views.Document.SyncWeb.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/refresh', views.Document.Refresh.as_view()),
path('dataset/<str:dataset_id>/document/batch_refresh', views.Document.BatchRefresh.as_view()),
path('dataset/<str:dataset_id>/document/<str:document_id>/paragraph', views.Paragraph.as_view()),
path(
'dataset/<str:dataset_id>/document/<str:document_id>/paragraph/migrate/dataset/<str:target_dataset_id>/document/<str:target_document_id>',
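
Note on the route move: Django resolves `urlpatterns` top to bottom, so the literal `document/batch_refresh` segment has to be registered before the `document/<str:document_id>` catch-all; otherwise a request to `batch_refresh` would presumably be routed to `document_operate` with `document_id="batch_refresh"`. A minimal standalone sketch of the ordering rule (illustrative module layout, not MaxKB's actual files):

```python
# Ordering sketch: the fixed segment must be listed before the string
# converter that would otherwise capture it.
from django.urls import path
from . import views

urlpatterns = [
    # Matches only the literal 'batch_refresh' segment.
    path('dataset/<str:dataset_id>/document/batch_refresh',
         views.Document.BatchRefresh.as_view()),
    # <str:document_id> matches any single segment, including 'batch_refresh'
    # if this entry were listed first.
    path('dataset/<str:dataset_id>/document/<str:document_id>',
         views.Document.Operate.as_view(), name="document_operate"),
]
```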

View File

@ -239,7 +239,7 @@ class Document(APIView):
class BatchRefresh(APIView):
authentication_classes = [TokenAuth]
@action(methods=['POST'], detail=False)
@action(methods=['PUT'], detail=False)
@swagger_auto_schema(operation_summary="批量刷新文档向量库",
operation_id="批量刷新文档向量库",
request_body=
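
The batch refresh endpoint switches from POST to PUT here, matching the `put(...)` call added to `api/document.ts` further down, which sends `{ id_list: [...] }` as the request body. The handler body is cut off in this hunk; purely as a hypothetical sketch, a PUT handler for such an endpoint could look like the following (view wiring, auth, and response shape are assumptions, not the project's actual code):

```python
# Hypothetical sketch only; not the real MaxKB handler.
from rest_framework.views import APIView
from rest_framework.response import Response


class BatchRefresh(APIView):
    def put(self, request, dataset_id: str):
        # The frontend sends {"id_list": ["<document_id>", ...]} as the PUT body.
        id_list = request.data.get('id_list', [])
        # Re-embed each selected document here (implementation omitted).
        return Response({'data': True})
```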

View File

@ -19,7 +19,7 @@ class BedrockLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class AzureLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class DeepSeekLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class GeminiLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class KimiLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -23,7 +23,7 @@ class OllamaLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class OpenAILLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class QwenModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=2048,

View File

@ -19,7 +19,7 @@ class VLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class VolcanicEngineLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class WenxinLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=2,
_max=2048,

View File

@ -25,7 +25,7 @@ class XunFeiLLMModelGeneralParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=4096,
_min=1,
_max=4096,
@ -42,7 +42,7 @@ class XunFeiLLMModelProParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=4096,
_min=1,
_max=8192,

View File

@ -19,7 +19,7 @@ class XinferenceLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=800,
_min=1,
_max=4096,

View File

@ -25,7 +25,7 @@ class ZhiPuLLMModelParams(BaseForm):
precision=2)
max_tokens = forms.SliderField(
TooltipLabel('输出最大Tokens', '较高的数值会使输出更加随机,而较低的数值会使其更加集中和确定'),
TooltipLabel('输出最大Tokens', '指定模型可生成的最大token个数'),
required=True, default_value=1024,
_min=1,
_max=4096,

View File

@ -3,6 +3,7 @@ import { get, post, del, put, exportExcel } from '@/request/index'
import type { Ref } from 'vue'
import type { KeyValue } from '@/api/type/common'
import type { pageRequest } from '@/api/type/common'
const prefix = '/dataset'
/**
@ -26,14 +27,14 @@ const listSplitPattern: (
/**
*
* @param dataset_id,
* @param dataset_id,
* page {
"current_page": "string",
"page_size": "string",
}
* param {
"name": "string",
}
"current_page": "string",
"page_size": "string",
}
* param {
"name": "string",
}
*/
const getDocument: (
@ -58,22 +59,22 @@ const getAllDocument: (dataset_id: string, loading?: Ref<boolean>) => Promise<Re
/**
*
* @param
* @param
* {
"name": "string",
"paragraphs": [
{
"content": "string",
"title": "string",
"problem_list": [
{
"id": "string",
"content": "string"
}
]
}
]
}
"name": "string",
"paragraphs": [
{
"content": "string",
"title": "string",
"problem_list": [
{
"id": "string",
"content": "string"
}
]
}
]
}
*/
const postDocument: (
dataset_id: string,
@ -85,13 +86,13 @@ const postDocument: (
/**
*
* @param
* dataset_id, document_id,
* @param
* dataset_id, document_id,
* {
"name": "string",
"is_active": true,
"meta": {}
}
"name": "string",
"is_active": true,
"meta": {}
}
*/
const putDocument: (
dataset_id: string,
@ -124,6 +125,19 @@ const delMulDocument: (
) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
return del(`${prefix}/${dataset_id}/document/_bach`, undefined, { id_list: data }, loading)
}
const batchRefresh: (
dataset_id: string,
data: any,
loading?: Ref<boolean>
) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
return put(
`${prefix}/${dataset_id}/document/batch_refresh`,
{ id_list: data },
undefined,
loading
)
}
/**
*
* @param dataset_id
@ -180,14 +194,14 @@ const delMulSyncDocument: (
/**
* Web站点文档
* @param
* @param
* {
"source_url_list": [
"string"
],
"selector": "string"
"source_url_list": [
"string"
],
"selector": "string"
}
}
}
*/
const postWebDocument: (
dataset_id: string,
@ -199,9 +213,9 @@ const postWebDocument: (
/**
* QA文档
* @param
* @param
* file
}
}
*/
const postQADocument: (
dataset_id: string,
@ -323,5 +337,6 @@ export default {
exportTableTemplate,
postQADocument,
postTableDocument,
exportDocument
exportDocument,
batchRefresh
}

View File

@ -28,6 +28,10 @@
</el-input>
</el-form-item>
</template>
<div v-if="configType === 'wechat'" class="flex align-center" style="margin-bottom: 8px">
<span class="el-form-item__label">是否是订阅号</span>
<el-switch v-if="configType === 'wechat'" v-model="form[configType].is_personal" />
</div>
<h4 class="title-decoration-1 mb-16">回调地址</h4>
<el-form-item label="URL" prop="callback_url">
@ -102,7 +106,14 @@ const {
} = route as any
const form = reactive<any>({
wechat: { app_id: '', app_secret: '', token: '', encoding_aes_key: '', callback_url: '' },
wechat: {
app_id: '',
app_secret: '',
token: '',
encoding_aes_key: '',
is_personal: false,
callback_url: ''
},
dingtalk: { client_id: '', client_secret: '', callback_url: '' },
wecom: {
app_id: '',

View File

@ -23,6 +23,9 @@
<el-button @click="openDatasetDialog()" :disabled="multipleSelection.length === 0">
迁移
</el-button>
<el-button @click="batchRefresh" :disabled="multipleSelection.length === 0">
重新向量化
</el-button>
<el-button @click="openBatchEditDocument" :disabled="multipleSelection.length === 0">
设置
</el-button>
@ -538,6 +541,19 @@ function deleteMulDocument() {
})
}
function batchRefresh() {
const arr: string[] = []
multipleSelection.value.map((v) => {
if (v) {
arr.push(v.id)
}
})
documentApi.batchRefresh(id, arr, loading).then(() => {
MsgSuccess('批量重新向量化成功')
multipleTableRef.value?.clearSelection()
})
}
function deleteDocument(row: any) {
MsgConfirm(
`是否删除文档:${row.name} ?`,