Mirror of https://github.com/1Panel-dev/MaxKB.git (synced 2025-12-26 01:33:05 +00:00)

Compare commits: 26 commits
| Author | SHA1 | Date |
|---|---|---|
|  | a1d5c83f2f |  |
|  | e73414d9a9 |  |
|  | c6e9a99cad |  |
|  | e6e4b68100 |  |
|  | 9acc11786c |  |
|  | 3b529ab386 |  |
|  | c6abb6d77d |  |
|  | 8450a5b06d |  |
|  | b4ffe656ad |  |
|  | 1bfaf2f024 |  |
|  | ae56590f3f |  |
|  | 1605fb5388 |  |
|  | fc54a845f4 |  |
|  | e59b531754 |  |
|  | c2962e9ecb |  |
|  | 6861f95cdb |  |
|  | ea28f67696 |  |
|  | 7eaac94649 |  |
|  | 18581c9622 |  |
|  | a2eef10234 |  |
|  | e254951ea8 |  |
|  | 96763fe140 |  |
|  | bd8aebdbb2 |  |
|  | 7eedf1da28 |  |
|  | 8d6dc492d8 |  |
|  | 30975698de |  |
```diff
@@ -10,6 +10,7 @@ import concurrent
 import json
 import threading
 import traceback
+import uuid
 from concurrent.futures import ThreadPoolExecutor
 from functools import reduce
 from typing import List, Dict
@@ -341,15 +342,19 @@ class WorkflowManage:
         self.run_chain_async(current_node, node_result_future)
         return tools.to_stream_response_simple(self.await_result())

-    def is_run(self, timeout=0.1):
-        self.lock.acquire()
+    def is_run(self, timeout=0.5):
+        future_list_len = len(self.future_list)
         try:
             r = concurrent.futures.wait(self.future_list, timeout)
-            return len(r.not_done) > 0
+            if len(r.not_done) > 0:
+                return True
+            else:
+                if future_list_len == len(self.future_list):
+                    return False
+                else:
+                    return True
         except Exception as e:
             return True
         finally:
             self.lock.release()

     def await_result(self):
         try:
```
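The reworked `is_run` polls rather than trusting a single timed wait: `concurrent.futures.wait` returns `done` and `not_done` sets, and comparing the length of `future_list` before and after the wait catches branches that scheduled new node futures while the poll was in flight. A minimal, self-contained sketch of that polling pattern (the worker and timings are illustrative assumptions, not MaxKB code):

```python
# Minimal sketch of the futures-polling pattern used by is_run above.
# The worker function and timings are illustrative assumptions.
import concurrent.futures
import time

def worker(n):
    time.sleep(n)
    return n

with concurrent.futures.ThreadPoolExecutor() as executor:
    future_list = [executor.submit(worker, n) for n in (0.2, 0.4)]
    while True:
        before = len(future_list)
        r = concurrent.futures.wait(future_list, timeout=0.1)
        if len(r.not_done) > 0:
            continue                    # something is still running
        if before == len(future_list):
            break                       # all done, nothing new was scheduled
    print([f.result() for f in future_list])
```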
```diff
@@ -402,12 +407,8 @@ class WorkflowManage:
         # collect the runnable child nodes
         result_list = [{'node': node, 'future': executor.submit(self.run_chain_manage, node, None)} for node in
                        sorted_node_run_list]
-        try:
-            self.lock.acquire()
-            for r in result_list:
-                self.future_list.append(r.get('future'))
-        finally:
-            self.lock.release()
+        for r in result_list:
+            self.future_list.append(r.get('future'))

     def run_chain(self, current_node, node_result_future=None):
         if node_result_future is None:
@@ -575,7 +576,7 @@ class WorkflowManage:
         details['node_id'] = node.id
         details['up_node_id_list'] = node.up_node_id_list
         details['runtime_node_id'] = node.runtime_node_id
-        details_result[node.runtime_node_id] = details
+        details_result[str(uuid.uuid1())] = details
         return details_result

     def get_answer_text_list(self):
@@ -664,9 +665,18 @@ class WorkflowManage:
             for edge in self.flow.edges:
                 if (edge.sourceNodeId == current_node.id and
                         f"{edge.sourceNodeId}_{current_node_result.node_variable.get('branch_id')}_right" == edge.sourceAnchorId):
-                    if self.dependent_node_been_executed(edge.targetNodeId):
-                        node_list.append(
-                            self.get_node_cls_by_id(edge.targetNodeId, self.get_up_node_id_list(edge.targetNodeId)))
+                    next_node = [node for node in self.flow.nodes if node.id == edge.targetNodeId]
+                    if len(next_node) == 0:
+                        continue
+                    if next_node[0].properties.get('condition', "AND") == 'AND':
+                        if self.dependent_node_been_executed(edge.targetNodeId):
+                            node_list.append(
+                                self.get_node_cls_by_id(edge.targetNodeId,
+                                                        [*current_node.up_node_id_list, current_node.node.id]))
+                    else:
+                        node_list.append(
+                            self.get_node_cls_by_id(edge.targetNodeId,
+                                                    [*current_node.up_node_id_list, current_node.node.id]))
         else:
             for edge in self.flow.edges:
                 if edge.sourceNodeId == current_node.id:
@@ -676,10 +686,12 @@ class WorkflowManage:
                     if next_node[0].properties.get('condition', "AND") == 'AND':
                         if self.dependent_node_been_executed(edge.targetNodeId):
                             node_list.append(
-                                self.get_node_cls_by_id(edge.targetNodeId, self.get_up_node_id_list(edge.targetNodeId)))
+                                self.get_node_cls_by_id(edge.targetNodeId,
+                                                        [*current_node.up_node_id_list, current_node.node.id]))
                     else:
                         node_list.append(
-                            self.get_node_cls_by_id(edge.targetNodeId, [current_node.node.id]))
+                            self.get_node_cls_by_id(edge.targetNodeId,
+                                                    [*current_node.up_node_id_list, current_node.node.id]))
         return node_list

     def get_reference_field(self, node_id: str, fields: List[str]):
```
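Both hunks converge on one rule: a successor whose `condition` is `'AND'` is only scheduled after every upstream dependency has executed, while any other condition behaves like OR and schedules the node as soon as one inbound edge fires; in both cases the executed path (`up_node_id_list` plus the current node id) is threaded through. A toy illustration of that gating, with invented node records rather than MaxKB's classes:

```python
# Toy illustration of AND/OR successor gating; the node structure is
# invented for the example and is not MaxKB's real node class.
def is_schedulable(node: dict, executed_ids: set) -> bool:
    if node.get("condition", "AND") == "AND":
        # AND: wait until every upstream dependency has run
        return all(dep in executed_ids for dep in node["up_node_ids"])
    # OR: run as soon as any inbound edge fires
    return True

node = {"id": "n3", "condition": "AND", "up_node_ids": ["n1", "n2"]}
print(is_schedulable(node, {"n1"}))        # False, n2 has not executed
print(is_schedulable(node, {"n1", "n2"}))  # True
```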
```diff
@@ -35,7 +35,7 @@ from common.config.embedding_config import VectorStore
 from common.constants.authentication_type import AuthenticationType
 from common.db.search import get_dynamics_model, native_search, native_page_search
 from common.db.sql_execute import select_list
-from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed
+from common.exception.app_exception import AppApiException, NotFound404, AppUnauthorizedFailed, ChatException
 from common.field.common import UploadedImageField, UploadedFileField
 from common.models.db_model_manage import DBModelManage
 from common.response import result
@@ -268,7 +268,8 @@ class ApplicationSerializer(serializers.Serializer):
             float_location = application_setting.float_location
             if application_setting.custom_theme is not None and len(
                     application_setting.custom_theme.get('header_font_color', 'rgb(100, 106, 115)')) > 0:
-                header_font_color = application_setting.custom_theme.get('header_font_color', 'rgb(100, 106, 115)')
+                header_font_color = application_setting.custom_theme.get('header_font_color',
+                                                                         'rgb(100, 106, 115)')

         is_auth = 'true' if application_access_token is not None and application_access_token.is_active else 'false'
         t = Template(content)
@@ -916,6 +917,12 @@ class ApplicationSerializer(serializers.Serializer):
             if application_access_token is None:
                 raise AppUnauthorizedFailed(500, "非法用户")
             application_setting_model = DBModelManage.get_model('application_setting')
+            if application.type == ApplicationTypeChoices.WORK_FLOW:
+                work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=application.id).order_by(
+                    '-create_time')[0:1].first()
+                if work_flow_version is not None:
+                    application.work_flow = work_flow_version.work_flow
+
             xpack_cache = DBModelManage.get_model('xpack_cache')
             X_PACK_LICENSE_IS_VALID = False if xpack_cache is None else xpack_cache.get('XPACK_LICENSE_IS_VALID', False)
             application_setting_dict = {}
@@ -1146,9 +1153,23 @@ class ApplicationSerializer(serializers.Serializer):
         def get_application(self, app_id, with_valid=True):
-            if with_valid:
-                self.is_valid(raise_exception=True)
-            application = QuerySet(Application).filter(id=self.data.get("application_id")).first()
-            return ApplicationSerializer.Operate(data={'user_id': application.user_id, 'application_id': app_id}).one(
-                with_valid=True)
+            if with_valid:
+                self.is_valid()
+            embed_application = QuerySet(Application).get(id=app_id)
+            if embed_application.type == ApplicationTypeChoices.WORK_FLOW:
+                work_flow_version = QuerySet(WorkFlowVersion).filter(application_id=embed_application.id).order_by(
+                    '-create_time')[0:1].first()
+                if work_flow_version is not None:
+                    embed_application.work_flow = work_flow_version.work_flow
+            dataset_list = self.list_dataset(with_valid=False)
+            mapping_dataset_id_list = [adm.dataset_id for adm in
+                                       QuerySet(ApplicationDatasetMapping).filter(application_id=app_id)]
+            dataset_id_list = [d.get('id') for d in
+                               list(filter(lambda row: mapping_dataset_id_list.__contains__(row.get('id')),
+                                           dataset_list))]
+            self.update_search_node(embed_application.work_flow, [str(dataset.get('id')) for dataset in dataset_list])
+            return {**ApplicationSerializer.Query.reset_application(ApplicationSerializerModel(embed_application).data),
+                    'dataset_id_list': dataset_id_list}

     class ApplicationKeySerializerModel(serializers.ModelSerializer):
         class Meta:
```
```diff
@@ -9,6 +9,7 @@
 import setting.models
 from setting.models import Model
 from .listener_manage import *
+from common.db.sql_execute import update_execute

 update_document_status_sql = """
 UPDATE "public"."document"
```
```diff
@@ -6,26 +6,22 @@
 @date:2023/10/20 14:01
 @desc:
 """
 import datetime
 import logging
 import os
 import threading
 import time
 import traceback
 from typing import List

 import django.db.models
 from django.db import models, transaction
 from django.db.models import QuerySet
+from django.db.models.functions import Substr, Reverse
 from langchain_core.embeddings import Embeddings

 from common.config.embedding_config import VectorStore
 from common.db.search import native_search, get_dynamics_model, native_update
 from common.db.sql_execute import sql_execute, update_execute
 from common.util.file_util import get_file_content
 from common.util.lock import try_lock, un_lock
 from common.util.page_utils import page
+from common.util.page_utils import page_desc
 from dataset.models import Paragraph, Status, Document, ProblemParagraphMapping, TaskType, State
 from embedding.models import SourceType, SearchMode
 from smartdoc.conf import PROJECT_DIR
@@ -162,7 +158,7 @@ class ListenerManagement:
             if is_the_task_interrupted():
                 break
             ListenerManagement.embedding_by_paragraph(str(paragraph.get('id')), embedding_model)
-        post_apply()
+            post_apply()

     return embedding_paragraph_apply
@@ -241,13 +237,16 @@ class ListenerManagement:
             lock.release()

     @staticmethod
-    def embedding_by_document(document_id, embedding_model: Embeddings):
+    def embedding_by_document(document_id, embedding_model: Embeddings, state_list=None):
         """
         Embed a document
+        @param state_list:
         @param document_id: document id
         @param embedding_model: embedding model
         :return: None
         """
+        if state_list is None:
+            state_list = [State.PENDING, State.SUCCESS, State.FAILURE, State.REVOKE, State.REVOKED]
         if not try_lock('embedding' + str(document_id)):
             return
         try:
@@ -268,11 +267,17 @@ class ListenerManagement:
             VectorStore.get_embedding_vector().delete_by_document_id(document_id)

             # embed paragraph by paragraph
-            page(QuerySet(Paragraph).filter(document_id=document_id).values('id'), 5,
-                 ListenerManagement.get_embedding_paragraph_apply(embedding_model, is_the_task_interrupted,
-                                                                  ListenerManagement.get_aggregation_document_status(
-                                                                      document_id)),
-                 is_the_task_interrupted)
+            page_desc(QuerySet(Paragraph)
+                      .annotate(
+                          reversed_status=Reverse('status'),
+                          task_type_status=Substr('reversed_status', TaskType.EMBEDDING.value,
+                                                  1),
+                      ).filter(task_type_status__in=state_list, document_id=document_id)
+                      .values('id'), 5,
+                      ListenerManagement.get_embedding_paragraph_apply(embedding_model, is_the_task_interrupted,
+                                                                       ListenerManagement.get_aggregation_document_status(
+                                                                           document_id)),
+                      is_the_task_interrupted)
         except Exception as e:
             max_kb_error.error(f'向量化文档:{document_id}出现错误{str(e)}{traceback.format_exc()}')
         finally:
```
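The `Reverse` plus `Substr` annotation relies on a packed status string: each task type owns one character of `Paragraph.status`, indexed from the right end, so reversing the status and taking the character at position `TaskType.EMBEDDING.value` (Django's `Substr` is 1-indexed) yields the row's embedding state. A plain-Python rendering of that convention, assuming `TaskType.EMBEDDING.value == 1`; the other task-type values are hypothetical:

```python
# Plain-Python rendering of the packed status-string convention assumed by
# the Reverse/Substr annotation above. EMBEDDING = 1 is an assumption taken
# from how the diff uses it; the other values are hypothetical.
from enum import Enum

class TaskType(Enum):
    EMBEDDING = 1
    GENERATE_PROBLEM = 2  # assumption
    SYNC = 3              # assumption

def task_state(status: str, task_type: TaskType) -> str:
    reversed_status = status[::-1]               # Reverse('status')
    return reversed_status[task_type.value - 1]  # Substr(reversed, value, 1), 1-indexed

# The rightmost character carries the embedding state; rows whose character
# is '2' (SUCCESS) are filtered out when state_list excludes '2'.
print(task_state("nn2", TaskType.EMBEDDING))  # -> '2'
```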
```diff
@@ -113,8 +113,10 @@ class DocSplitHandle(BaseSplitHandle):
     def paragraph_to_md(paragraph: Paragraph, doc: Document, images_list, get_image_id):
         try:
             psn = paragraph.style.name
-            if psn.startswith('Heading'):
-                title = "".join(["#" for i in range(int(psn.replace("Heading ", '')))]) + " " + paragraph.text
+            if psn.startswith('Heading') or psn.startswith('TOC 标题') or psn.startswith('标题'):
+                title = "".join(["#" for i in range(
+                    int(psn.replace("Heading ", '').replace('TOC 标题', '').replace('标题',
+                                                                                  '')))]) + " " + paragraph.text
             images = reduce(lambda x, y: [*x, *y],
                             [get_paragraph_element_images(e, doc, images_list, get_image_id) for e in
                              paragraph._element],
@@ -202,4 +204,4 @@ class DocSplitHandle(BaseSplitHandle):
             return content
         except BaseException as e:
-            traceback.print_exception(e)
-            return f'{e}'
+            return f'{e}'
```
```diff
@@ -26,3 +26,22 @@ def page(query_set, page_size, handler, is_the_task_interrupted=lambda: False):
         offset = i * page_size
         paragraph_list = query.all()[offset: offset + page_size]
         handler(paragraph_list)
+
+
+def page_desc(query_set, page_size, handler, is_the_task_interrupted=lambda: False):
+    """
+    Page through a query set, visiting pages in descending offset order
+    @param query_set: query set to page over
+    @param page_size: rows per page
+    @param handler: handler applied to each page of rows
+    @param is_the_task_interrupted: whether the task has been interrupted
+    @return:
+    """
+    query = query_set.order_by("id")
+    count = query_set.count()
+    for i in sorted(range(0, ceil(count / page_size)), reverse=True):
+        if is_the_task_interrupted():
+            return
+        offset = i * page_size
+        paragraph_list = query.all()[offset: offset + page_size]
+        handler(paragraph_list)
```
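`page_desc` walks the pages from the last offset back to the first. That matters when the handler changes rows so they drop out of the filtered query set (here, embedding a paragraph flips its status out of `state_list`): with ascending offsets the shrinking result set slides unvisited rows past the window, while descending offsets only ever discard pages that were already visited. A self-contained illustration, with a plain list standing in for the query set:

```python
# Why descending offsets: the handler removes rows from the filtered set,
# modeled here with a plain list instead of a Django query set.
from math import ceil

def drain(rows, page_size, descending):
    order = range(ceil(len(rows) / page_size))
    processed = []
    for i in (sorted(order, reverse=True) if descending else order):
        page = rows[i * page_size: (i + 1) * page_size]
        processed += page
        rows = [r for r in rows if r not in page]  # handler "flips status"
    return processed

print(sorted(drain(list(range(10)), 3, descending=False)))  # 6 of 10: rows skipped
print(sorted(drain(list(range(10)), 3, descending=True)))   # all 10 processed
```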
```diff
@@ -700,20 +700,28 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
             _document.save()
             return self.one()

         @transaction.atomic
-        def refresh(self, with_valid=True):
+        def refresh(self, state_list=None, with_valid=True):
+            if state_list is None:
+                state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value,
+                              State.REVOKE.value,
+                              State.REVOKED.value, State.IGNORED.value]
             if with_valid:
                 self.is_valid(raise_exception=True)
             document_id = self.data.get("document_id")
             ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
                                              State.PENDING)
-            ListenerManagement.update_status(QuerySet(Paragraph).filter(document_id=document_id),
+            ListenerManagement.update_status(QuerySet(Paragraph).annotate(
+                reversed_status=Reverse('status'),
+                task_type_status=Substr('reversed_status', TaskType.EMBEDDING.value,
+                                        1),
+            ).filter(task_type_status__in=state_list, document_id=document_id)
+                                             .values('id'),
                                              TaskType.EMBEDDING,
                                              State.PENDING)
             ListenerManagement.get_aggregation_document_status(document_id)()
             embedding_model_id = get_embedding_model_id_by_dataset_id(dataset_id=self.data.get('dataset_id'))
             try:
-                embedding_by_document.delay(document_id, embedding_model_id)
+                embedding_by_document.delay(document_id, embedding_model_id, state_list)
             except AlreadyQueued as e:
                 raise AppApiException(500, "任务正在执行中,请勿重复下发")
@@ -1122,14 +1130,14 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
             if with_valid:
                 self.is_valid(raise_exception=True)
             document_id_list = instance.get("id_list")
-            with transaction.atomic():
-                dataset_id = self.data.get('dataset_id')
-                for document_id in document_id_list:
-                    try:
-                        DocumentSerializers.Operate(
-                            data={'dataset_id': dataset_id, 'document_id': document_id}).refresh()
-                    except AlreadyQueued as e:
-                        pass
+            state_list = instance.get("state_list")
+            dataset_id = self.data.get('dataset_id')
+            for document_id in document_id_list:
+                try:
+                    DocumentSerializers.Operate(
+                        data={'dataset_id': dataset_id, 'document_id': document_id}).refresh(state_list)
+                except AlreadyQueued as e:
+                    pass

         class GenerateRelated(ApiMixin, serializers.Serializer):
             document_id = serializers.UUIDField(required=True, error_messages=ErrMessage.uuid("文档id"))
```
```diff
@@ -51,3 +51,16 @@ class DocumentApi(ApiMixin):
                                               description="1|2|3 1:向量化|2:生成问题|3:同步文档", default=1)
             }
         )
+
+    class EmbeddingState(ApiMixin):
+        @staticmethod
+        def get_request_body_api():
+            return openapi.Schema(
+                type=openapi.TYPE_OBJECT,
+                properties={
+                    'state_list': openapi.Schema(type=openapi.TYPE_ARRAY,
+                                                 items=openapi.Schema(type=openapi.TYPE_STRING),
+                                                 title="状态列表",
+                                                 description="状态列表")
+                }
+            )
```
```diff
@@ -262,6 +262,7 @@ class Document(APIView):
         @action(methods=['PUT'], detail=False)
         @swagger_auto_schema(operation_summary="刷新文档向量库",
                              operation_id="刷新文档向量库",
+                             request_body=DocumentApi.EmbeddingState.get_request_body_api(),
                              manual_parameters=DocumentSerializers.Operate.get_request_params_api(),
                              responses=result.get_default_response(),
                              tags=["知识库/文档"]
@@ -272,6 +273,7 @@ class Document(APIView):
         def put(self, request: Request, dataset_id: str, document_id: str):
             return result.success(
                 DocumentSerializers.Operate(data={'document_id': document_id, 'dataset_id': dataset_id}).refresh(
+                    request.data.get('state_list')
                 ))

     class BatchRefresh(APIView):
```
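With the request body wired through, the refresh endpoint accepts an optional `state_list` that restricts re-embedding to paragraphs currently in those states. A hedged client-side sketch: the base URL, path prefix, and auth header are assumptions; only the trailing path segments and the body shape come from the diff:

```python
# Hypothetical client call; base URL, prefix, and auth header are
# assumptions, only the trailing path and body shape come from the diff.
import requests

BASE = "http://localhost:8080/api/dataset"  # assumed prefix
dataset_id, document_id, token = "...", "...", "..."

resp = requests.put(
    f"{BASE}/{dataset_id}/document/{document_id}/refresh",
    json={"state_list": ["0", "1", "3", "4", "5", "n"]},  # skip SUCCESS ('2') rows
    headers={"AUTHORIZATION": token},
)
print(resp.json())
```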
```diff
@@ -56,14 +56,20 @@ def embedding_by_paragraph_list(paragraph_id_list, model_id):


 @celery_app.task(base=QueueOnce, once={'keys': ['document_id']}, name='celery:embedding_by_document')
-def embedding_by_document(document_id, model_id):
+def embedding_by_document(document_id, model_id, state_list=None):
     """
     Embed a document
+    @param state_list:
     @param document_id: document id
     @param model_id: embedding model id
     :return: None
     """
+    if state_list is None:
+        state_list = [State.PENDING.value, State.STARTED.value, State.SUCCESS.value, State.FAILURE.value,
+                      State.REVOKE.value,
+                      State.REVOKED.value, State.IGNORED.value]

     def exception_handler(e):
         ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType.EMBEDDING,
                                          State.FAILURE)
@@ -71,7 +77,7 @@ def embedding_by_document(document_id, model_id):
                          f'获取向量模型失败:{str(e)}{traceback.format_exc()}')

     embedding_model = get_embedding_model(model_id, exception_handler)
-    ListenerManagement.embedding_by_document(document_id, embedding_model)
+    ListenerManagement.embedding_by_document(document_id, embedding_model, state_list)


 @celery_app.task(name='celery:embedding_by_document_list')
```
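`base=QueueOnce, once={'keys': ['document_id']}` comes from the celery-once package: it takes a distributed lock keyed on the listed arguments, so a second `delay()` for the same document raises `AlreadyQueued` instead of enqueueing a duplicate, which is exactly what the serializer above converts into its "任务正在执行中" error. A minimal sketch of the pattern; the broker URL and Redis settings are assumptions:

```python
# Minimal celery-once sketch; broker URL and Redis location are assumptions.
from celery import Celery
from celery_once import QueueOnce, AlreadyQueued

app = Celery("tasks", broker="redis://localhost:6379/0")
app.conf.ONCE = {
    "backend": "celery_once.backends.Redis",
    "settings": {"url": "redis://localhost:6379/0", "default_timeout": 60 * 60},
}

@app.task(base=QueueOnce, once={"keys": ["document_id"]})
def embed(document_id, state_list=None):
    ...

try:
    embed.delay("doc-1")
    embed.delay("doc-1")  # same key while the first is still pending
except AlreadyQueued:
    print("already queued, not re-dispatched")
```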
```diff
@@ -73,7 +73,9 @@ model_info_manage = (
     ModelInfoManage.builder()
     .append_model_info_list(model_info_list)
     .append_model_info_list(module_info_vl_list)
+    .append_default_model_info(module_info_vl_list[0])
     .append_model_info_list(module_info_tti_list)
+    .append_default_model_info(module_info_tti_list[0])
     .append_default_model_info(model_info_list[1])
     .append_default_model_info(model_info_list[2])
     .append_default_model_info(model_info_list[3])
@@ -2,12 +2,11 @@

 from typing import Dict

-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi
```
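Across the provider hunks that follow, each vision model stops inheriting `ChatOpenAI` directly and drops its private `custom_get_token_ids` helper in favor of a shared `setting.models_provider.impl.base_chat_open_ai.BaseChatOpenAI`. The diff never shows that base class, but judging by the removed helpers and the Azure hunk further down, it plausibly centralizes the local-tokenizer fallback, roughly along these lines (a sketch inferred from the diff, not the actual file; it presumes the MaxKB source tree for the two project imports):

```python
# Sketch of what the shared BaseChatOpenAI plausibly centralizes, inferred
# from the removed per-provider helpers; not the actual MaxKB source.
from typing import List

from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI

from common.config.tokenizer_manage_config import TokenizerManage


class BaseChatOpenAI(ChatOpenAI):

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        try:
            return super().get_num_tokens_from_messages(messages)
        except Exception:
            # provider-agnostic fallback: count with the bundled tokenizer
            tokenizer = TokenizerManage.get_tokenizer()
            return sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages)

    def get_num_tokens(self, text: str) -> int:
        try:
            return super().get_num_tokens(text)
        except Exception:
            tokenizer = TokenizerManage.get_tokenizer()
            return len(tokenizer.encode(text))
```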
```diff
@@ -1,7 +1,7 @@
-from typing import Dict
+from typing import Dict, List

+from langchain_core.messages import BaseMessage, get_buffer_string
 from langchain_openai import AzureChatOpenAI
-from langchain_openai.chat_models import ChatOpenAI

 from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -26,3 +26,17 @@ class AzureOpenAIImage(MaxKBBaseModel, AzureChatOpenAI):
             streaming=True,
             **optional_params,
         )
+
+    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+        try:
+            return super().get_num_tokens_from_messages(messages)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
+
+    def get_num_tokens(self, text: str) -> int:
+        try:
+            return super().get_num_tokens(text)
+        except Exception as e:
+            tokenizer = TokenizerManage.get_tokenizer()
+            return len(tokenizer.encode(text))
```
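The try/except matters because LangChain's default token counting resolves the model name through tiktoken, which raises for names it does not recognize, a common situation with Azure deployment names or OpenAI-compatible proxies; the fallback then counts with the locally bundled tokenizer instead. The failure mode in isolation (the deployment name is hypothetical):

```python
# Why super().get_num_tokens() can raise: tiktoken only knows official
# model names. The deployment name below is hypothetical.
import tiktoken

try:
    tiktoken.encoding_for_model("my-azure-gpt4o-deployment")
except KeyError:
    enc = tiktoken.get_encoding("cl100k_base")  # generic fallback encoding
    print(len(enc.encode("hello world")))       # -> 2
```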
```diff
@@ -1,15 +1,8 @@
 from typing import Dict
 from urllib.parse import urlparse, ParseResult

-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


 def get_base_url(url: str):
@@ -20,7 +13,7 @@ def get_base_url(url: str):
     return result_url[:-1] if result_url.endswith("/") else result_url


-class OllamaImage(MaxKBBaseModel, ChatOpenAI):
+class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -34,5 +27,6 @@ class OllamaImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
@@ -171,6 +171,7 @@ model_info_manage = (
                                          '一个具有大令牌上下文窗口的高性能开放嵌入模型。',
                                          ModelTypeConst.EMBEDDING, ollama_embedding_model_credential, OllamaEmbedding), )
     .append_model_info_list(image_model_info)
+    .append_default_model_info(image_model_info[0])
     .build()
 )
```
```diff
@@ -1,17 +1,10 @@
 from typing import Dict

-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
+class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class OpenAIImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
@@ -121,7 +121,16 @@ model_info_manage = (
     .append_model_info_list(model_info_embedding_list)
     .append_default_model_info(model_info_embedding_list[0])
+    .append_model_info_list(model_info_image_list)
+    .append_default_model_info(model_info_image_list[0])
+    .append_model_info_list(model_info_tti_list)
+    .append_default_model_info(model_info_tti_list[0])
     .append_default_model_info(ModelInfo('whisper-1', '',
                                          ModelTypeConst.STT, openai_stt_model_credential,
                                          OpenAISpeechToText)
                                )
     .append_default_model_info(ModelInfo('tts-1', '',
                                          ModelTypeConst.TTS, openai_tts_model_credential,
                                          OpenAITextToSpeech))
     .build()
 )
```
```diff
@@ -2,12 +2,11 @@

 from typing import Dict

-from langchain_community.chat_models import ChatOpenAI
-
 from setting.models_provider.base_model_provider import MaxKBBaseModel
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
+class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -18,6 +17,7 @@ class QwenVLChatModel(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
         return chat_tong_yi
@@ -46,7 +46,9 @@ model_info_manage = (
     .append_default_model_info(
         ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel))
     .append_model_info_list(module_info_vl_list)
+    .append_default_model_info(module_info_vl_list[0])
     .append_model_info_list(module_info_tti_list)
+    .append_default_model_info(module_info_tti_list[0])
     .build()
 )
```
```diff
@@ -1,17 +1,10 @@
 from typing import Dict

-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class TencentVision(MaxKBBaseModel, ChatOpenAI):
+class TencentVision(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class TencentVision(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
@@ -96,13 +96,15 @@ def _initialize_model_info():
                                              TencentTTIModelCredential,
                                              TencentTextToImageModel)]

     model_info_manage = ModelInfoManage.builder() \
         .append_model_info_list(model_info_list) \
         .append_model_info_list(model_info_embedding_list) \
         .append_model_info_list(model_info_vision_list) \
+        .append_default_model_info(model_info_vision_list[0]) \
         .append_model_info_list(model_info_tti_list) \
+        .append_default_model_info(model_info_tti_list[0]) \
         .append_default_model_info(model_info_list[0]) \
         .append_default_model_info(tencent_embedding_model_info) \
         .build()

     return model_info_manage
```
```diff
@@ -1,17 +1,10 @@
 from typing import Dict

-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
+class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class VolcanicEngineImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base=model_credential.get('api_base'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
@@ -93,6 +93,9 @@ model_info_manage = (
     .append_model_info_list(model_info_list)
     .append_default_model_info(model_info_list[0])
     .append_default_model_info(model_info_list[1])
+    .append_default_model_info(model_info_list[2])
+    .append_default_model_info(model_info_list[3])
+    .append_default_model_info(model_info_list[4])
     .build()
 )
```
```diff
@@ -40,8 +40,20 @@ model_info_list = [
     ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding)
 ]

-model_info_manage = ModelInfoManage.builder().append_model_info_list(model_info_list).append_default_model_info(
-    ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM)).build()
+model_info_manage = (
+    ModelInfoManage.builder()
+    .append_model_info_list(model_info_list)
+    .append_default_model_info(
+        ModelInfo('generalv3.5', '', ModelTypeConst.LLM, qwen_model_credential, XFChatSparkLLM))
+    .append_default_model_info(
+        ModelInfo('iat', '中英文识别', ModelTypeConst.STT, stt_model_credential, XFSparkSpeechToText),
+    )
+    .append_default_model_info(
+        ModelInfo('tts', '', ModelTypeConst.TTS, tts_model_credential, XFSparkTextToSpeech))
+    .append_default_model_info(
+        ModelInfo('embedding', '', ModelTypeConst.EMBEDDING, embedding_model_credential, XFEmbedding))
+    .build()
+)


 class XunFeiModelProvider(IModelProvider):
```
```diff
@@ -1,17 +1,10 @@
 from typing import Dict

-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
+class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class XinferenceImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
@@ -521,25 +521,29 @@ embedding_model_info = [
 rerank_list = [ModelInfo('bce-reranker-base_v1',
                          '发布新的重新排名器,建立在强大的 M3 和LLM (GEMMA 和 MiniCPM,实际上没那么大)骨干上,支持多语言处理和更大的输入,大幅提高 BEIR、C-MTEB/Retrieval 的排名性能、MIRACL、LlamaIndex 评估',
                          ModelTypeConst.RERANKER, XInferenceRerankerModelCredential(), XInferenceReranker)]
-model_info_manage = (ModelInfoManage.builder()
-                     .append_model_info_list(model_info_list)
-                     .append_model_info_list(voice_model_info)
-                     .append_default_model_info(voice_model_info[0])
-                     .append_default_model_info(voice_model_info[1])
-                     .append_default_model_info(ModelInfo('phi3',
-                                                          'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。',
-                                                          ModelTypeConst.LLM, xinference_llm_model_credential,
-                                                          XinferenceChatModel))
-                     .append_model_info_list(embedding_model_info)
-                     .append_default_model_info(ModelInfo('',
-                                                          '',
-                                                          ModelTypeConst.EMBEDDING,
-                                                          xinference_embedding_model_credential, XinferenceEmbedding))
-                     .append_model_info_list(rerank_list)
-                     .append_model_info_list(image_model_info)
-                     .append_model_info_list(tti_model_info)
-                     .append_default_model_info(rerank_list[0])
-                     .build())
+model_info_manage = (
+    ModelInfoManage.builder()
+    .append_model_info_list(model_info_list)
+    .append_model_info_list(voice_model_info)
+    .append_default_model_info(voice_model_info[0])
+    .append_default_model_info(voice_model_info[1])
+    .append_default_model_info(ModelInfo('phi3',
+                                         'Phi-3 Mini是Microsoft的3.8B参数,轻量级,最先进的开放模型。',
+                                         ModelTypeConst.LLM, xinference_llm_model_credential,
+                                         XinferenceChatModel))
+    .append_model_info_list(embedding_model_info)
+    .append_default_model_info(ModelInfo('',
+                                         '',
+                                         ModelTypeConst.EMBEDDING,
+                                         xinference_embedding_model_credential, XinferenceEmbedding))
+    .append_model_info_list(rerank_list)
+    .append_model_info_list(image_model_info)
+    .append_default_model_info(image_model_info[0])
+    .append_model_info_list(tti_model_info)
+    .append_default_model_info(tti_model_info[0])
+    .append_default_model_info(rerank_list[0])
+    .build()
+)


 def get_base_url(url: str):
```
```diff
@@ -1,17 +1,10 @@
 from typing import Dict

-from langchain_openai.chat_models import ChatOpenAI
-
-from common.config.tokenizer_manage_config import TokenizerManage
 from setting.models_provider.base_model_provider import MaxKBBaseModel
-
-
-def custom_get_token_ids(text: str):
-    tokenizer = TokenizerManage.get_tokenizer()
-    return tokenizer.encode(text)
+from setting.models_provider.impl.base_chat_open_ai import BaseChatOpenAI


-class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
+class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):

     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
@@ -22,5 +15,6 @@ class ZhiPuImage(MaxKBBaseModel, ChatOpenAI):
             openai_api_base='https://open.bigmodel.cn/api/paas/v4',
             # stream_options={"include_usage": True},
             streaming=True,
+            stream_usage=True,
             **optional_params,
         )
@@ -48,6 +48,9 @@ model_info_tti_list = [
     ModelInfo('cogview-3-plus', '根据用户文字描述生成高质量图像,支持多图片尺寸',
               ModelTypeConst.TTI, zhipu_tti_model_credential,
               ZhiPuTextToImage),
+    ModelInfo('cogview-3-flash', '根据用户文字描述生成高质量图像,支持多图片尺寸(免费)',
+              ModelTypeConst.TTI, zhipu_tti_model_credential,
+              ZhiPuTextToImage),
 ]

 model_info_manage = (
@@ -55,7 +58,9 @@ model_info_manage = (
     .append_model_info_list(model_info_list)
     .append_default_model_info(ModelInfo('glm-4', '', ModelTypeConst.LLM, qwen_model_credential, ZhipuChatModel))
     .append_model_info_list(model_info_image_list)
+    .append_default_model_info(model_info_image_list[0])
     .append_model_info_list(model_info_tti_list)
+    .append_default_model_info(model_info_tti_list[0])
     .build()
 )
```
```diff
@@ -22,7 +22,7 @@ RUN mkdir -p /opt/maxkb/app /opt/maxkb/model /opt/maxkb/conf && \
 COPY --from=web-build ui /opt/maxkb/app/ui
 WORKDIR /opt/maxkb/app
 RUN python3 -m venv /opt/py3 && \
-    pip install poetry --break-system-packages && \
+    pip install poetry==1.8.5 --break-system-packages && \
     poetry config virtualenvs.create false && \
     . /opt/py3/bin/activate && \
     if [ "$(uname -m)" = "x86_64" ]; then sed -i 's/^torch.*/torch = {version = "^2.2.1+cpu", source = "pytorch"}/g' pyproject.toml; fi && \
```
```diff
@@ -129,11 +129,12 @@ const delMulDocument: (
 const batchRefresh: (
   dataset_id: string,
   data: any,
+  stateList: Array<string>,
   loading?: Ref<boolean>
-) => Promise<Result<boolean>> = (dataset_id, data, loading) => {
+) => Promise<Result<boolean>> = (dataset_id, data, stateList, loading) => {
   return put(
     `${prefix}/${dataset_id}/document/batch_refresh`,
-    { id_list: data },
+    { id_list: data, state_list: stateList },
     undefined,
     loading
   )
@@ -157,11 +158,12 @@ const getDocumentDetail: (dataset_id: string, document_id: string) => Promise<Re
 const putDocumentRefresh: (
   dataset_id: string,
   document_id: string,
+  state_list: Array<string>,
   loading?: Ref<boolean>
-) => Promise<Result<any>> = (dataset_id, document_id, loading) => {
+) => Promise<Result<any>> = (dataset_id, document_id, state_list, loading) => {
   return put(
     `${prefix}/${dataset_id}/document/${document_id}/refresh`,
-    undefined,
+    { state_list },
     undefined,
     loading
   )
```
```diff
@@ -121,12 +121,11 @@ export class ChatRecordManage {

     this.chat.answer_text = this.chat.answer_text + chunk_answer
   }
-  get_current_up_node() {
-    for (let i = this.node_list.length - 2; i >= 0; i--) {
-      const n = this.node_list[i]
-      if (n.content.length > 0) {
-        return n
-      }
-    }
+  get_current_up_node(run_node: any) {
+    const index = this.node_list.findIndex((item) => item == run_node)
+    if (index > 0) {
+      const n = this.node_list[index - 1]
+      return n
+    }
     return undefined
   }
@@ -144,14 +143,13 @@ export class ChatRecordManage {
     const index = this.node_list.indexOf(run_node)
     let current_up_node = undefined
     if (index > 0) {
-      current_up_node = this.get_current_up_node()
+      current_up_node = this.get_current_up_node(run_node)
     }
     let answer_text_list_index = 0

     if (
       current_up_node == undefined ||
       run_node.view_type == 'single_view' ||
-      (run_node.view_type == 'many_view' && current_up_node.view_type == 'single_view')
+      current_up_node.view_type == 'single_view'
     ) {
       const none_index = this.findIndex(
         this.chat.answer_text_list,
```
```diff
@@ -15,7 +15,7 @@
         <div class="flex-between">
           <div class="flex">
             <img :src="getImgUrl(item && item?.document_name)" alt="" width="20" />
-            <div class="ml-4" v-if="!item.source_url">
+            <div class="ml-4 ellipsis-1" :title="item?.document_name" v-if="!item.source_url">
               <p>{{ item && item?.document_name }}</p>
             </div>
             <div class="ml-8" v-else>
@@ -28,14 +28,14 @@
             : meta?.source_url
         "
         target="_blank"
-        class="ellipsis-1"
+        class="ellipsis-1 break-all"
         :title="data?.document_name?.trim()"
       >
         {{ data?.document_name?.trim() }}
       </a>
     </template>
     <template v-else>
-      <span class="ellipsis-1" :title="data?.document_name?.trim()">
+      <span class="ellipsis-1 break-all" :title="data?.document_name?.trim()">
         {{ data?.document_name?.trim() }}
       </span>
     </template>
@@ -45,7 +45,9 @@
         <img src="@/assets/icon_document.svg" style="width: 58%" alt="" />
       </AppAvatar>

-      <span class="ellipsis-1" :title="data?.dataset_name"> {{ data?.dataset_name }}</span>
+      <span class="ellipsis-1 break-all" :title="data?.dataset_name">
+        {{ data?.dataset_name }}</span
+      >
     </div>
   </div>
 </template>
@@ -80,7 +82,7 @@ const meta = computed(() => (isMetaObject.value ? props.data.meta : parsedMeta.v
 .paragraph-source-card {
   .footer-content {
     .item {
-      width: 50%;
+      max-width: 50%;
     }
   }
 }
@@ -96,7 +98,7 @@ const meta = computed(() => (isMetaObject.value ? props.data.meta : parsedMeta.v
   .footer-content {
     display: block;
     .item {
-      width: 100%;
+      max-width: 100%;
     }
   }
 }
```
```diff
@@ -401,7 +401,7 @@ const startRecording = async () => {
       2、若无 https 配置则需要修改浏览器安全配置,Chrome 设置如下:<br/>
       (1) 地址栏输入chrome://flags/#unsafely-treat-insecure-origin-as-secure;<br/>
       (2) 将 http 站点配置在文本框中,例如: http://127.0.0.1:8080。</p>
-      <img src="${new URL(`../../assets/tipIMG.jpg`, import.meta.url).href}" style="width: 100%;" />`,
+      <img src="${new URL(`@/assets/tipIMG.jpg`, import.meta.url).href}" style="width: 100%;" />`,
       {
         confirmButtonText: '我知道了',
         dangerouslyUseHTMLString: true,
@@ -418,7 +418,7 @@ const startRecording = async () => {
       2、若无 https 配置则需要修改浏览器安全配置,Chrome 设置如下:<br/>
       (1) 地址栏输入chrome://flags/#unsafely-treat-insecure-origin-as-secure;<br/>
       (2) 将 http 站点配置在文本框中,例如: http://127.0.0.1:8080。</p>
-      <img src="${new URL(`../../assets/tipIMG.jpg`, import.meta.url).href}" style="width: 100%;" />`,
+      <img src="${new URL(`@/assets/tipIMG.jpg`, import.meta.url).href}" style="width: 100%;" />`,
       {
         confirmButtonText: '我知道了',
         dangerouslyUseHTMLString: true,
```
```diff
@@ -8,99 +8,15 @@
     <el-form label-position="top" ref="displayFormRef" :model="form">
       <el-form-item>
         <el-space direction="vertical" alignment="start">
-          <el-checkbox v-model="form.show_source" label="显示知识来源" />
-          <el-checkbox
-            v-model="form.show_history"
-            label="显示历史记录"
-            v-if="user.isEnterprise()"
-          />
-          <el-checkbox
-            v-model="form.draggable"
-            label="可拖拽位置(浮窗模式)"
-            v-if="user.isEnterprise()"
-          />
-          <el-checkbox
-            v-model="form.show_guide"
-            label="显示引导图(浮窗模式)"
-            v-if="user.isEnterprise()"
+          <el-checkbox
+            v-model="form.show_source"
+            :label="isWorkFlow(detail.type) ? '显示执行详情' : '显示知识来源'"
           />
         </el-space>
       </el-form-item>
-      <el-form-item label="对话头像" v-if="user.isEnterprise()">
-        <div class="flex mt-8">
-          <div class="border border-r-4 mr-16" style="padding: 8px">
-            <el-image
-              v-if="imgUrl.avatar"
-              :src="imgUrl.avatar"
-              alt=""
-              fit="cover"
-              style="width: 50px; height: 50px; display: block"
-            />
-            <LogoIcon v-else height="50px" style="width: 50px; height: 50px; display: block" />
-          </div>
-
-          <el-upload
-            ref="uploadRef"
-            action="#"
-            :auto-upload="false"
-            :show-file-list="false"
-            accept="image/jpeg, image/png, image/gif"
-            :on-change="(file: any, fileList: any) => onChange(file, fileList, 'avatar')"
-          >
-            <el-button icon="Upload">{{
-              $t('views.applicationOverview.appInfo.EditAvatarDialog.upload')
-            }}</el-button>
-            <template #tip>
-              <div class="el-upload__tip info" style="margin-top: 0">
-                建议尺寸 32*32,支持 JPG、PNG、GIF,大小不超过 10 MB
-              </div>
-            </template>
-          </el-upload>
-        </div>
-      </el-form-item>
-      <el-form-item label="浮窗入口图标" v-if="user.isEnterprise()">
-        <div class="flex mt-8">
-          <div class="border border-r-4 mr-16" style="padding: 8px">
-            <el-image
-              v-if="imgUrl.float_icon"
-              :src="imgUrl.float_icon"
-              alt=""
-              fit="cover"
-              style="width: 50px; height: 50px; display: block"
-            />
-            <img
-              v-else
-              src="@/assets/logo/logo.svg"
-              height="50px"
-              style="width: 50px; height: 50px; display: block"
-            />
-          </div>
-
-          <el-upload
-            ref="uploadRef"
-            action="#"
-            :auto-upload="false"
-            :show-file-list="false"
-            accept="image/jpeg, image/png, image/gif"
-            :on-change="(file: any, fileList: any) => onChange(file, fileList, 'float_icon')"
-          >
-            <el-button icon="Upload">{{
-              $t('views.applicationOverview.appInfo.EditAvatarDialog.upload')
-            }}</el-button>
-            <template #tip>
-              <div class="el-upload__tip info" style="margin-top: 0">
-                建议尺寸 32*32,支持 JPG、PNG、GIF,大小不超过 10 MB
-              </div>
-            </template>
-          </el-upload>
-        </div>
-      </el-form-item>
     </el-form>
     <template #footer>
       <span class="dialog-footer">
-        <el-button v-if="user.isEnterprise()" type="primary" @click.prevent="resetForm" link
-          >恢复默认
-        </el-button>
         <el-button @click.prevent="dialogVisible = false"
           >{{ $t('views.applicationOverview.appInfo.LimitDialog.cancelButtonText') }}
         </el-button>
@@ -116,11 +32,9 @@ import { ref, watch } from 'vue'
 import { useRoute } from 'vue-router'
 import type { FormInstance, FormRules, UploadFiles } from 'element-plus'
 import applicationApi from '@/api/application'
-import applicationXpackApi from '@/api/application-xpack'
+import { isWorkFlow } from '@/utils/application'
 import { MsgSuccess, MsgError } from '@/utils/message'
 import { t } from '@/locales'
-import useStore from '@/stores'
-const { user } = useStore()

 const route = useRoute()
 const {
@@ -129,33 +43,13 @@ const {

 const emit = defineEmits(['refresh'])

-const defaultSetting = {
-  show_source: false,
-  show_history: true,
-  draggable: true,
-  show_guide: true,
-  avatar: '',
-  float_icon: ''
-}
-
 const displayFormRef = ref()
 const form = ref<any>({
   show_source: false
 })

-const xpackForm = ref<any>({
-  show_source: false,
-  show_history: false,
-  draggable: false,
-  show_guide: false,
-  avatar: '',
-  float_icon: ''
-})
-
-const imgUrl = ref<any>({
-  avatar: '',
-  float_icon: ''
-})
+const detail = ref<any>(null)

 const dialogVisible = ref<boolean>(false)
 const loading = ref(false)
@@ -165,50 +59,12 @@ watch(dialogVisible, (bool) => {
     form.value = {
       show_source: false
     }
-    imgUrl.value = {
-      avatar: '',
-      float_icon: ''
-    }
   }
 })

-function resetForm() {
-  form.value = {
-    ...defaultSetting
-  }
-  imgUrl.value = {
-    avatar: '',
-    float_icon: ''
-  }
-}
-
-const onChange = (file: any, fileList: UploadFiles, attr: string) => {
-  // 1. Validate the file size; files must not exceed 10 MB
-  const isLimit = file?.size / 1024 / 1024 < 10
-  if (!isLimit) {
-    // @ts-ignore
-    MsgError(t('views.applicationOverview.appInfo.EditAvatarDialog.fileSizeExceeded'))
-    return false
-  } else {
-    xpackForm.value[attr] = file.raw
-    imgUrl.value[attr] = URL.createObjectURL(file.raw)
-  }
-}
-
-const open = (data: any) => {
-  if (user.isEnterprise()) {
-    xpackForm.value.show_source = data.show_source
-    xpackForm.value.show_history = data.show_history
-    xpackForm.value.draggable = data.draggable
-    xpackForm.value.show_guide = data.show_guide
-    xpackForm.value.avatar = data.avatar
-    xpackForm.value.float_icon = data.float_icon
-    imgUrl.value.avatar = data.avatar
-    imgUrl.value.float_icon = data.float_icon
-    form.value = xpackForm.value
-  } else {
-    form.value.show_source = data.show_source
-  }
+const open = (data: any, content: any) => {
+  detail.value = content
+  form.value.show_source = data.show_source

   dialogVisible.value = true
 }
@@ -217,28 +73,15 @@ const submit = async (formEl: FormInstance | undefined) => {
   if (!formEl) return
   await formEl.validate((valid, fields) => {
     if (valid) {
-      if (user.isEnterprise()) {
-        let fd = new FormData()
-        Object.keys(form.value).map((item) => {
-          fd.append(item, form.value[item])
-        })
-        applicationXpackApi.putAccessToken(id as string, fd, loading).then((res) => {
-          emit('refresh')
-          // @ts-ignore
-          MsgSuccess(t('views.applicationOverview.appInfo.LimitDialog.settingSuccessMessage'))
-          dialogVisible.value = false
-        })
-      } else {
-        const obj = {
-          show_source: form.value.show_source
-        }
-        applicationApi.putAccessToken(id as string, obj, loading).then((res) => {
-          emit('refresh')
-          // @ts-ignore
-          MsgSuccess(t('views.applicationOverview.appInfo.LimitDialog.settingSuccessMessage'))
-          dialogVisible.value = false
-        })
+      const obj = {
+        show_source: form.value.show_source
       }
+      applicationApi.putAccessToken(id as string, obj, loading).then((res) => {
+        emit('refresh')
+        // @ts-ignore
+        MsgSuccess(t('views.applicationOverview.appInfo.LimitDialog.settingSuccessMessage'))
+        dialogVisible.value = false
+      })
     }
   })
 }
```
```diff
@@ -264,7 +264,10 @@
           </el-card>

           <el-space direction="vertical" alignment="start" :size="2">
-            <el-checkbox v-model="form.show_source" label="显示知识来源" />
+            <el-checkbox
+              v-model="form.show_source"
+              :label="isWorkFlow(detail.type) ? '显示执行详情' : '显示知识来源'"
+            />
             <el-checkbox v-model="form.show_history" label="显示历史记录" />
             <el-checkbox v-model="form.show_guide" label="显示引导图(浮窗模式)" />
             <el-checkbox v-model="form.disclaimer" label="免责声明" @change="changeDisclaimer" />
@@ -298,7 +301,7 @@
 import { computed, ref, watch } from 'vue'
 import { useRoute } from 'vue-router'
 import type { FormInstance, FormRules, UploadFiles } from 'element-plus'
-import { isAppIcon } from '@/utils/application'
+import { isAppIcon, isWorkFlow } from '@/utils/application'
 import applicationXpackApi from '@/api/application-xpack'
 import { MsgSuccess, MsgError } from '@/utils/message'
 import { t } from '@/locales'
```
```diff
@@ -274,7 +274,7 @@ function openDisplaySettingDialog() {
   if (user.isEnterprise()) {
     XPackDisplaySettingDialogRef.value?.open(accessToken.value, detail.value)
   } else {
-    DisplaySettingDialogRef.value?.open(accessToken.value)
+    DisplaySettingDialogRef.value?.open(accessToken.value, detail.value)
   }
 }
 function openEditAvatar() {
```
```diff
@@ -243,7 +243,6 @@ function onmousedown(item: any, data?: any, type?: string) {
       }
     }
   }
-  console.log('xx', item)
   props.workflowRef?.onmousedown(item)
   emit('onmousedown', item)
 }
```
```diff
@@ -0,0 +1,41 @@
+<template>
+  <el-dialog v-model="dialogVisible" title="选择向量化内容" width="500" :before-close="close">
+    <el-radio-group v-model="state">
+      <el-radio value="error" size="large">向量化未成功的分段</el-radio>
+      <el-radio value="all" size="large">全部分段</el-radio>
+    </el-radio-group>
+    <template #footer>
+      <div class="dialog-footer">
+        <el-button @click="close">取消</el-button>
+        <el-button type="primary" @click="submit"> 提交 </el-button>
+      </div>
+    </template>
+  </el-dialog>
+</template>
+<script setup lang="ts">
+import { ref } from 'vue'
+const dialogVisible = ref<boolean>(false)
+const state = ref<'all' | 'error'>('error')
+const stateMap = {
+  all: ['0', '1', '2', '3', '4', '5', 'n'],
+  error: ['0', '1', '3', '4', '5', 'n']
+}
+const submit_handle = ref<(stateList: Array<string>) => void>()
+const submit = () => {
+  if (submit_handle.value) {
+    submit_handle.value(stateMap[state.value])
+  }
+  close()
+}
+
+const open = (handle: (stateList: Array<string>) => void) => {
+  submit_handle.value = handle
+  dialogVisible.value = true
+}
+const close = () => {
+  submit_handle.value = undefined
+  dialogVisible.value = false
+}
+defineExpose({ open, close })
+</script>
+<style lang="scss" scoped></style>
```
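The new dialog's two options differ only in whether '2' appears in the submitted list: "all" re-embeds every segment, while the default "error" mode omits '2', which on the backend corresponds to the SUCCESS character of the packed status string, so already-embedded segments are left alone. Restated as data; apart from SUCCESS being '2' (implied by the filtering), the meaning of each character is an assumption:

```python
# The two modes the dialog submits; besides SUCCESS = '2' (implied by the
# server-side filtering), the meaning of each character is an assumption.
STATE_ALL = ['0', '1', '2', '3', '4', '5', 'n']   # every segment
STATE_ERROR = ['0', '1', '3', '4', '5', 'n']      # everything except SUCCESS ('2')
assert set(STATE_ALL) - set(STATE_ERROR) == {'2'}
```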
```diff
@@ -422,6 +422,7 @@
       </el-text>
       <el-button class="ml-16" type="primary" link @click="clearSelection"> 清空 </el-button>
     </div>
+    <EmbeddingContentDialog ref="embeddingContentDialogRef"></EmbeddingContentDialog>
   </LayoutContainer>
 </template>
 <script setup lang="ts">
@@ -439,6 +440,7 @@ import { MsgSuccess, MsgConfirm, MsgError } from '@/utils/message'
 import useStore from '@/stores'
 import StatusVlue from '@/views/document/component/Status.vue'
 import GenerateRelatedDialog from '@/components/generate-related-dialog/index.vue'
+import EmbeddingContentDialog from '@/views/document/component/EmbeddingContentDialog.vue'
 import { TaskType, State } from '@/utils/status'
 const router = useRouter()
 const route = useRoute()
@@ -469,7 +471,7 @@ onBeforeRouteLeave((to: any) => {
 })
 const beforePagination = computed(() => common.paginationConfig[storeKey])
 const beforeSearch = computed(() => common.search[storeKey])
-
+const embeddingContentDialogRef = ref<InstanceType<typeof EmbeddingContentDialog>>()
 const SyncWebDialogRef = ref()
 const loading = ref(false)
 let interval: any
@@ -621,10 +623,14 @@ function syncDocument(row: any) {
       .catch(() => {})
   }
 }

 function refreshDocument(row: any) {
-  documentApi.putDocumentRefresh(row.dataset_id, row.id).then(() => {
-    getList()
-  })
+  const embeddingDocument = (stateList: Array<string>) => {
+    return documentApi.putDocumentRefresh(row.dataset_id, row.id, stateList).then(() => {
+      getList()
+    })
+  }
+  embeddingContentDialogRef.value?.open(embeddingDocument)
 }

 function rowClickHandle(row: any, column: any) {
@@ -691,19 +697,16 @@ function deleteMulDocument() {
 }

 function batchRefresh() {
-  const arr: string[] = []
-  multipleSelection.value.map((v) => {
-    if (v) {
-      arr.push(v.id)
-    }
-  })
-  documentApi.batchRefresh(id, arr, loading).then(() => {
-    MsgSuccess('批量向量化成功')
-    multipleTableRef.value?.clearSelection()
-  })
+  const arr: string[] = multipleSelection.value.map((v) => v.id)
+  const embeddingBatchDocument = (stateList: Array<string>) => {
+    documentApi.batchRefresh(id, arr, stateList, loading).then(() => {
+      MsgSuccess('批量向量化成功')
+      multipleTableRef.value?.clearSelection()
+    })
+  }
+  embeddingContentDialogRef.value?.open(embeddingBatchDocument)
 }


 function deleteDocument(row: any) {
   MsgConfirm(
     `是否删除文档:${row.name} ?`,
```
```diff
@@ -20,7 +20,7 @@
       }"
     >
       <NodeCascader
-        ref="nodeCascaderRef"
+        ref="applicationNodeFormRef"
        :nodeModel="nodeModel"
        class="w-full"
        placeholder="请选择检索问题"
@@ -142,10 +142,10 @@
 </template>

 <script setup lang="ts">
-import { set, groupBy } from 'lodash'
+import { set, groupBy, create } from 'lodash'
 import { app } from '@/main'
 import NodeContainer from '@/workflow/common/NodeContainer.vue'
-import { ref, computed, onMounted } from 'vue'
+import { ref, computed, onMounted, onActivated } from 'vue'
 import NodeCascader from '@/workflow/common/NodeCascader.vue'
 import type { FormInstance } from 'element-plus'
 import applicationApi from '@/api/application'
@@ -202,7 +202,6 @@ const update_field = () => {
     .then((ok) => {
       const old_api_input_field_list = props.nodeModel.properties.node_data.api_input_field_list
       const old_user_input_field_list = props.nodeModel.properties.node_data.user_input_field_list
-
       if (isWorkFlow(ok.data.type)) {
         const nodeData = ok.data.work_flow.nodes[0].properties.node_data
         const new_api_input_field_list = ok.data.work_flow.nodes[0].properties.api_input_field_list
@@ -213,11 +212,17 @@ const update_field = () => {
             (old_item: any) => old_item.variable == item.variable
           )
           if (find_field) {
-            return { ...item, default_value: JSON.parse(JSON.stringify(find_field.default_value)) }
+            return {
+              ...item,
+              value: find_field.value,
+              label:
+                typeof item.label === 'object' && item.label != null ? item.label.label : item.label
+            }
           } else {
             return item
           }
         })
+        console.log(merge_api_input_field_list)
         set(
           props.nodeModel.properties.node_data,
           'api_input_field_list',
@@ -228,12 +233,16 @@ const update_field = () => {
             (old_item: any) => old_item.field == item.field
           )
           if (find_field) {
-            return { ...item, default_value: JSON.parse(JSON.stringify(find_field.default_value)) }
+            return {
+              ...item,
+              value: find_field.value,
+              label:
+                typeof item.label === 'object' && item.label != null ? item.label.label : item.label
+            }
           } else {
             return item
           }
         })
+        console.log(merge_user_input_field_list)
         set(
           props.nodeModel.properties.node_data,
           'user_input_field_list',
@@ -255,7 +264,7 @@ const update_field = () => {
         }
       })
       .catch((err) => {
-        set(props.nodeModel.properties, 'status', 500)
+        // set(props.nodeModel.properties, 'status', 500)
       })
 }
```