liqiang-fit2cloud 2025-04-24 12:33:38 +08:00
commit 7bcb770ee5
56 changed files with 751 additions and 303 deletions

.github/dependabot.yml (vendored, new file)
View File

@ -0,0 +1,9 @@
version: 2
updates:
- package-ecosystem: "pip"
directory: "/"
schedule:
interval: "weekly"
timezone: "Asia/Shanghai"
day: "friday"
target-branch: "v2"
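In effect, this new config has Dependabot scan the pip dependencies at the repository root once a week (Fridays, Asia/Shanghai time) and open its update pull requests against the v2 branch.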

View File

@ -13,7 +13,7 @@
MaxKB = Max Knowledge Brain. It is a powerful and easy-to-use AI assistant that integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
- **RAG Pipeline**: Supports direct uploading of documents and automatic crawling of online documents, with automatic text splitting and vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
- **Flexible Orchestration**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
- **Multi-Modal**: Native support for text, image, audio, and video input and output.

View File

@ -40,6 +40,7 @@ class BaseStartStepNode(IStarNode):
self.context['document'] = details.get('document_list')
self.context['image'] = details.get('image_list')
self.context['audio'] = details.get('audio_list')
self.context['other'] = details.get('other_list')
self.status = details.get('status')
self.err_message = details.get('err_message')
for key, value in workflow_variable.items():
@ -59,7 +60,8 @@ class BaseStartStepNode(IStarNode):
'question': question,
'image': self.workflow_manage.image_list,
'document': self.workflow_manage.document_list,
'audio': self.workflow_manage.audio_list
'audio': self.workflow_manage.audio_list,
'other': self.workflow_manage.other_list,
}
return NodeResult(node_variable, workflow_variable)
@ -83,5 +85,6 @@ class BaseStartStepNode(IStarNode):
'image_list': self.context.get('image'),
'document_list': self.context.get('document'),
'audio_list': self.context.get('audio'),
'other_list': self.context.get('other'),
'global_fields': global_fields
}

View File

@ -238,6 +238,7 @@ class WorkflowManage:
base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None,
document_list=None,
audio_list=None,
other_list=None,
start_node_id=None,
start_node_data=None, chat_record=None, child_node=None):
if form_data is None:
@ -248,12 +249,15 @@ class WorkflowManage:
document_list = []
if audio_list is None:
audio_list = []
if other_list is None:
other_list = []
self.start_node_id = start_node_id
self.start_node = None
self.form_data = form_data
self.image_list = image_list
self.document_list = document_list
self.audio_list = audio_list
self.other_list = other_list
self.params = params
self.flow = flow
self.context = {}

View File

@ -11,7 +11,7 @@ import uuid
from django.contrib.postgres.fields import ArrayField
from django.db import models
from langchain.schema import HumanMessage, AIMessage
from django.utils.translation import gettext as _
from common.encoder.encoder import SystemEncoder
from common.mixins.app_model_mixin import AppModelMixin
from dataset.models.data_set import DataSet
@ -167,7 +167,11 @@ class ChatRecord(AppModelMixin):
return HumanMessage(content=self.problem_text)
def get_ai_message(self):
return AIMessage(content=self.answer_text)
answer_text = self.answer_text
if answer_text is None or len(str(answer_text).strip()) == 0:
answer_text = _(
'Sorry, no relevant content was found. Please re-describe your problem or provide more information. ')
return AIMessage(content=answer_text)
def get_node_details_runtime_node_id(self, runtime_node_id):
return self.details.get(runtime_node_id, None)

View File

@ -213,12 +213,21 @@ class OpenAIChatSerializer(serializers.Serializer):
return instance.get('messages')[-1].get('content')
@staticmethod
def generate_chat(chat_id, application_id, message, client_id):
def generate_chat(chat_id, application_id, message, client_id, asker=None):
if chat_id is None:
chat_id = str(uuid.uuid1())
chat = QuerySet(Chat).filter(id=chat_id).first()
if chat is None:
Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id).save()
asker_dict = {'user_name': '游客'}  # '游客' = "Guest", the default asker name
if asker is not None:
if isinstance(asker, str):
asker_dict = {
'user_name': asker
}
elif isinstance(asker, dict):
asker_dict = asker
Chat(id=chat_id, application_id=application_id, abstract=message[0:1024], client_id=client_id,
asker=asker_dict).save()
return chat_id
def chat(self, instance: Dict, with_valid=True):
@ -232,7 +241,8 @@ class OpenAIChatSerializer(serializers.Serializer):
application_id = self.data.get('application_id')
client_id = self.data.get('client_id')
client_type = self.data.get('client_type')
chat_id = self.generate_chat(chat_id, application_id, message, client_id)
chat_id = self.generate_chat(chat_id, application_id, message, client_id,
asker=instance.get('form_data', {}).get("asker"))
return ChatMessageSerializer(
data={
'chat_id': chat_id, 'message': message,
@ -245,6 +255,7 @@ class OpenAIChatSerializer(serializers.Serializer):
'image_list': instance.get('image_list', []),
'document_list': instance.get('document_list', []),
'audio_list': instance.get('audio_list', []),
'other_list': instance.get('other_list', []),
}
).chat(base_to_response=OpenaiToResponse())
@ -274,6 +285,7 @@ class ChatMessageSerializer(serializers.Serializer):
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
other_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Other")))
child_node = serializers.DictField(required=False, allow_null=True,
error_messages=ErrMessage.dict(_("Child Nodes")))
@ -372,6 +384,7 @@ class ChatMessageSerializer(serializers.Serializer):
image_list = self.data.get('image_list')
document_list = self.data.get('document_list')
audio_list = self.data.get('audio_list')
other_list = self.data.get('other_list')
user_id = chat_info.application.user_id
chat_record_id = self.data.get('chat_record_id')
chat_record = None
@ -388,7 +401,7 @@ class ChatMessageSerializer(serializers.Serializer):
'client_id': client_id,
'client_type': client_type,
'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type),
base_to_response, form_data, image_list, document_list, audio_list,
base_to_response, form_data, image_list, document_list, audio_list, other_list,
self.data.get('runtime_node_id'),
self.data.get('node_data'), chat_record, self.data.get('child_node'))
r = work_flow_manage.run()
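Taken together, the serializer changes above mean an OpenAI-compatible request can now attach an asker identity and arbitrary extra files. A hypothetical client-side sketch; the endpoint path, header, and credential are placeholders, while `form_data.asker` and `other_list` match the fields read by OpenAIChatSerializer above:

```python
import requests

payload = {
    "messages": [{"role": "user", "content": "Summarize the attached slides"}],
    "stream": False,
    # Read by OpenAIChatSerializer.chat() above:
    "form_data": {"asker": {"user_name": "alice"}},  # a plain string is also accepted
    "other_list": [{"name": "deck.ppt", "url": "/api/file/xxx", "file_id": "xxx"}],
}
resp = requests.post(
    "https://maxkb.example.com/api/application/<application_id>/chat/completions",  # assumed path
    json=payload,
    headers={"Authorization": "Bearer <api-key>"},  # placeholder credential
)
print(resp.json())
```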

View File

@ -144,6 +144,8 @@ class ChatView(APIView):
'document_list') if 'document_list' in request.data else [],
'audio_list': request.data.get(
'audio_list') if 'audio_list' in request.data else [],
'other_list': request.data.get(
'other_list') if 'other_list' in request.data else [],
'client_type': request.auth.client_type,
'node_id': request.data.get('node_id', None),
'runtime_node_id': request.data.get('runtime_node_id', None),

View File

@ -112,11 +112,7 @@ def get_image_id_func():
title_font_list = [
[36, 100],
[26, 36],
[24, 26],
[22, 24],
[18, 22],
[16, 18]
[30, 36]
]
@ -130,7 +126,7 @@ def get_title_level(paragraph: Paragraph):
if len(paragraph.runs) == 1:
font_size = paragraph.runs[0].font.size
pt = font_size.pt
if pt >= 16:
if pt >= 30:
for _value, index in zip(title_font_list, range(len(title_font_list))):
if pt >= _value[0] and pt < _value[1]:
return index + 1
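The effect of this hunk: the parser previously promoted any single-run paragraph of 16pt or larger to one of six heading levels; now only 30pt and above qualifies, leaving two levels. A standalone sketch of the new lookup (the helper name is illustrative; the table and comparisons mirror the diff):

```python
# Minimal sketch of the new mapping: only runs >= 30pt become titles.
title_font_list = [[36, 100], [30, 36]]

def title_level_for(pt: float):
    if pt >= 30:
        for index, (low, high) in enumerate(title_font_list):
            if low <= pt < high:
                return index + 1  # 1 = top-level title
    return None  # body text

assert title_level_for(40) == 1
assert title_level_for(32) == 2
assert title_level_for(24) is None  # was a level-3 heading before this change, now body text
```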

View File

@ -4536,7 +4536,7 @@ msgstr "修改知识库信息"
#: community/apps/dataset/views/document.py:463
#: community/apps/dataset/views/document.py:464
msgid "Get the knowledge base paginated list"
msgstr "获取知识库分页列表"
msgstr "获取知识库文档分页列表"
#: community/apps/dataset/views/document.py:31
#: community/apps/dataset/views/document.py:32

View File

@ -4545,7 +4545,7 @@ msgstr "修改知識庫信息"
#: community/apps/dataset/views/document.py:463
#: community/apps/dataset/views/document.py:464
msgid "Get the knowledge base paginated list"
msgstr "獲取知識庫分頁列表"
msgstr "獲取知識庫文档分頁列表"
#: community/apps/dataset/views/document.py:31
#: community/apps/dataset/views/document.py:32

View File

@ -106,7 +106,10 @@ class MaxKBBaseModel(ABC):
optional_params = {}
for key, value in model_kwargs.items():
if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
optional_params[key] = value
if key == 'extra_body' and isinstance(value, dict):
optional_params = {**optional_params, **value}
else:
optional_params[key] = value
return optional_params
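The practical effect: a nested `extra_body` dict coming from the model settings is now flattened into the returned parameters rather than kept under its own key, which pairs with the `extra_body=optional_params` constructor changes throughout the rest of this commit. A runnable sketch that copies the updated logic so it works outside the MaxKB codebase:

```python
def filter_optional_params(model_kwargs: dict) -> dict:
    # Standalone copy of the updated MaxKBBaseModel.filter_optional_params
    optional_params = {}
    for key, value in model_kwargs.items():
        if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
            if key == 'extra_body' and isinstance(value, dict):
                optional_params = {**optional_params, **value}  # flatten nested dict
            else:
                optional_params[key] = value
    return optional_params

print(filter_optional_params({
    'model_id': 'm-1',                          # stripped: internal key
    'temperature': 0.7,
    'extra_body': {'enable_thinking': False},   # hypothetical vendor option
}))
# before this commit: {'temperature': 0.7, 'extra_body': {'enable_thinking': False}}
# after this commit:  {'temperature': 0.7, 'enable_thinking': False}
```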

View File

@ -15,9 +15,8 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)
return chat_tong_yi
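This is the first of many `**optional_params` → `extra_body=optional_params` changes in this commit. The rationale, as far as the diff shows: ChatOpenAI validates its constructor keywords, so vendor-specific options passed as plain kwargs can be rejected, while anything under `extra_body` is forwarded verbatim in the JSON request body by the underlying OpenAI client. A minimal sketch of the pattern; `enable_search` is a hypothetical DashScope-style option and the key is a placeholder:

```python
from langchain_openai import ChatOpenAI

optional_params = {"enable_search": True}  # hypothetical provider-specific option

llm = ChatOpenAI(
    model="qwen-vl-max",
    api_key="sk-placeholder",
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    streaming=True,
    # Sent untouched in the request body instead of being validated away:
    extra_body=optional_params,
)
```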

View File

@ -20,5 +20,5 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params
extra_body=optional_params
)

View File

@ -1,10 +1,12 @@
import os
import re
from typing import Dict
from typing import Dict, List
from botocore.config import Config
from langchain_community.chat_models import BedrockChat
from langchain_core.messages import BaseMessage, get_buffer_string
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -72,6 +74,20 @@ class BedrockModel(MaxKBBaseModel, BedrockChat):
config=config
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
try:
return super().get_num_tokens_from_messages(messages)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
def get_num_tokens(self, text: str) -> int:
try:
return super().get_num_tokens(text)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return len(tokenizer.encode(text))
def _update_aws_credentials(profile_name, access_key_id, secret_access_key):
credentials_path = os.path.join(os.path.expanduser("~"), ".aws", "credentials")
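The two token-counting overrides added to BedrockModel follow a fallback pattern: use the provider's own counter, and if it raises (for example when an optional tokenizer dependency is missing), approximate with MaxKB's local tokenizer. The analogous helpers are removed from TencentCloudChatModel later in this diff, presumably because BaseChatOpenAI now provides them. A self-contained sketch of the fallback arithmetic, with a stand-in tokenizer since TokenizerManage is MaxKB-internal:

```python
from langchain_core.messages import HumanMessage, get_buffer_string

class FakeTokenizer:
    # Stand-in for TokenizerManage.get_tokenizer(); the real one returns token ids
    def encode(self, text: str) -> list:
        return text.split()

tokenizer = FakeTokenizer()
messages = [HumanMessage(content="hello bedrock")]
# get_buffer_string([m]) renders "Human: hello bedrock" -> 3 pseudo-tokens
print(sum(len(tokenizer.encode(get_buffer_string([m]))) for m in messages))
```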

View File

@ -1,15 +1,16 @@
# coding=utf-8
import warnings
from typing import List, Dict, Optional, Any, Iterator, cast, Type, Union
from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping
import openai
from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, AIMessageChunk
from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, HumanMessageChunk, AIMessageChunk, \
SystemMessageChunk, FunctionMessageChunk, ChatMessageChunk
from langchain_core.messages.ai import UsageMetadata
from langchain_core.messages.tool import tool_call_chunk, ToolMessageChunk
from langchain_core.outputs import ChatGenerationChunk
from langchain_core.runnables import RunnableConfig, ensure_config
from langchain_core.utils.pydantic import is_basemodel_subclass
from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
from langchain_openai.chat_models.base import _create_usage_metadata
from common.config.tokenizer_manage_config import TokenizerManage
@ -19,6 +20,65 @@ def custom_get_token_ids(text: str):
return tokenizer.encode(text)
def _convert_delta_to_message_chunk(
_dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
) -> BaseMessageChunk:
id_ = _dict.get("id")
role = cast(str, _dict.get("role"))
content = cast(str, _dict.get("content") or "")
additional_kwargs: dict = {}
if 'reasoning_content' in _dict:
additional_kwargs['reasoning_content'] = _dict.get('reasoning_content')
if _dict.get("function_call"):
function_call = dict(_dict["function_call"])
if "name" in function_call and function_call["name"] is None:
function_call["name"] = ""
additional_kwargs["function_call"] = function_call
tool_call_chunks = []
if raw_tool_calls := _dict.get("tool_calls"):
additional_kwargs["tool_calls"] = raw_tool_calls
try:
tool_call_chunks = [
tool_call_chunk(
name=rtc["function"].get("name"),
args=rtc["function"].get("arguments"),
id=rtc.get("id"),
index=rtc["index"],
)
for rtc in raw_tool_calls
]
except KeyError:
pass
if role == "user" or default_class == HumanMessageChunk:
return HumanMessageChunk(content=content, id=id_)
elif role == "assistant" or default_class == AIMessageChunk:
return AIMessageChunk(
content=content,
additional_kwargs=additional_kwargs,
id=id_,
tool_call_chunks=tool_call_chunks, # type: ignore[arg-type]
)
elif role in ("system", "developer") or default_class == SystemMessageChunk:
if role == "developer":
additional_kwargs = {"__openai_role__": "developer"}
else:
additional_kwargs = {}
return SystemMessageChunk(
content=content, id=id_, additional_kwargs=additional_kwargs
)
elif role == "function" or default_class == FunctionMessageChunk:
return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
elif role == "tool" or default_class == ToolMessageChunk:
return ToolMessageChunk(
content=content, tool_call_id=_dict["tool_call_id"], id=id_
)
elif role or default_class == ChatMessageChunk:
return ChatMessageChunk(content=content, role=role, id=id_)
else:
return default_class(content=content, id=id_) # type: ignore
class BaseChatOpenAI(ChatOpenAI):
usage_metadata: dict = {}
custom_get_token_ids = custom_get_token_ids
@ -26,7 +86,13 @@ class BaseChatOpenAI(ChatOpenAI):
def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
return self.usage_metadata
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Optional[
Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
] = None,
) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
try:
return super().get_num_tokens_from_messages(messages)
@ -44,114 +110,77 @@ class BaseChatOpenAI(ChatOpenAI):
return len(tokenizer.encode(text))
return self.get_last_generation_info().get('output_tokens', 0)
def _stream(
def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
kwargs['stream_usage'] = True
for chunk in super()._stream(*args, **kwargs):
if chunk.message.usage_metadata is not None:
self.usage_metadata = chunk.message.usage_metadata
yield chunk
def _convert_chunk_to_generation_chunk(
self,
messages: List[BaseMessage],
stop: Optional[List[str]] = None,
run_manager: Optional[CallbackManagerForLLMRun] = None,
**kwargs: Any,
) -> Iterator[ChatGenerationChunk]:
kwargs["stream"] = True
kwargs["stream_options"] = {"include_usage": True}
"""Set default stream_options."""
stream_usage = self._should_stream_usage(kwargs.get('stream_usage'), **kwargs)
# Note: stream_options is not a valid parameter for Azure OpenAI.
# To support users proxying Azure through ChatOpenAI, here we only specify
# stream_options if include_usage is set to True.
# See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
# for release notes.
if stream_usage:
kwargs["stream_options"] = {"include_usage": stream_usage}
chunk: dict,
default_chunk_class: type,
base_generation_info: Optional[dict],
) -> Optional[ChatGenerationChunk]:
if chunk.get("type") == "content.delta": # from beta.chat.completions.stream
return None
token_usage = chunk.get("usage")
choices = (
chunk.get("choices", [])
# from beta.chat.completions.stream
or chunk.get("chunk", {}).get("choices", [])
)
payload = self._get_request_payload(messages, stop=stop, **kwargs)
default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
base_generation_info = {}
if "response_format" in payload and is_basemodel_subclass(
payload["response_format"]
):
# TODO: Add support for streaming with Pydantic response_format.
warnings.warn("Streaming with Pydantic response_format not yet supported.")
chat_result = self._generate(
messages, stop, run_manager=run_manager, **kwargs
usage_metadata: Optional[UsageMetadata] = (
_create_usage_metadata(token_usage) if token_usage else None
)
if len(choices) == 0:
# logprobs is implicitly None
generation_chunk = ChatGenerationChunk(
message=default_chunk_class(content="", usage_metadata=usage_metadata)
)
msg = chat_result.generations[0].message
yield ChatGenerationChunk(
message=AIMessageChunk(
**msg.dict(exclude={"type", "additional_kwargs"}),
# preserve the "parsed" Pydantic object without converting to dict
additional_kwargs=msg.additional_kwargs,
),
generation_info=chat_result.generations[0].generation_info,
)
return
if self.include_response_headers:
raw_response = self.client.with_raw_response.create(**payload)
response = raw_response.parse()
base_generation_info = {"headers": dict(raw_response.headers)}
else:
response = self.client.create(**payload)
with response:
is_first_chunk = True
for chunk in response:
if not isinstance(chunk, dict):
chunk = chunk.model_dump()
return generation_chunk
generation_chunk = super()._convert_chunk_to_generation_chunk(
chunk,
default_chunk_class,
base_generation_info if is_first_chunk else {},
)
if generation_chunk is None:
continue
choice = choices[0]
if choice["delta"] is None:
return None
# custom code
if len(chunk['choices']) > 0 and 'reasoning_content' in chunk['choices'][0]['delta']:
generation_chunk.message.additional_kwargs["reasoning_content"] = chunk['choices'][0]['delta'][
'reasoning_content']
message_chunk = _convert_delta_to_message_chunk(
choice["delta"], default_chunk_class
)
generation_info = {**base_generation_info} if base_generation_info else {}
default_chunk_class = generation_chunk.message.__class__
logprobs = (generation_chunk.generation_info or {}).get("logprobs")
if run_manager:
run_manager.on_llm_new_token(
generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
)
is_first_chunk = False
# custom code
if generation_chunk.message.usage_metadata is not None:
self.usage_metadata = generation_chunk.message.usage_metadata
yield generation_chunk
if finish_reason := choice.get("finish_reason"):
generation_info["finish_reason"] = finish_reason
if model_name := chunk.get("model"):
generation_info["model_name"] = model_name
if system_fingerprint := chunk.get("system_fingerprint"):
generation_info["system_fingerprint"] = system_fingerprint
def _create_chat_result(self,
response: Union[dict, openai.BaseModel],
generation_info: Optional[Dict] = None):
result = super()._create_chat_result(response, generation_info)
try:
reasoning_content = ''
reasoning_content_enable = False
for res in response.choices:
if 'reasoning_content' in res.message.model_extra:
reasoning_content_enable = True
_reasoning_content = res.message.model_extra.get('reasoning_content')
if _reasoning_content is not None:
reasoning_content += _reasoning_content
if reasoning_content_enable:
result.llm_output['reasoning_content'] = reasoning_content
except Exception as e:
pass
return result
logprobs = choice.get("logprobs")
if logprobs:
generation_info["logprobs"] = logprobs
if usage_metadata and isinstance(message_chunk, AIMessageChunk):
message_chunk.usage_metadata = usage_metadata
generation_chunk = ChatGenerationChunk(
message=message_chunk, generation_info=generation_info or None
)
return generation_chunk
def invoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
stop: Optional[List[str]] = None,
stop: Optional[list[str]] = None,
**kwargs: Any,
) -> BaseMessage:
config = ensure_config(config)
chat_result = cast(
ChatGeneration,
"ChatGeneration",
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
@ -162,7 +191,9 @@ class BaseChatOpenAI(ChatOpenAI):
run_id=config.pop("run_id", None),
**kwargs,
).generations[0][0],
).message
self.usage_metadata = chat_result.response_metadata[
'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
return chat_result
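Net effect of the `invoke()` override: token usage from the last call is cached on the instance so it can be read back through `get_last_generation_info()` without re-tokenizing. A hedged usage sketch; the model name and key are placeholders, and the keys in the returned dict depend on whether the provider reports OpenAI-style `token_usage` (prompt_tokens/completion_tokens) or LangChain `usage_metadata` (input_tokens/output_tokens):

```python
llm = BaseChatOpenAI(model="gpt-4o-mini", api_key="sk-placeholder")
message = llm.invoke("ping")  # requires a real key to actually run
print(llm.get_last_generation_info())
```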

View File

@ -26,6 +26,6 @@ class DeepSeekChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base='https://api.deepseek.com',
openai_api_key=model_credential.get('api_key'),
**optional_params
extra_body=optional_params
)
return deepseek_chat_open_ai

View File

@ -21,11 +21,10 @@ class KimiChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
kimi_chat_open_ai = KimiChatModel(
openai_api_base=model_credential['api_base'],
openai_api_key=model_credential['api_key'],
model_name=model_name,
**optional_params
extra_body=optional_params,
)
return kimi_chat_open_ai

View File

@ -28,5 +28,5 @@ class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)

View File

@ -16,5 +16,5 @@ class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)

View File

@ -9,7 +9,6 @@
from typing import List, Dict
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_openai.chat_models import ChatOpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -35,9 +34,9 @@ class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):
streaming = False
azure_chat_open_ai = OpenAIChatModel(
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params,
base_url=model_credential.get('api_base'),
api_key=model_credential.get('api_key'),
extra_body=optional_params,
streaming=streaming,
custom_get_token_ids=custom_get_token_ids
)

View File

@ -18,9 +18,8 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)
return chat_tong_yi

View File

@ -26,6 +26,6 @@ class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)
return chat_tong_yi

View File

@ -16,5 +16,5 @@ class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)

View File

@ -34,5 +34,5 @@ class SiliconCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params
extra_body=optional_params
)

View File

@ -33,21 +33,7 @@ class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params,
extra_body=optional_params,
custom_get_token_ids=custom_get_token_ids
)
return azure_chat_open_ai
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
try:
return super().get_num_tokens_from_messages(messages)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
def get_num_tokens(self, text: str) -> int:
try:
return super().get_num_tokens(text)
except Exception as e:
tokenizer = TokenizerManage.get_tokenizer()
return len(tokenizer.encode(text))

View File

@ -16,5 +16,5 @@ class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)

View File

@ -19,7 +19,7 @@ class VllmImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)
def is_cache_model(self):

View File

@ -1,9 +1,10 @@
# coding=utf-8
from typing import Dict, List
from typing import Dict, Optional, Sequence, Union, Any, Callable
from urllib.parse import urlparse, ParseResult
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.tools import BaseTool
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@ -31,13 +32,19 @@ class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params,
streaming=True,
stream_usage=True,
extra_body=optional_params
)
return vllm_chat_open_ai
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
def get_num_tokens_from_messages(
self,
messages: list[BaseMessage],
tools: Optional[
Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
] = None,
) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])

View File

@ -16,5 +16,5 @@ class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)

View File

@ -17,5 +17,5 @@ class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
**optional_params
extra_body=optional_params
)

View File

@ -27,7 +27,7 @@ class WenxinLLMModelParams(BaseForm):
_step=0.01,
precision=2)
max_tokens = forms.SliderField(
max_output_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=1024,

View File

@ -19,7 +19,7 @@ class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:

View File

@ -34,7 +34,7 @@ class XinferenceChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=base_url,
openai_api_key=model_credential.get('api_key'),
**optional_params
extra_body=optional_params
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:

View File

@ -16,5 +16,5 @@ class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
extra_body=optional_params
)

View File

@ -8,7 +8,7 @@ package-mode = false
[tool.poetry.dependencies]
python = ">=3.11,<3.12"
django = "4.2.18"
django = "4.2.20"
djangorestframework = "^3.15.2"
drf-yasg = "1.21.7"
django-filter = "23.2"

View File

@ -72,6 +72,7 @@ interface chatType {
document_list: Array<any>
image_list: Array<any>
audio_list: Array<any>
other_list: Array<any>
}
}

View File

@ -125,6 +125,28 @@
</template>
</el-space>
</div>
<div v-if="item.other_list?.length > 0">
<p class="mb-8 color-secondary">
{{ $t('common.fileUpload.document') }}:
</p>
<el-space wrap>
<template v-for="(f, i) in item.other_list" :key="i">
<el-card
shadow="never"
style="--el-card-padding: 8px"
class="file cursor"
>
<div class="flex align-center">
<img :src="getImgUrl(f && f?.name)" alt="" width="24" />
<div class="ml-4 ellipsis" :title="f && f?.name">
{{ f && f?.name }}
</div>
</div>
</el-card>
</template>
</el-space>
</div>
</div>
</div>
</template>

View File

@ -80,7 +80,7 @@ const props = defineProps<{
chatRecord: chatType
application: any
loading: boolean
sendMessage: (question: string, other_params_data?: any, chat?: chatType) => void
sendMessage: (question: string, other_params_data?: any, chat?: chatType) => Promise<boolean>
chatManagement: any
type: 'log' | 'ai-chat' | 'debug-ai-chat'
}>()
@ -98,9 +98,10 @@ const showUserAvatar = computed(() => {
const chatMessage = (question: string, type: 'old' | 'new', other_params_data?: any) => {
if (type === 'old') {
add_answer_text_list(props.chatRecord.answer_text_list)
props.sendMessage(question, other_params_data, props.chatRecord)
props.chatManagement.open(props.chatRecord.id)
props.chatManagement.write(props.chatRecord.id)
props.sendMessage(question, other_params_data, props.chatRecord).then(() => {
props.chatManagement.open(props.chatRecord.id)
props.chatManagement.write(props.chatRecord.id)
})
} else {
props.sendMessage(question, other_params_data)
}

View File

@ -10,7 +10,8 @@
uploadDocumentList.length ||
uploadImageList.length ||
uploadAudioList.length ||
uploadVideoList.length
uploadVideoList.length ||
uploadOtherList.length
"
>
<el-row :gutter="10">
@ -30,22 +31,62 @@
class="file cursor"
>
<div
class="flex align-center"
class="flex-between align-center"
@mouseenter.stop="mouseenter(item)"
@mouseleave.stop="mouseleave()"
>
<div class="flex align-center">
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
<div class="ml-4 ellipsis-1" :title="item && item?.name">
{{ item && item?.name }}
</div>
</div>
<div
@click="deleteFile(index, 'document')"
class="delete-icon color-secondary"
v-if="showDelete === item.url"
>
<el-icon>
<el-icon style="font-size: 16px; top: 2px">
<CircleCloseFilled />
</el-icon>
</div>
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
<div class="ml-4 ellipsis-1" :title="item && item?.name">
{{ item && item?.name }}
</div>
</el-card>
</el-col>
<el-col
v-for="(item, index) in uploadOtherList"
:key="index"
:xs="24"
:sm="props.type === 'debug-ai-chat' ? 24 : 12"
:md="props.type === 'debug-ai-chat' ? 24 : 12"
:lg="props.type === 'debug-ai-chat' ? 24 : 12"
:xl="props.type === 'debug-ai-chat' ? 24 : 12"
class="mb-8"
>
<el-card
shadow="never"
style="--el-card-padding: 8px; max-width: 100%"
class="file cursor"
>
<div
class="flex-between align-center"
@mouseenter.stop="mouseenter(item)"
@mouseleave.stop="mouseleave()"
>
<div class="flex align-center">
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
<div class="ml-4 ellipsis-1" :title="item && item?.name">
{{ item && item?.name }}
</div>
</div>
<div
@click="deleteFile(index, 'other')"
class="delete-icon color-secondary"
v-if="showDelete === item.url"
>
<el-icon style="font-size: 16px; top: 2px">
<CircleCloseFilled />
</el-icon>
</div>
</div>
</el-card>
@ -63,23 +104,25 @@
>
<el-card shadow="never" style="--el-card-padding: 8px" class="file cursor">
<div
class="flex align-center"
class="flex-between align-center"
@mouseenter.stop="mouseenter(item)"
@mouseleave.stop="mouseleave()"
>
<div class="flex align-center">
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
<div class="ml-4 ellipsis-1" :title="item && item?.name">
{{ item && item?.name }}
</div>
</div>
<div
@click="deleteFile(index, 'audio')"
class="delete-icon color-secondary"
v-if="showDelete === item.url"
>
<el-icon>
<el-icon style="font-size: 16px; top: 2px">
<CircleCloseFilled />
</el-icon>
</div>
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
<div class="ml-4 ellipsis-1" :title="item && item?.name">
{{ item && item?.name }}
</div>
</div>
</el-card>
</el-col>
@ -87,7 +130,7 @@
<el-space wrap>
<template v-for="(item, index) in uploadImageList" :key="index">
<div
class="file cursor border border-r-4"
class="file file-image cursor border border-r-4"
v-if="item.url"
@mouseenter.stop="mouseenter(item)"
@mouseleave.stop="mouseleave()"
@ -97,7 +140,7 @@
class="delete-icon color-secondary"
v-if="showDelete === item.url"
>
<el-icon>
<el-icon style="font-size: 16px; top: 2px">
<CircleCloseFilled />
</el-icon>
</div>
@ -137,6 +180,8 @@
type="textarea"
:maxlength="100000"
@keydown.enter="sendChatHandle($event)"
@paste="handlePaste"
@drop="handleDrop"
/>
<div class="operate flex align-center">
@ -188,6 +233,7 @@
:show-file-list="false"
:accept="getAcceptList()"
:on-change="(file: any, fileList: any) => uploadFile(file, fileList)"
ref="upload"
>
<el-tooltip
:disabled="mode === 'mobile'"
@ -301,13 +347,16 @@ const localLoading = computed({
}
})
const imageExtensions = ['jpg', 'jpeg', 'png', 'gif', 'bmp']
const documentExtensions = ['pdf', 'docx', 'txt', 'xls', 'xlsx', 'md', 'html', 'csv']
const videoExtensions = ['mp4', 'avi', 'mov', 'mkv', 'flv']
const audioExtensions = ['mp3', 'wav', 'ogg', 'aac', 'm4a']
const upload = ref()
const imageExtensions = ['JPG', 'JPEG', 'PNG', 'GIF', 'BMP']
const documentExtensions = ['PDF', 'DOCX', 'TXT', 'XLS', 'XLSX', 'MD', 'HTML', 'CSV']
const videoExtensions: any = []
const audioExtensions = ['MP3', 'WAV', 'OGG', 'AAC', 'M4A']
let otherExtensions = ['PPT', 'DOC']
const getAcceptList = () => {
const { image, document, audio, video } = props.applicationDetails.file_upload_setting
const { image, document, audio, video, other } = props.applicationDetails.file_upload_setting
let accepts: any = []
if (image) {
accepts = [...imageExtensions]
@ -321,6 +370,11 @@ const getAcceptList = () => {
if (video) {
accepts = [...accepts, ...videoExtensions]
}
if (other) {
// Use the suffix list configured for "other" files
otherExtensions = props.applicationDetails.file_upload_setting.otherExtensions
accepts = [...accepts, ...otherExtensions]
}
if (accepts.length === 0) {
return `.${t('chat.uploadFile.tipMessage')}`
@ -334,7 +388,8 @@ const checkMaxFilesLimit = () => {
uploadImageList.value.length +
uploadDocumentList.value.length +
uploadAudioList.value.length +
uploadVideoList.value.length
uploadVideoList.value.length +
uploadOtherList.value.length
)
}
@ -345,7 +400,8 @@ const uploadFile = async (file: any, fileList: any) => {
uploadImageList.value.length +
uploadDocumentList.value.length +
uploadAudioList.value.length +
uploadVideoList.value.length
uploadVideoList.value.length +
uploadOtherList.value.length
if (file_limit_once >= maxFiles) {
MsgWarning(t('chat.uploadFile.limitMessage1') + maxFiles + t('chat.uploadFile.limitMessage2'))
fileList.splice(0, fileList.length)
@ -361,7 +417,7 @@ const uploadFile = async (file: any, fileList: any) => {
const formData = new FormData()
formData.append('file', file.raw, file.name)
// Route the file to the matching upload list by its extension
const extension = file.name.split('.').pop().toLowerCase()
const extension = file.name.split('.').pop().toUpperCase() // upper-case to match the extension lists
if (imageExtensions.includes(extension)) {
uploadImageList.value.push(file)
@ -371,6 +427,8 @@ const uploadFile = async (file: any, fileList: any) => {
uploadVideoList.value.push(file)
} else if (audioExtensions.includes(extension)) {
uploadAudioList.value.push(file)
} else if (otherExtensions.includes(extension)) {
uploadOtherList.value.push(file)
}
if (!chatId_context.value) {
@ -429,11 +487,68 @@ const uploadFile = async (file: any, fileList: any) => {
file.file_id = f[0].file_id
}
})
uploadOtherList.value.forEach((file: any) => {
const f = response.data.filter(
(f: any) => f.name.replaceAll(' ', '') === file.name.replaceAll(' ', '')
)
if (f.length > 0) {
file.url = f[0].url
file.file_id = f[0].file_id
}
})
if (!inputValue.value && uploadImageList.value.length > 0) {
inputValue.value = t('chat.uploadFile.imageMessage')
}
})
}
// Handle pasting files from the clipboard into the input box
const handlePaste = (event: ClipboardEvent) => {
if (!props.applicationDetails.file_upload_enable) return
const clipboardData = event.clipboardData
if (!clipboardData) return
// Only proceed when the clipboard actually contains files
const files = clipboardData.files
if (files.length === 0) return
// Wrap each pasted File in an el-upload style object and reuse the upload path
Array.from(files).forEach((rawFile: File) => {
// Minimal el-upload-compatible file object
const elFile = {
uid: Date.now(), // unique file id
name: rawFile.name,
size: rawFile.size,
raw: rawFile, // the original File object
status: 'ready', // initial upload status
percentage: 0 // upload progress
}
// Feed it through the same handler as el-upload's on-change
uploadFile(elFile, [elFile])
})
// Prevent the default paste behavior (plain-text insertion)
event.preventDefault()
}
// Handle files dragged and dropped onto the input box
const handleDrop = (event: DragEvent) => {
if (!props.applicationDetails.file_upload_enable) return
event.preventDefault()
const files = event.dataTransfer?.files
if (!files) return
Array.from(files).forEach((rawFile) => {
const elFile = {
uid: Date.now(),
name: rawFile.name,
size: rawFile.size,
raw: rawFile,
status: 'ready',
percentage: 0
}
uploadFile(elFile, [elFile])
})
}
// interval timer id
const intervalId = ref<any | null>(null)
// uploaded file lists
@ -446,6 +561,7 @@ const uploadImageList = ref<Array<any>>([])
const uploadDocumentList = ref<Array<any>>([])
const uploadVideoList = ref<Array<any>>([])
const uploadAudioList = ref<Array<any>>([])
const uploadOtherList = ref<Array<any>>([])
const showDelete = ref('')
@ -578,7 +694,9 @@ const uploadRecording = async (audioBlob: Blob) => {
recorderStatus.value = 'TRANSCRIBING'
const formData = new FormData()
formData.append('file', audioBlob, 'recording.mp3')
bus.emit('on:transcribing', true)
if (props.applicationDetails.stt_autosend) {
bus.emit('on:transcribing', true)
}
applicationApi
.postSpeechToText(props.applicationDetails.id as string, formData, localLoading)
.then((response) => {
@ -656,13 +774,15 @@ function autoSendMessage() {
image_list: uploadImageList.value,
document_list: uploadDocumentList.value,
audio_list: uploadAudioList.value,
video_list: uploadVideoList.value
video_list: uploadVideoList.value,
other_list: uploadOtherList.value
})
inputValue.value = ''
uploadImageList.value = []
uploadDocumentList.value = []
uploadAudioList.value = []
uploadVideoList.value = []
uploadOtherList.value = []
if (quickInputRef.value) {
quickInputRef.value.textareaStyle.height = '45px'
}
@ -718,6 +838,8 @@ function deleteFile(index: number, val: string) {
uploadVideoList.value.splice(index, 1)
} else if (val === 'audio') {
uploadAudioList.value.splice(index, 1)
} else if (val === 'other') {
uploadOtherList.value.splice(index, 1)
}
}
@ -823,7 +945,7 @@ onMounted(() => {
}
}
}
.file {
.file-image {
position: relative;
overflow: inherit;

View File

@ -1,10 +1,7 @@
<template>
<!-- Question content -->
<div class="question-content item-content mb-16 lighter">
<div
class="content p-12-16 border-r-8"
:class="document_list.length >= 2 ? 'media_2' : `media_${document_list.length}`"
>
<div class="content p-12-16 border-r-8" :class="getClassName">
<div class="text break-all pre-wrap">
<div class="mb-8" v-if="document_list.length">
<el-space wrap class="w-full media-file-width">
@ -60,6 +57,26 @@
</template>
</el-space>
</div>
<div class="mb-8" v-if="other_list.length">
<el-space wrap class="w-full media-file-width">
<template v-for="(item, index) in other_list" :key="index">
<el-card shadow="never" style="--el-card-padding: 8px" class="download-file cursor">
<div class="download-button flex align-center" @click="downloadFile(item)">
<el-icon class="mr-4">
<Download />
</el-icon>
{{ $t('chat.download') }}
</div>
<div class="show flex align-center">
<img :src="getImgUrl(item && item?.name)" alt="" width="24" />
<div class="ml-4 ellipsis-1" :title="item && item?.name">
{{ item && item?.name }}
</div>
</div>
</el-card>
</template>
</el-space>
</div>
<span> {{ chatRecord.problem_text }}</span>
</div>
</div>
@ -121,7 +138,24 @@ const audio_list = computed(() => {
)
return startNode?.audio_list || []
})
const other_list = computed(() => {
if (props.chatRecord?.upload_meta) {
return props.chatRecord.upload_meta?.other_list || []
}
const startNode = props.chatRecord.execution_details?.find(
(detail) => detail.type === 'start-node'
)
return startNode?.other_list || []
})
const getClassName = computed(() => {
return document_list.value.length >= 2 || other_list.value.length >= 2
? 'media_2'
: document_list.value.length
? `media_${document_list.value.length}`
: other_list.value.length
? `media_${other_list.value.length}`
: `media_0`
})
function downloadFile(item: any) {
downloadByURL(item.url, item.name)
}

View File

@ -162,7 +162,10 @@ const initialApiFormData = ref({})
const isUserInput = computed(
() =>
props.applicationDetails.work_flow?.nodes?.filter((v: any) => v.id === 'base-node')[0]
.properties.user_input_field_list.length > 0
.properties.user_input_field_list.length > 0 ||
(props.type === 'debug-ai-chat' &&
props.applicationDetails.work_flow?.nodes?.filter((v: any) => v.id === 'base-node')[0]
.properties.api_input_field_list.length > 0)
)
const showUserInputContent = computed(() => {
return ((isUserInput.value && firsUserInput.value) || showUserInput.value) && props.type !== 'log'
@ -224,33 +227,41 @@ const validate = () => {
return userFormRef.value?.validate() || Promise.reject(false)
}
function sendMessage(val: string, other_params_data?: any, chat?: chatType) {
function sendMessage(val: string, other_params_data?: any, chat?: chatType): Promise<boolean> {
if (isUserInput.value) {
userFormRef.value
?.validate()
.then((ok) => {
let userFormData = JSON.parse(localStorage.getItem(`${accessToken}userForm`) || '{}')
const newData = Object.keys(form_data.value).reduce((result: any, key: string) => {
result[key] = Object.prototype.hasOwnProperty.call(userFormData, key)
? userFormData[key]
: form_data.value[key]
return result
}, {})
localStorage.setItem(`${accessToken}userForm`, JSON.stringify(newData))
showUserInput.value = false
if (!loading.value && props.applicationDetails?.name) {
handleDebounceClick(val, other_params_data, chat)
}
})
.catch((e) => {
showUserInput.value = true
return
})
if (userFormRef.value) {
return userFormRef.value
?.validate()
.then((ok) => {
let userFormData = JSON.parse(localStorage.getItem(`${accessToken}userForm`) || '{}')
const newData = Object.keys(form_data.value).reduce((result: any, key: string) => {
result[key] = Object.prototype.hasOwnProperty.call(userFormData, key)
? userFormData[key]
: form_data.value[key]
return result
}, {})
localStorage.setItem(`${accessToken}userForm`, JSON.stringify(newData))
showUserInput.value = false
if (!loading.value && props.applicationDetails?.name) {
handleDebounceClick(val, other_params_data, chat)
return true
}
throw 'err: no send'
})
.catch((e) => {
showUserInput.value = true
return false
})
} else {
return Promise.reject(false)
}
} else {
showUserInput.value = false
if (!loading.value && props.applicationDetails?.name) {
handleDebounceClick(val, other_params_data, chat)
return Promise.resolve(true)
}
return Promise.reject(false)
}
}
@ -416,7 +427,9 @@ function chatMessage(chat?: any, problem?: string, re_chat?: boolean, other_para
? other_params_data.document_list
: [],
audio_list:
other_params_data && other_params_data.audio_list ? other_params_data.audio_list : []
other_params_data && other_params_data.audio_list ? other_params_data.audio_list : [],
other_list:
other_params_data && other_params_data.other_list ? other_params_data.other_list : []
}
})
chatList.value.push(chat)

View File

@ -6,10 +6,10 @@
<div
v-if="item.type === 'question'"
@click="sendMessage ? sendMessage(item.content, 'new') : (content: string) => {}"
class="problem-button ellipsis-2 mt-4 mb-4"
class="problem-button mt-4 mb-4 flex"
:class="sendMessage ? 'cursor' : 'disabled'"
>
<el-icon>
<el-icon class="mr-8" style="margin-top: 2px;">
<EditPen />
</el-icon>
{{ item.content }}
@ -234,12 +234,9 @@ const split_form_rander_ = (source: string, type: string) => {
border: none;
border-radius: 8px;
background: var(--app-layout-bg-color);
height: 46px;
padding: 0 12px;
line-height: 46px;
padding: 12px;
box-sizing: border-box;
color: var(--el-text-color-regular);
-webkit-line-clamp: 1;
word-break: break-all;
&:hover {

View File

@ -45,7 +45,10 @@ export default {
document: 'Documents',
image: 'Image',
audio: 'Audio',
video: 'Video'
video: 'Video',
other: 'Other',
addExtensions: 'Add suffix',
existingExtensionsTip: 'File suffix already exists',
},
status: {
label: 'Status',
@ -55,7 +58,7 @@ export default {
param: {
outputParam: 'Output Parameters',
inputParam: 'Input Parameters',
initParam: 'Startup Parameters',
initParam: 'Startup Parameters'
},
inputPlaceholder: 'Please input',

View File

@ -104,7 +104,8 @@ export default {
label: 'File types allowed for upload',
documentText: 'Requires "Document Content Extraction" node to parse document content',
imageText: 'Requires "Image Understanding" node to parse image content',
audioText: 'Requires "Speech-to-Text" node to parse audio content'
audioText: 'Requires "Speech-to-Text" node to parse audio content',
otherText: 'You need to parse this file type yourself'
}
}
},

View File

@ -45,7 +45,10 @@ export default {
document: '文档',
image: '图片',
audio: '音频',
video: '视频'
video: '视频',
other: '其他文件',
addExtensions: '添加后缀名',
existingExtensionsTip: '文件后缀已存在',
},
status: {
label: '状态',

View File

@ -105,8 +105,10 @@ export default {
label: '上传的文件类型',
documentText: '需要使用“文档内容提取”节点解析文档内容',
imageText: '需要使用“视觉模型”节点解析图片内容',
audioText: '需要使用“语音转文本”节点解析音频内容'
}
audioText: '需要使用“语音转文本”节点解析音频内容',
otherText: '需要自行解析该类型文件'
},
}
},
aiChatNode: {

View File

@ -45,7 +45,10 @@ export default {
document: '文檔',
image: '圖片',
audio: '音頻',
video: '視頻'
video: '視頻',
other: '其他文件',
addExtensions: '添加後綴名',
existingExtensionsTip: '文件後綴已存在',
},
status: {
label: '狀態',

View File

@ -105,7 +105,8 @@ export default {
label: '上傳的文件類型',
documentText: '需要使用「文檔內容提取」節點解析文檔內容',
imageText: '需要使用「圖片理解」節點解析圖片內容',
audioText: '需要使用「語音轉文本」節點解析音頻內容'
audioText: '需要使用「語音轉文本」節點解析音頻內容',
otherText: '需要自行解析該類型文件'
}
}
},

View File

@ -65,7 +65,7 @@ const useUserStore = defineStore({
if (token) {
return token
}
const local_token = localStorage.getItem(`${token}-accessToken`)
const local_token = localStorage.getItem(`${this.userAccessToken}-accessToken`)
if (local_token) {
return local_token
}

View File

@ -61,6 +61,15 @@
show-password
/>
</el-form-item>
<el-form-item
:label="$t('views.system.authentication.oauth2.filedMapping')"
prop="config_data.fieldMapping"
>
<el-input
v-model="form.config_data.fieldMapping"
:placeholder="$t('views.system.authentication.oauth2.filedMappingPlaceholder')"
/>
</el-form-item>
<el-form-item
:label="$t('views.system.authentication.oidc.redirectUrl')"
prop="config_data.redirectUrl"
@ -104,6 +113,7 @@ const form = ref<any>({
state: '',
clientId: '',
clientSecret: '',
fieldMapping: '{"username": "preferred_username", "email": "email"}',
redirectUrl: ''
},
is_active: true
@ -156,6 +166,13 @@ const rules = reactive<FormRules<any>>({
trigger: 'blur'
}
],
'config_data.fieldMapping': [
{
required: true,
message: t('views.system.authentication.oauth2.filedMappingPlaceholder'),
trigger: 'blur'
}
],
'config_data.redirectUrl': [
{
required: true,
@ -187,6 +204,12 @@ function getDetail() {
authApi.getAuthSetting(form.value.auth_type, loading).then((res: any) => {
if (res.data && JSON.stringify(res.data) !== '{}') {
form.value = res.data
if (
form.value.config_data.fieldMapping === '' ||
form.value.config_data.fieldMapping === undefined
) {
form.value.config_data.fieldMapping = '{"username": "preferred_username", "email": "email"}'
}
}
})
}

View File

@ -1,6 +1,10 @@
<template>
<login-layout>
<LoginContainer :subTitle="$t('views.system.theme.defaultSlogan')">
<login-layout v-if="!loading" v-loading="loading || sendLoading">
<LoginContainer
:subTitle="
user.themeInfo?.slogan ? user.themeInfo?.slogan : $t('views.system.theme.defaultSlogan')
"
>
<h2 class="mb-24">{{ $t('views.login.forgotPassword') }}</h2>
<el-form
class="register-form"
@ -41,15 +45,15 @@
isDisabled
? `${$t('views.login.verificationCode.resend')}${time}s`
: $t('views.login.verificationCode.getVerificationCode')
}}</el-button
>
}}
</el-button>
</div>
</el-form-item>
</div>
</el-form>
<el-button size="large" type="primary" class="w-full" @click="checkCode">{{
$t('views.login.buttons.checkCode')
}}</el-button>
<el-button size="large" type="primary" class="w-full" @click="checkCode"
>{{ $t('views.login.buttons.checkCode') }}
</el-button>
<div class="operate-container mt-12">
<el-button
class="register"
@ -65,14 +69,18 @@
</login-layout>
</template>
<script setup lang="ts">
import { ref } from 'vue'
import { onBeforeMount, ref } from 'vue'
import type { CheckCodeRequest } from '@/api/type/user'
import { useRouter } from 'vue-router'
import type { FormInstance, FormRules } from 'element-plus'
import UserApi from '@/api/user'
import { MsgSuccess } from '@/utils/message'
import { t } from '@/locales'
import useStore from '@/stores'
const router = useRouter()
const { user } = useStore()
const CheckEmailForm = ref<CheckCodeRequest>({
email: '',
code: '',
@ -104,11 +112,11 @@ const rules = ref<FormRules<CheckCodeRequest>>({
const loading = ref<boolean>(false)
const isDisabled = ref<boolean>(false)
const time = ref<number>(60)
const sendLoading = ref<boolean>(false)
const checkCode = () => {
resetPasswordFormRef.value
?.validate()
.then(() => UserApi.checkCode(CheckEmailForm.value, loading))
.then(() => UserApi.checkCode(CheckEmailForm.value, sendLoading))
.then(() => router.push({ name: 'reset_password', params: CheckEmailForm.value }))
}
/**
@ -117,7 +125,7 @@ const checkCode = () => {
const sendEmail = () => {
resetPasswordFormRef.value?.validateField('email', (v: boolean) => {
if (v) {
UserApi.sendEmit(CheckEmailForm.value.email, 'reset_password', loading).then(() => {
UserApi.sendEmit(CheckEmailForm.value.email, 'reset_password', sendLoading).then(() => {
MsgSuccess(t('views.login.verificationCode.successMessage'))
isDisabled.value = true
handleTimeChange()
@ -136,5 +144,11 @@ const handleTimeChange = () => {
}, 1000)
}
}
onBeforeMount(() => {
loading.value = true
user.asyncGetProfile().then(() => {
loading.value = false
})
})
</script>
<style lang="scss" scoped></style>

View File

@ -1,6 +1,10 @@
<template>
<login-layout>
<LoginContainer :subTitle="$t('views.system.theme.defaultSlogan')">
<login-layout v-if="!loading" v-loading="loading || sendLoading">
<LoginContainer
:subTitle="
user.themeInfo?.slogan ? user.themeInfo?.slogan : $t('views.system.theme.defaultSlogan')
"
>
<h2 class="mb-24">{{ $t('views.login.resetPassword') }}</h2>
<el-form
class="reset-password-form"
@ -35,9 +39,9 @@
</el-form-item>
</div>
</el-form>
<el-button size="large" type="primary" class="w-full" @click="resetPassword">{{
$t('common.confirm')
}}</el-button>
<el-button size="large" type="primary" class="w-full" @click="resetPassword"
>{{ $t('common.confirm') }}
</el-button>
<div class="operate-container mt-12">
<el-button
size="large"
@ -54,13 +58,16 @@
</login-layout>
</template>
<script setup lang="ts">
import { ref, onMounted } from 'vue'
import { ref, onMounted, onBeforeMount } from 'vue'
import type { ResetPasswordRequest } from '@/api/type/user'
import { useRouter, useRoute } from 'vue-router'
import { MsgSuccess } from '@/utils/message'
import type { FormInstance, FormRules } from 'element-plus'
import UserApi from '@/api/user'
import { t } from '@/locales'
import useStore from '@/stores'
const { user } = useStore()
const router = useRouter()
const route = useRoute()
const {
@ -81,7 +88,12 @@ onMounted(() => {
router.push('forgot_password')
}
})
onBeforeMount(() => {
loading.value = true
user.asyncGetProfile().then(() => {
loading.value = false
})
})
const rules = ref<FormRules<ResetPasswordRequest>>({
password: [
{
@ -122,10 +134,11 @@ const rules = ref<FormRules<ResetPasswordRequest>>({
})
const resetPasswordFormRef = ref<FormInstance>()
const loading = ref<boolean>(false)
const sendLoading = ref<boolean>(false)
const resetPassword = () => {
resetPasswordFormRef.value
?.validate()
.then(() => UserApi.resetPassword(resetPasswordForm.value, loading))
.then(() => UserApi.resetPassword(resetPasswordForm.value, sendLoading))
.then(() => {
MsgSuccess(t('common.modifySuccess'))
router.push({ name: 'login' })

View File

@ -238,41 +238,49 @@ const update_field = () => {
const new_user_input_field_list = cloneDeep(
ok.data.work_flow.nodes[0].properties.user_input_field_list
)
const merge_api_input_field_list = new_api_input_field_list.map((item: any) => {
const find_field = old_api_input_field_list.find(
(old_item: any) => old_item.variable == item.variable
)
if (find_field) {
return {
...item,
value: find_field.value,
label:
typeof item.label === 'object' && item.label != null ? item.label.label : item.label
const merge_api_input_field_list = (new_api_input_field_list || []).map((item: any) => {
const find_field = old_api_input_field_list.find(
(old_item: any) => old_item.variable == item.variable
)
if (find_field) {
return {
...item,
value: find_field.value,
label:
typeof item.label === 'object' && item.label != null
? item.label.label
: item.label
}
} else {
return item
}
} else {
return item
}
})
})
set(
props.nodeModel.properties.node_data,
'api_input_field_list',
merge_api_input_field_list
)
const merge_user_input_field_list = new_user_input_field_list.map((item: any) => {
const find_field = old_user_input_field_list.find(
(old_item: any) => old_item.field == item.field
)
if (find_field) {
return {
...item,
value: find_field.value,
label:
typeof item.label === 'object' && item.label != null ? item.label.label : item.label
const merge_user_input_field_list = (new_user_input_field_list || []).map((item: any) => {
const find_field = old_user_input_field_list.find(
(old_item: any) => old_item.field == item.field
)
if (find_field) {
return {
...item,
value: find_field.value,
label:
typeof item.label === 'object' && item.label != null
? item.label.label
: item.label
}
} else {
return item
}
} else {
return item
}
})
})
set(
props.nodeModel.properties.node_data,
'user_input_field_list',
@ -294,6 +302,7 @@ const update_field = () => {
}
})
.catch((err) => {
console.log(err)
set(props.nodeModel.properties, 'status', 500)
})
}

View File

@ -7,7 +7,7 @@
:destroy-on-close="true"
:before-close="close"
append-to-body
width="600"
width="800"
>
<el-form
label-position="top"
@ -54,13 +54,16 @@
<img class="mr-12" src="@/assets/icon_file-doc.svg" alt="" />
<div>
<p class="line-height-22 mt-4">
{{ $t('common.fileUpload.document') }}（TXT、MD、DOCX、HTML、CSV、XLSX、XLS、PDF）
{{ $t('common.fileUpload.document') }}
<el-text class="color-secondary"
>{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.documentText'
)
}}
</el-text>
</p>
<el-text class="color-secondary">{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.documentText'
)
}}</el-text>
<p>{{ documentExtensions.join('、') }}</p>
</div>
</div>
<el-checkbox
@ -81,13 +84,16 @@
<img class="mr-12" src="@/assets/icon_file-image.svg" alt="" />
<div>
<p class="line-height-22 mt-4">
{{ $t('common.fileUpload.image') }}（JPG、JPEG、PNG、GIF）
{{ $t('common.fileUpload.image') }}
<el-text class="color-secondary"
>{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.imageText'
)
}}
</el-text>
</p>
<el-text class="color-secondary">{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.imageText'
)
}}</el-text>
<p>{{ imageExtensions.join('、') }}</p>
</div>
</div>
<el-checkbox v-model="form_data.image" @change="form_data.image = !form_data.image" />
@ -106,18 +112,76 @@
<img class="mr-12" src="@/assets/icon_file-audio.svg" alt="" />
<div>
<p class="line-height-22 mt-4">
{{ $t('common.fileUpload.audio') }}（MP3、WAV、OGG、ACC、M4A）
{{ $t('common.fileUpload.audio') }}
<el-text class="color-secondary"
>{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.audioText'
)
}}
</el-text>
</p>
<el-text class="color-secondary">{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.audioText'
)
}}</el-text>
<p>{{ audioExtensions.join('、') }}</p>
</div>
</div>
<el-checkbox v-model="form_data.audio" @change="form_data.audio = !form_data.audio" />
</div>
</el-card>
<el-card
shadow="hover"
class="card-checkbox cursor w-full mb-8"
:class="form_data.other ? 'active' : ''"
style="--el-card-padding: 8px 16px"
@click.stop="form_data.other = !form_data.other"
>
<div class="flex-between">
<div class="flex align-center">
<img class="mr-12" :width="32" src="@/assets/fileType/unknown-icon.svg" alt="" />
<div>
<p class="line-height-22 mt-4">
{{ $t('common.fileUpload.other') }}
<el-text class="color-secondary"
>{{
$t(
'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.otherText'
)
}}
</el-text>
</p>
<el-space wrap :size="2" class="mt-4">
<el-tag
v-for="tag in form_data.otherExtensions"
:key="tag"
closable
:disable-transitions="false"
@close="handleClose(tag)"
type="info"
class="mr-4"
effect="plain"
style="
--el-tag-border-radius: 4px;
--el-tag-border-color: var(--el-border-color);
"
>
{{ tag }}
</el-tag>
<el-input
v-if="inputVisible"
ref="InputRef"
v-model="inputValue"
size="small"
@keyup.enter="handleInputConfirm"
@blur="handleInputConfirm"
/>
<el-button v-else class="button-new-tag" size="small" @click.stop="showInput">
+ {{ $t('common.fileUpload.addExtensions') }}
</el-button>
</el-space>
</div>
</div>
<el-checkbox v-model="form_data.other" @change="form_data.other = !form_data.other" />
</div>
</el-card>
</el-form-item>
</el-form>
<template #footer>
@ -133,20 +197,34 @@
<script setup lang="ts">
import { nextTick, ref } from 'vue'
import type { InputInstance } from 'element-plus'
import { cloneDeep } from 'lodash'
import { MsgWarning } from '@/utils/message'
import { t } from '@/locales'
const emit = defineEmits(['refresh'])
const props = defineProps<{ nodeModel: any }>()
const dialogVisible = ref(false)
const inputVisible = ref(false)
const inputValue = ref('')
const loading = ref(false)
const fieldFormRef = ref()
const InputRef = ref<InputInstance>()
const documentExtensions = ['TXT', 'MD', 'DOCX', 'HTML', 'CSV', 'XLSX', 'XLS', 'PDF']
const imageExtensions = ['JPG', 'JPEG', 'PNG', 'GIF']
const audioExtensions = ['MP3', 'WAV', 'OGG', 'ACC', 'M4A']
const form_data = ref({
maxFiles: 3,
fileLimit: 50,
document: true,
image: false,
audio: false,
video: false
video: false,
other: false,
otherExtensions: ['PPT', 'DOC']
})
function open(data: any) {
@ -160,11 +238,43 @@ function close() {
dialogVisible.value = false
}
const handleClose = (tag: string) => {
form_data.value.otherExtensions = form_data.value.otherExtensions.filter((item) => item !== tag)
}
const showInput = () => {
inputVisible.value = true
nextTick(() => {
InputRef.value!.input!.focus()
})
}
const handleInputConfirm = () => {
if (inputValue.value) {
inputValue.value = inputValue.value.toUpperCase()
if (
form_data.value.otherExtensions.includes(inputValue.value) ||
documentExtensions.includes(inputValue.value) ||
imageExtensions.includes(inputValue.value) ||
audioExtensions.includes(inputValue.value)
) {
inputVisible.value = false
inputValue.value = ''
MsgWarning(t('common.fileUpload.existingExtensionsTip'))
return
}
form_data.value.otherExtensions.push(inputValue.value)
}
inputVisible.value = false
inputValue.value = ''
}
async function submit() {
const formEl = fieldFormRef.value
if (!formEl) return
await formEl.validate().then(() => {
emit('refresh', form_data.value)
const formattedData = cloneDeep(form_data.value)
emit('refresh', formattedData)
// emit('refresh', form_data.value)
props.nodeModel.graphModel.eventCenter.emit('refreshFileUploadConfig')
dialogVisible.value = false
})

View File

@ -314,7 +314,9 @@ const switchFileUpload = () => {
document: true,
image: false,
audio: false,
video: false
video: false,
other: false,
otherExtensions: ['PPT', 'DOC'] // upper case, to match the upper-cased extension check
}
if (form_data.value.file_upload_enable) {

View File

@ -78,7 +78,8 @@ const refreshFileUploadConfig = () => {
item.value !== 'image' &&
item.value !== 'document' &&
item.value !== 'audio' &&
item.value !== 'video'
item.value !== 'video' &&
item.value !== 'other'
)
if (form_data.length === 0) {
@ -98,6 +99,9 @@ const refreshFileUploadConfig = () => {
if (form_data[0].video) {
fileUploadFields.push({ label: t('common.fileUpload.video'), value: 'video' })
}
if (form_data[0].other) {
fileUploadFields.push({ label: t('common.fileUpload.other'), value: 'other' })
}
set(props.nodeModel.properties.config, 'fields', [...fields, ...fileUploadFields])
}