From b5fda0e0207640d870aae10222775378dd772ef9 Mon Sep 17 00:00:00 2001
From: CaptainB
Date: Fri, 18 Apr 2025 13:54:31 +0800
Subject: [PATCH 01/32] chore: add dependabot configuration for weekly pip
updates for v2
---
.github/dependabot.yml | 9 +++++++++
1 file changed, 9 insertions(+)
create mode 100644 .github/dependabot.yml
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
new file mode 100644
index 000000000..2a5e6736d
--- /dev/null
+++ b/.github/dependabot.yml
@@ -0,0 +1,9 @@
+version: 2
+updates:
+ - package-ecosystem: "pip"
+ directory: "/"
+ schedule:
+ interval: "weekly"
+ timezone: "Asia/Shanghai"
+ day: "friday"
+ target-branch: "v2"
\ No newline at end of file
From 2ecec57d2f7d3393ea7df3f5b49e944e89897c60 Mon Sep 17 00:00:00 2001
From: maninhill <41712985+maninhill@users.noreply.github.com>
Date: Fri, 18 Apr 2025 17:34:11 +0800
Subject: [PATCH 02/32] chore: Update README.md (#2927)
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index f365fb37b..06025c069 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@
MaxKB = Max Knowledge Brain, it is a powerful and easy-to-use AI assistant that integrates Retrieval-Augmented Generation (RAG) pipelines, supports robust workflows, and provides advanced MCP tool-use capabilities. MaxKB is widely applied in scenarios such as intelligent customer service, corporate internal knowledge bases, academic research, and education.
- **RAG Pipeline**: Supports direct uploading of documents / automatic crawling of online documents, with features for automatic text splitting and vectorization. This effectively reduces hallucinations in large models, providing a superior smart Q&A interaction experience.
-- **Flexible Orchestration**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
+- **Agentic Workflow**: Equipped with a powerful workflow engine, function library and MCP tool-use, enabling the orchestration of AI processes to meet the needs of complex business scenarios.
- **Seamless Integration**: Facilitates zero-coding rapid integration into third-party business systems, quickly equipping existing systems with intelligent Q&A capabilities to enhance user satisfaction.
- **Model-Agnostic**: Supports various large models, including private models (such as DeepSeek, Llama, Qwen, etc.) and public models (like OpenAI, Claude, Gemini, etc.).
- **Multi Modal**: Native support for text, image, audio, and video input and output.
From bf52dd817498a1616b13b329d6e5728fb0f10d44 Mon Sep 17 00:00:00 2001
From: wxg0103 <727495428@qq.com>
Date: Mon, 21 Apr 2025 10:19:42 +0800
Subject: [PATCH 03/32] fix: i18n error
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
--bug=1054853 --user=王孝刚 [API documentation] Incorrect name for the knowledge base document paginated-list endpoint https://www.tapd.cn/57709429/s/1688248
---
apps/locales/zh_CN/LC_MESSAGES/django.po | 2 +-
apps/locales/zh_Hant/LC_MESSAGES/django.po | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/apps/locales/zh_CN/LC_MESSAGES/django.po b/apps/locales/zh_CN/LC_MESSAGES/django.po
index b0ab7871b..75fec7099 100644
--- a/apps/locales/zh_CN/LC_MESSAGES/django.po
+++ b/apps/locales/zh_CN/LC_MESSAGES/django.po
@@ -4536,7 +4536,7 @@ msgstr "修改知识库信息"
#: community/apps/dataset/views/document.py:463
#: community/apps/dataset/views/document.py:464
msgid "Get the knowledge base paginated list"
-msgstr "获取知识库分页列表"
+msgstr "获取知识库文档分页列表"
#: community/apps/dataset/views/document.py:31
#: community/apps/dataset/views/document.py:32
diff --git a/apps/locales/zh_Hant/LC_MESSAGES/django.po b/apps/locales/zh_Hant/LC_MESSAGES/django.po
index dab1d176c..870f282ae 100644
--- a/apps/locales/zh_Hant/LC_MESSAGES/django.po
+++ b/apps/locales/zh_Hant/LC_MESSAGES/django.po
@@ -4545,7 +4545,7 @@ msgstr "修改知識庫信息"
#: community/apps/dataset/views/document.py:463
#: community/apps/dataset/views/document.py:464
msgid "Get the knowledge base paginated list"
-msgstr "獲取知識庫分頁列表"
+msgstr "獲取知識庫文档分頁列表"
#: community/apps/dataset/views/document.py:31
#: community/apps/dataset/views/document.py:32
From 2550324003b85f4abe3b9e122053d71e8feae855 Mon Sep 17 00:00:00 2001
From: wxg0103 <727495428@qq.com>
Date: Mon, 21 Apr 2025 17:45:49 +0800
Subject: [PATCH 04/32] refactor: add field_mapping to OIDC
---
.../views/authentication/component/OIDC.vue | 23 +++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/ui/src/views/authentication/component/OIDC.vue b/ui/src/views/authentication/component/OIDC.vue
index 2666bc647..d71158b9a 100644
--- a/ui/src/views/authentication/component/OIDC.vue
+++ b/ui/src/views/authentication/component/OIDC.vue
@@ -61,6 +61,15 @@
show-password
/>
+
+
+
({
state: '',
clientId: '',
clientSecret: '',
+ fieldMapping: '{"username": "preferred_username", "email": "email"}',
redirectUrl: ''
},
is_active: true
@@ -156,6 +166,13 @@ const rules = reactive<FormRules<any>>({
trigger: 'blur'
}
],
+ 'config_data.fieldMapping': [
+ {
+ required: true,
+ message: t('views.system.authentication.oauth2.filedMappingPlaceholder'),
+ trigger: 'blur'
+ }
+ ],
'config_data.redirectUrl': [
{
required: true,
@@ -187,6 +204,12 @@ function getDetail() {
authApi.getAuthSetting(form.value.auth_type, loading).then((res: any) => {
if (res.data && JSON.stringify(res.data) !== '{}') {
form.value = res.data
+ if (
+ form.value.config_data.fieldMapping === '' ||
+ form.value.config_data.fieldMapping === undefined
+ ) {
+ form.value.config_data.fieldMapping = '{"username": "preferred_username", "email": "email"}'
+ }
}
})
}
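The fieldMapping value is stored as a JSON string that maps local user fields to OIDC claim names. Below is a minimal sketch of how such a mapping could be applied to provider claims on the backend, assuming a decoded userinfo dict; the helper name and sample claims are illustrative assumptions, not code from this patch:

```python
import json

def apply_field_mapping(field_mapping: str, claims: dict) -> dict:
    # field_mapping is the JSON string configured in the OIDC form, e.g.
    # '{"username": "preferred_username", "email": "email"}'.
    mapping = json.loads(field_mapping or '{}')
    # For each local field, look up the mapped claim; missing claims yield
    # None so the caller can decide how to handle incomplete profiles.
    return {local: claims.get(claim) for local, claim in mapping.items()}

print(apply_field_mapping(
    '{"username": "preferred_username", "email": "email"}',
    {'preferred_username': 'alice', 'email': 'alice@example.com', 'sub': '42'},
))
# -> {'username': 'alice', 'email': 'alice@example.com'}
```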
From d2637c3de2691cba72f35e209501e0fc784836d0 Mon Sep 17 00:00:00 2001
From: shaohuzhang1 <80892890+shaohuzhang1@users.noreply.github.com>
Date: Mon, 21 Apr 2025 18:06:09 +0800
Subject: [PATCH 05/32] fix: Model parameters are not effective (#2937)
---
.../models_provider/base_model_provider.py | 5 +-
.../model/image.py | 3 +-
.../model/llm.py | 2 +-
.../models_provider/impl/base_chat_open_ai.py | 228 ++++++++++--------
.../impl/deepseek_model_provider/model/llm.py | 2 +-
.../impl/kimi_model_provider/model/llm.py | 3 +-
.../impl/ollama_model_provider/model/image.py | 2 +-
.../impl/openai_model_provider/model/image.py | 2 +-
.../impl/openai_model_provider/model/llm.py | 7 +-
.../impl/qwen_model_provider/model/image.py | 3 +-
.../impl/qwen_model_provider/model/llm.py | 2 +-
.../model/image.py | 2 +-
.../siliconCloud_model_provider/model/llm.py | 2 +-
.../tencent_cloud_model_provider/model/llm.py | 16 +-
.../tencent_model_provider/model/image.py | 2 +-
.../impl/vllm_model_provider/model/image.py | 2 +-
.../impl/vllm_model_provider/model/llm.py | 13 +-
.../model/image.py | 2 +-
.../model/llm.py | 2 +-
.../xinference_model_provider/model/image.py | 2 +-
.../xinference_model_provider/model/llm.py | 2 +-
.../impl/zhipu_model_provider/model/image.py | 2 +-
22 files changed, 164 insertions(+), 142 deletions(-)
diff --git a/apps/setting/models_provider/base_model_provider.py b/apps/setting/models_provider/base_model_provider.py
index 622be703d..2b02bdc1f 100644
--- a/apps/setting/models_provider/base_model_provider.py
+++ b/apps/setting/models_provider/base_model_provider.py
@@ -106,7 +106,10 @@ class MaxKBBaseModel(ABC):
optional_params = {}
for key, value in model_kwargs.items():
if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
- optional_params[key] = value
+ if key == 'extra_body' and isinstance(value, dict):
+ optional_params = {**optional_params, **value}
+ else:
+ optional_params[key] = value
return optional_params
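The new branch flattens a nested extra_body dict into the top-level optional parameters instead of forwarding it under its own key. A standalone sketch of the same filtering logic, reproduced outside the class for illustration:

```python
def filter_optional_params(model_kwargs: dict) -> dict:
    # Mirrors MaxKBBaseModel.filter_optional_params after this patch:
    # reserved keys are dropped, and an 'extra_body' dict is merged into
    # the result rather than kept as a nested mapping.
    optional_params = {}
    for key, value in model_kwargs.items():
        if key not in ['model_id', 'use_local', 'streaming', 'show_ref_label']:
            if key == 'extra_body' and isinstance(value, dict):
                optional_params = {**optional_params, **value}
            else:
                optional_params[key] = value
    return optional_params

print(filter_optional_params({
    'model_id': 'abc',            # dropped: reserved key
    'temperature': 0.7,           # kept as-is
    'extra_body': {'top_k': 20},  # merged into the top level
}))
# -> {'temperature': 0.7, 'top_k': 20}
```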
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
index 2b1fe31f2..7cda97f23 100644
--- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/image.py
@@ -15,9 +15,8 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
- # stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
index d914f7c8a..ee3ee6488 100644
--- a/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/aliyun_bai_lian_model_provider/model/llm.py
@@ -20,5 +20,5 @@ class BaiLianChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
- **optional_params
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/base_chat_open_ai.py b/apps/setting/models_provider/impl/base_chat_open_ai.py
index 54076b7ef..e0d0e762e 100644
--- a/apps/setting/models_provider/impl/base_chat_open_ai.py
+++ b/apps/setting/models_provider/impl/base_chat_open_ai.py
@@ -1,15 +1,16 @@
# coding=utf-8
-import warnings
-from typing import List, Dict, Optional, Any, Iterator, cast, Type, Union
+from typing import Dict, Optional, Any, Iterator, cast, Union, Sequence, Callable, Mapping
-import openai
-from langchain_core.callbacks import CallbackManagerForLLMRun
from langchain_core.language_models import LanguageModelInput
-from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, AIMessageChunk
-from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
+from langchain_core.messages import BaseMessage, get_buffer_string, BaseMessageChunk, HumanMessageChunk, AIMessageChunk, \
+ SystemMessageChunk, FunctionMessageChunk, ChatMessageChunk
+from langchain_core.messages.ai import UsageMetadata
+from langchain_core.messages.tool import tool_call_chunk, ToolMessageChunk
+from langchain_core.outputs import ChatGenerationChunk
from langchain_core.runnables import RunnableConfig, ensure_config
-from langchain_core.utils.pydantic import is_basemodel_subclass
+from langchain_core.tools import BaseTool
from langchain_openai import ChatOpenAI
+from langchain_openai.chat_models.base import _create_usage_metadata
from common.config.tokenizer_manage_config import TokenizerManage
@@ -19,6 +20,64 @@ def custom_get_token_ids(text: str):
return tokenizer.encode(text)
+def _convert_delta_to_message_chunk(
+ _dict: Mapping[str, Any], default_class: type[BaseMessageChunk]
+) -> BaseMessageChunk:
+ id_ = _dict.get("id")
+ reasoning_content = cast(str, _dict.get("reasoning_content") or "")
+ role = cast(str, _dict.get("role"))
+ content = cast(str, _dict.get("content") or "")
+ additional_kwargs: dict = {'reasoning_content': reasoning_content}
+ if _dict.get("function_call"):
+ function_call = dict(_dict["function_call"])
+ if "name" in function_call and function_call["name"] is None:
+ function_call["name"] = ""
+ additional_kwargs["function_call"] = function_call
+ tool_call_chunks = []
+ if raw_tool_calls := _dict.get("tool_calls"):
+ additional_kwargs["tool_calls"] = raw_tool_calls
+ try:
+ tool_call_chunks = [
+ tool_call_chunk(
+ name=rtc["function"].get("name"),
+ args=rtc["function"].get("arguments"),
+ id=rtc.get("id"),
+ index=rtc["index"],
+ )
+ for rtc in raw_tool_calls
+ ]
+ except KeyError:
+ pass
+
+ if role == "user" or default_class == HumanMessageChunk:
+ return HumanMessageChunk(content=content, id=id_)
+ elif role == "assistant" or default_class == AIMessageChunk:
+ return AIMessageChunk(
+ content=content,
+ additional_kwargs=additional_kwargs,
+ id=id_,
+ tool_call_chunks=tool_call_chunks, # type: ignore[arg-type]
+ )
+ elif role in ("system", "developer") or default_class == SystemMessageChunk:
+ if role == "developer":
+ additional_kwargs = {"__openai_role__": "developer"}
+ else:
+ additional_kwargs = {}
+ return SystemMessageChunk(
+ content=content, id=id_, additional_kwargs=additional_kwargs
+ )
+ elif role == "function" or default_class == FunctionMessageChunk:
+ return FunctionMessageChunk(content=content, name=_dict["name"], id=id_)
+ elif role == "tool" or default_class == ToolMessageChunk:
+ return ToolMessageChunk(
+ content=content, tool_call_id=_dict["tool_call_id"], id=id_
+ )
+ elif role or default_class == ChatMessageChunk:
+ return ChatMessageChunk(content=content, role=role, id=id_)
+ else:
+ return default_class(content=content, id=id_) # type: ignore
+
+
class BaseChatOpenAI(ChatOpenAI):
usage_metadata: dict = {}
custom_get_token_ids = custom_get_token_ids
@@ -26,7 +85,13 @@ class BaseChatOpenAI(ChatOpenAI):
def get_last_generation_info(self) -> Optional[Dict[str, Any]]:
return self.usage_metadata
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ def get_num_tokens_from_messages(
+ self,
+ messages: list[BaseMessage],
+ tools: Optional[
+ Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+ ] = None,
+ ) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
try:
return super().get_num_tokens_from_messages(messages)
@@ -44,114 +109,77 @@ class BaseChatOpenAI(ChatOpenAI):
return len(tokenizer.encode(text))
return self.get_last_generation_info().get('output_tokens', 0)
- def _stream(
+ def _stream(self, *args: Any, **kwargs: Any) -> Iterator[ChatGenerationChunk]:
+ kwargs['stream_usage'] = True
+ for chunk in super()._stream(*args, **kwargs):
+ if chunk.message.usage_metadata is not None:
+ self.usage_metadata = chunk.message.usage_metadata
+ yield chunk
+
+ def _convert_chunk_to_generation_chunk(
self,
- messages: List[BaseMessage],
- stop: Optional[List[str]] = None,
- run_manager: Optional[CallbackManagerForLLMRun] = None,
- **kwargs: Any,
- ) -> Iterator[ChatGenerationChunk]:
- kwargs["stream"] = True
- kwargs["stream_options"] = {"include_usage": True}
- """Set default stream_options."""
- stream_usage = self._should_stream_usage(kwargs.get('stream_usage'), **kwargs)
- # Note: stream_options is not a valid parameter for Azure OpenAI.
- # To support users proxying Azure through ChatOpenAI, here we only specify
- # stream_options if include_usage is set to True.
- # See https://learn.microsoft.com/en-us/azure/ai-services/openai/whats-new
- # for release notes.
- if stream_usage:
- kwargs["stream_options"] = {"include_usage": stream_usage}
+ chunk: dict,
+ default_chunk_class: type,
+ base_generation_info: Optional[dict],
+ ) -> Optional[ChatGenerationChunk]:
+ if chunk.get("type") == "content.delta": # from beta.chat.completions.stream
+ return None
+ token_usage = chunk.get("usage")
+ choices = (
+ chunk.get("choices", [])
+ # from beta.chat.completions.stream
+ or chunk.get("chunk", {}).get("choices", [])
+ )
- payload = self._get_request_payload(messages, stop=stop, **kwargs)
- default_chunk_class: Type[BaseMessageChunk] = AIMessageChunk
- base_generation_info = {}
-
- if "response_format" in payload and is_basemodel_subclass(
- payload["response_format"]
- ):
- # TODO: Add support for streaming with Pydantic response_format.
- warnings.warn("Streaming with Pydantic response_format not yet supported.")
- chat_result = self._generate(
- messages, stop, run_manager=run_manager, **kwargs
+ usage_metadata: Optional[UsageMetadata] = (
+ _create_usage_metadata(token_usage) if token_usage else None
+ )
+ if len(choices) == 0:
+ # logprobs is implicitly None
+ generation_chunk = ChatGenerationChunk(
+ message=default_chunk_class(content="", usage_metadata=usage_metadata)
)
- msg = chat_result.generations[0].message
- yield ChatGenerationChunk(
- message=AIMessageChunk(
- **msg.dict(exclude={"type", "additional_kwargs"}),
- # preserve the "parsed" Pydantic object without converting to dict
- additional_kwargs=msg.additional_kwargs,
- ),
- generation_info=chat_result.generations[0].generation_info,
- )
- return
- if self.include_response_headers:
- raw_response = self.client.with_raw_response.create(**payload)
- response = raw_response.parse()
- base_generation_info = {"headers": dict(raw_response.headers)}
- else:
- response = self.client.create(**payload)
- with response:
- is_first_chunk = True
- for chunk in response:
- if not isinstance(chunk, dict):
- chunk = chunk.model_dump()
+ return generation_chunk
- generation_chunk = super()._convert_chunk_to_generation_chunk(
- chunk,
- default_chunk_class,
- base_generation_info if is_first_chunk else {},
- )
- if generation_chunk is None:
- continue
+ choice = choices[0]
+ if choice["delta"] is None:
+ return None
- # custom code
- if len(chunk['choices']) > 0 and 'reasoning_content' in chunk['choices'][0]['delta']:
- generation_chunk.message.additional_kwargs["reasoning_content"] = chunk['choices'][0]['delta'][
- 'reasoning_content']
+ message_chunk = _convert_delta_to_message_chunk(
+ choice["delta"], default_chunk_class
+ )
+ generation_info = {**base_generation_info} if base_generation_info else {}
- default_chunk_class = generation_chunk.message.__class__
- logprobs = (generation_chunk.generation_info or {}).get("logprobs")
- if run_manager:
- run_manager.on_llm_new_token(
- generation_chunk.text, chunk=generation_chunk, logprobs=logprobs
- )
- is_first_chunk = False
- # custom code
- if generation_chunk.message.usage_metadata is not None:
- self.usage_metadata = generation_chunk.message.usage_metadata
- yield generation_chunk
+ if finish_reason := choice.get("finish_reason"):
+ generation_info["finish_reason"] = finish_reason
+ if model_name := chunk.get("model"):
+ generation_info["model_name"] = model_name
+ if system_fingerprint := chunk.get("system_fingerprint"):
+ generation_info["system_fingerprint"] = system_fingerprint
- def _create_chat_result(self,
- response: Union[dict, openai.BaseModel],
- generation_info: Optional[Dict] = None):
- result = super()._create_chat_result(response, generation_info)
- try:
- reasoning_content = ''
- reasoning_content_enable = False
- for res in response.choices:
- if 'reasoning_content' in res.message.model_extra:
- reasoning_content_enable = True
- _reasoning_content = res.message.model_extra.get('reasoning_content')
- if _reasoning_content is not None:
- reasoning_content += _reasoning_content
- if reasoning_content_enable:
- result.llm_output['reasoning_content'] = reasoning_content
- except Exception as e:
- pass
- return result
+ logprobs = choice.get("logprobs")
+ if logprobs:
+ generation_info["logprobs"] = logprobs
+
+ if usage_metadata and isinstance(message_chunk, AIMessageChunk):
+ message_chunk.usage_metadata = usage_metadata
+
+ generation_chunk = ChatGenerationChunk(
+ message=message_chunk, generation_info=generation_info or None
+ )
+ return generation_chunk
def invoke(
self,
input: LanguageModelInput,
config: Optional[RunnableConfig] = None,
*,
- stop: Optional[List[str]] = None,
+ stop: Optional[list[str]] = None,
**kwargs: Any,
) -> BaseMessage:
config = ensure_config(config)
chat_result = cast(
- ChatGeneration,
+ "ChatGeneration",
self.generate_prompt(
[self._convert_input(input)],
stop=stop,
@@ -162,7 +190,9 @@ class BaseChatOpenAI(ChatOpenAI):
run_id=config.pop("run_id", None),
**kwargs,
).generations[0][0],
+
).message
+
self.usage_metadata = chat_result.response_metadata[
'token_usage'] if 'token_usage' in chat_result.response_metadata else chat_result.usage_metadata
return chat_result
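With streaming and usage accounting handled in BaseChatOpenAI, the provider subclasses below switch from spreading **optional_params into constructor kwargs to passing extra_body=optional_params, so vendor-specific parameters are merged into the JSON request body instead of being rejected as unknown client arguments. A minimal sketch of the pattern; the model name, endpoint, credential, and vendor parameter are placeholders, not values from this patch:

```python
from langchain_openai import ChatOpenAI

# extra_body passes through the OpenAI client untouched and is merged into
# the request payload sent to the OpenAI-compatible endpoint.
llm = ChatOpenAI(
    model='qwen-plus',  # placeholder model served by a compatible endpoint
    base_url='https://dashscope.aliyuncs.com/compatible-mode/v1',
    api_key='sk-...',   # placeholder credential
    streaming=True,
    extra_body={'top_k': 20},  # vendor-specific parameter, for illustration
)
```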
diff --git a/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py b/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py
index 9db4faca7..081d648a7 100644
--- a/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/deepseek_model_provider/model/llm.py
@@ -26,6 +26,6 @@ class DeepSeekChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base='https://api.deepseek.com',
openai_api_key=model_credential.get('api_key'),
- **optional_params
+ extra_body=optional_params
)
return deepseek_chat_open_ai
diff --git a/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py b/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py
index c389c177e..c0ce2ec02 100644
--- a/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/kimi_model_provider/model/llm.py
@@ -21,11 +21,10 @@ class KimiChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
-
kimi_chat_open_ai = KimiChatModel(
openai_api_base=model_credential['api_base'],
openai_api_key=model_credential['api_key'],
model_name=model_name,
- **optional_params
+ extra_body=optional_params,
)
return kimi_chat_open_ai
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
index 4cf0f1d56..215ce0130 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
@@ -28,5 +28,5 @@ class OllamaImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/image.py b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
index 731f476c4..7ac0906a7 100644
--- a/apps/setting/models_provider/impl/openai_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/image.py
@@ -16,5 +16,5 @@ class OpenAIImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/openai_model_provider/model/llm.py b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py
index 2e6dd89ac..189385210 100644
--- a/apps/setting/models_provider/impl/openai_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/openai_model_provider/model/llm.py
@@ -9,7 +9,6 @@
from typing import List, Dict
from langchain_core.messages import BaseMessage, get_buffer_string
-from langchain_openai.chat_models import ChatOpenAI
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -35,9 +34,9 @@ class OpenAIChatModel(MaxKBBaseModel, BaseChatOpenAI):
streaming = False
azure_chat_open_ai = OpenAIChatModel(
model=model_name,
- openai_api_base=model_credential.get('api_base'),
- openai_api_key=model_credential.get('api_key'),
- **optional_params,
+ base_url=model_credential.get('api_base'),
+ api_key=model_credential.get('api_key'),
+ extra_body=optional_params,
streaming=streaming,
custom_get_token_ids=custom_get_token_ids
)
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
index 97166757e..bf3af0e34 100644
--- a/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/image.py
@@ -18,9 +18,8 @@ class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
- # stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py b/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py
index 3b66ddfd6..c4df28af9 100644
--- a/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/qwen_model_provider/model/llm.py
@@ -26,6 +26,6 @@ class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
return chat_tong_yi
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py
index bb840f8c6..2ec0689d4 100644
--- a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/image.py
@@ -16,5 +16,5 @@ class SiliconCloudImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py
index 9d79c6e07..6fb0c7816 100644
--- a/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/siliconCloud_model_provider/model/llm.py
@@ -34,5 +34,5 @@ class SiliconCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
- **optional_params
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py b/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py
index 7653cfc2f..cfcdf7aca 100644
--- a/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/tencent_cloud_model_provider/model/llm.py
@@ -33,21 +33,7 @@ class TencentCloudChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
- **optional_params,
+ extra_body=optional_params,
custom_get_token_ids=custom_get_token_ids
)
return azure_chat_open_ai
-
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
- try:
- return super().get_num_tokens_from_messages(messages)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
-
- def get_num_tokens(self, text: str) -> int:
- try:
- return super().get_num_tokens(text)
- except Exception as e:
- tokenizer = TokenizerManage.get_tokenizer()
- return len(tokenizer.encode(text))
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
index 1b66ab6d2..6800cdd56 100644
--- a/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/image.py
@@ -16,5 +16,5 @@ class TencentVision(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/image.py b/apps/setting/models_provider/impl/vllm_model_provider/model/image.py
index 4d5dda29d..c8cb0a84d 100644
--- a/apps/setting/models_provider/impl/vllm_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/vllm_model_provider/model/image.py
@@ -19,7 +19,7 @@ class VllmImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
def is_cache_model(self):
diff --git a/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py b/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py
index 7d2a63acd..4662a6169 100644
--- a/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/vllm_model_provider/model/llm.py
@@ -1,9 +1,10 @@
# coding=utf-8
-from typing import Dict, List
+from typing import Dict, Optional, Sequence, Union, Any, Callable
from urllib.parse import urlparse, ParseResult
from langchain_core.messages import BaseMessage, get_buffer_string
+from langchain_core.tools import BaseTool
from common.config.tokenizer_manage_config import TokenizerManage
from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -31,13 +32,19 @@ class VllmChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
- **optional_params,
streaming=True,
stream_usage=True,
+ extra_body=optional_params
)
return vllm_chat_open_ai
- def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
+ def get_num_tokens_from_messages(
+ self,
+ messages: list[BaseMessage],
+ tools: Optional[
+ Sequence[Union[dict[str, Any], type, Callable, BaseTool]]
+ ] = None,
+ ) -> int:
if self.usage_metadata is None or self.usage_metadata == {}:
tokenizer = TokenizerManage.get_tokenizer()
return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages])
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
index 39446b4e1..6e2517bd4 100644
--- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/image.py
@@ -16,5 +16,5 @@ class VolcanicEngineImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py
index 181ad2971..8f089f269 100644
--- a/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/volcanic_engine_model_provider/model/llm.py
@@ -17,5 +17,5 @@ class VolcanicEngineChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=model_credential.get('api_base'),
openai_api_key=model_credential.get('api_key'),
- **optional_params
+ extra_body=optional_params
)
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/image.py b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
index a195b8649..66a766ba8 100644
--- a/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/image.py
@@ -19,7 +19,7 @@ class XinferenceImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
diff --git a/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py b/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py
index d76979bd3..9c0316ad2 100644
--- a/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/xinference_model_provider/model/llm.py
@@ -34,7 +34,7 @@ class XinferenceChatModel(MaxKBBaseModel, BaseChatOpenAI):
model=model_name,
openai_api_base=base_url,
openai_api_key=model_credential.get('api_key'),
- **optional_params
+ extra_body=optional_params
)
def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
diff --git a/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
index f13c71538..6ac7830d8 100644
--- a/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/zhipu_model_provider/model/image.py
@@ -16,5 +16,5 @@ class ZhiPuImage(MaxKBBaseModel, BaseChatOpenAI):
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
- **optional_params,
+ extra_body=optional_params
)
From 54c9d4e725532007981293794fc891ac56261282 Mon Sep 17 00:00:00 2001
From: wangdan-fit2cloud <79562285+wangdan-fit2cloud@users.noreply.github.com>
Date: Mon, 21 Apr 2025 20:30:40 +0800
Subject: [PATCH 06/32] feat: Support uploading files via copy-paste and drag-and-drop (#2939)
---
.../component/chat-input-operate/index.vue | 53 +++++++++++++++++++
1 file changed, 53 insertions(+)
diff --git a/ui/src/components/ai-chat/component/chat-input-operate/index.vue b/ui/src/components/ai-chat/component/chat-input-operate/index.vue
index acf3085ed..2face1708 100644
--- a/ui/src/components/ai-chat/component/chat-input-operate/index.vue
+++ b/ui/src/components/ai-chat/component/chat-input-operate/index.vue
@@ -137,6 +137,8 @@
type="textarea"
:maxlength="100000"
@keydown.enter="sendChatHandle($event)"
+ @paste="handlePaste"
+ @drop="handleDrop"
/>
@@ -188,6 +190,7 @@
:show-file-list="false"
:accept="getAcceptList()"
:on-change="(file: any, fileList: any) => uploadFile(file, fileList)"
+ ref="upload"
>
{
}
})
}
+// Paste handling
+const handlePaste = (event: ClipboardEvent) => {
+ if (!props.applicationDetails.file_upload_enable) return
+ const clipboardData = event.clipboardData
+ if (!clipboardData) return
+
+  // Get the files from the clipboard
+ const files = clipboardData.files
+ if (files.length === 0) return
+
+  // Convert the FileList to an array and process each file
+ Array.from(files).forEach((rawFile: File) => {
+    // Build a file object in the shape el-upload expects
+    const elFile = {
+      uid: Date.now(), // generate a unique id
+      name: rawFile.name,
+      size: rawFile.size,
+      raw: rawFile, // the raw File object
+      status: 'ready', // file status
+      percentage: 0 // upload progress
+ }
+
+    // Manually trigger the upload logic (simulating the on-change event)
+ uploadFile(elFile, [elFile])
+ })
+
+  // Prevent the default paste behavior
+ event.preventDefault()
+}
+// Drag-and-drop handling
+const handleDrop = (event: DragEvent) => {
+ if (!props.applicationDetails.file_upload_enable) return
+ event.preventDefault()
+ const files = event.dataTransfer?.files
+ if (!files) return
+
+ Array.from(files).forEach((rawFile) => {
+ const elFile = {
+ uid: Date.now(),
+ name: rawFile.name,
+ size: rawFile.size,
+ raw: rawFile,
+ status: 'ready',
+ percentage: 0
+ }
+ uploadFile(elFile, [elFile])
+ })
+}
// Voice recording task id
const intervalId = ref(null)
// Seconds elapsed since voice recording started
From d32f7d36a6ed6e4bcd13790dc2fe8e9f368aa369 Mon Sep 17 00:00:00 2001
From: CaptainB
Date: Mon, 21 Apr 2025 18:58:46 +0800
Subject: [PATCH 07/32] feat: add support for uploading other file types and
extend file upload settings
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
--story=1018411 --user=刘瑞斌 [Workflow application] File upload supports other file types https://www.tapd.cn/57709429/s/1688679
---
.../start_node/impl/base_start_node.py | 5 +-
apps/application/flow/workflow_manage.py | 4 +
.../serializers/chat_message_serializers.py | 5 +-
apps/application/views/chat_views.py | 2 +
.../component/chat-input-operate/index.vue | 71 +++++++++++-
ui/src/locales/lang/en-US/common.ts | 6 +-
ui/src/locales/lang/zh-CN/common.ts | 4 +-
ui/src/locales/lang/zh-Hant/common.ts | 4 +-
.../component/FileUploadSettingDialog.vue | 101 +++++++++++++++---
ui/src/workflow/nodes/base-node/index.vue | 4 +-
ui/src/workflow/nodes/start-node/index.vue | 6 +-
11 files changed, 185 insertions(+), 27 deletions(-)
diff --git a/apps/application/flow/step_node/start_node/impl/base_start_node.py b/apps/application/flow/step_node/start_node/impl/base_start_node.py
index bf5203274..bd5bcbeee 100644
--- a/apps/application/flow/step_node/start_node/impl/base_start_node.py
+++ b/apps/application/flow/step_node/start_node/impl/base_start_node.py
@@ -40,6 +40,7 @@ class BaseStartStepNode(IStarNode):
self.context['document'] = details.get('document_list')
self.context['image'] = details.get('image_list')
self.context['audio'] = details.get('audio_list')
+ self.context['other'] = details.get('other_list')
self.status = details.get('status')
self.err_message = details.get('err_message')
for key, value in workflow_variable.items():
@@ -59,7 +60,8 @@ class BaseStartStepNode(IStarNode):
'question': question,
'image': self.workflow_manage.image_list,
'document': self.workflow_manage.document_list,
- 'audio': self.workflow_manage.audio_list
+ 'audio': self.workflow_manage.audio_list,
+ 'other': self.workflow_manage.other_list,
}
return NodeResult(node_variable, workflow_variable)
@@ -83,5 +85,6 @@ class BaseStartStepNode(IStarNode):
'image_list': self.context.get('image'),
'document_list': self.context.get('document'),
'audio_list': self.context.get('audio'),
+ 'other_list': self.context.get('other'),
'global_fields': global_fields
}
diff --git a/apps/application/flow/workflow_manage.py b/apps/application/flow/workflow_manage.py
index be91f69be..0f7bc9c75 100644
--- a/apps/application/flow/workflow_manage.py
+++ b/apps/application/flow/workflow_manage.py
@@ -238,6 +238,7 @@ class WorkflowManage:
base_to_response: BaseToResponse = SystemToResponse(), form_data=None, image_list=None,
document_list=None,
audio_list=None,
+ other_list=None,
start_node_id=None,
start_node_data=None, chat_record=None, child_node=None):
if form_data is None:
@@ -248,12 +249,15 @@ class WorkflowManage:
document_list = []
if audio_list is None:
audio_list = []
+ if other_list is None:
+ other_list = []
self.start_node_id = start_node_id
self.start_node = None
self.form_data = form_data
self.image_list = image_list
self.document_list = document_list
self.audio_list = audio_list
+ self.other_list = other_list
self.params = params
self.flow = flow
self.context = {}
diff --git a/apps/application/serializers/chat_message_serializers.py b/apps/application/serializers/chat_message_serializers.py
index 2194028e6..3eaf9a1ae 100644
--- a/apps/application/serializers/chat_message_serializers.py
+++ b/apps/application/serializers/chat_message_serializers.py
@@ -245,6 +245,7 @@ class OpenAIChatSerializer(serializers.Serializer):
'image_list': instance.get('image_list', []),
'document_list': instance.get('document_list', []),
'audio_list': instance.get('audio_list', []),
+ 'other_list': instance.get('other_list', []),
}
).chat(base_to_response=OpenaiToResponse())
@@ -274,6 +275,7 @@ class ChatMessageSerializer(serializers.Serializer):
image_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("picture")))
document_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("document")))
audio_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Audio")))
+ other_list = serializers.ListField(required=False, error_messages=ErrMessage.list(_("Other")))
child_node = serializers.DictField(required=False, allow_null=True,
error_messages=ErrMessage.dict(_("Child Nodes")))
@@ -372,6 +374,7 @@ class ChatMessageSerializer(serializers.Serializer):
image_list = self.data.get('image_list')
document_list = self.data.get('document_list')
audio_list = self.data.get('audio_list')
+ other_list = self.data.get('other_list')
user_id = chat_info.application.user_id
chat_record_id = self.data.get('chat_record_id')
chat_record = None
@@ -388,7 +391,7 @@ class ChatMessageSerializer(serializers.Serializer):
'client_id': client_id,
'client_type': client_type,
'user_id': user_id}, WorkFlowPostHandler(chat_info, client_id, client_type),
- base_to_response, form_data, image_list, document_list, audio_list,
+ base_to_response, form_data, image_list, document_list, audio_list, other_list,
self.data.get('runtime_node_id'),
self.data.get('node_data'), chat_record, self.data.get('child_node'))
r = work_flow_manage.run()
diff --git a/apps/application/views/chat_views.py b/apps/application/views/chat_views.py
index 0415f8208..77a087351 100644
--- a/apps/application/views/chat_views.py
+++ b/apps/application/views/chat_views.py
@@ -144,6 +144,8 @@ class ChatView(APIView):
'document_list') if 'document_list' in request.data else [],
'audio_list': request.data.get(
'audio_list') if 'audio_list' in request.data else [],
+ 'other_list': request.data.get(
+ 'other_list') if 'other_list' in request.data else [],
'client_type': request.auth.client_type,
'node_id': request.data.get('node_id', None),
'runtime_node_id': request.data.get('runtime_node_id', None),
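On the API side, other_list is now read from the request body alongside the existing file lists. A sketch of what a chat request payload might carry with the new field; the message key, file name, and metadata shapes are illustrative assumptions (url/file_id mirror what the frontend upload code later in this patch fills in):

```python
# Illustrative payload only; endpoint, auth, and any field names beyond the
# lists shown in this patch are assumptions.
payload = {
    'message': 'Please summarize the attached archive',
    'image_list': [],
    'document_list': [],
    'audio_list': [],
    'other_list': [
        {'name': 'report.zip', 'url': '/api/file/<file_id>', 'file_id': '<file_id>'},
    ],
}
```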
diff --git a/ui/src/components/ai-chat/component/chat-input-operate/index.vue b/ui/src/components/ai-chat/component/chat-input-operate/index.vue
index 2face1708..5babf0904 100644
--- a/ui/src/components/ai-chat/component/chat-input-operate/index.vue
+++ b/ui/src/components/ai-chat/component/chat-input-operate/index.vue
@@ -10,7 +10,8 @@
uploadDocumentList.length ||
uploadImageList.length ||
uploadAudioList.length ||
- uploadVideoList.length
+ uploadVideoList.length ||
+ uploadOtherList.length
"
>
@@ -50,6 +51,42 @@
+
+
+
+
+
+
+
+
+
+
+
+ {{ item && item?.name }}
+
+
+
+
{
- const { image, document, audio, video } = props.applicationDetails.file_upload_setting
+ const { image, document, audio, video, other } = props.applicationDetails.file_upload_setting
let accepts: any = []
if (image) {
accepts = [...imageExtensions]
@@ -326,6 +364,11 @@ const getAcceptList = () => {
if (video) {
accepts = [...accepts, ...videoExtensions]
}
+ if (other) {
+    // Other file types
+ otherExtensions = props.applicationDetails.file_upload_setting.otherExtensions
+ accepts = [...accepts, ...otherExtensions]
+ }
if (accepts.length === 0) {
return `.${t('chat.uploadFile.tipMessage')}`
@@ -339,7 +382,8 @@ const checkMaxFilesLimit = () => {
uploadImageList.value.length +
uploadDocumentList.value.length +
uploadAudioList.value.length +
- uploadVideoList.value.length
+ uploadVideoList.value.length +
+ uploadOtherList.value.length
)
}
@@ -350,7 +394,8 @@ const uploadFile = async (file: any, fileList: any) => {
uploadImageList.value.length +
uploadDocumentList.value.length +
uploadAudioList.value.length +
- uploadVideoList.value.length
+ uploadVideoList.value.length +
+ uploadOtherList.value.length
if (file_limit_once >= maxFiles) {
MsgWarning(t('chat.uploadFile.limitMessage1') + maxFiles + t('chat.uploadFile.limitMessage2'))
fileList.splice(0, fileList.length)
@@ -376,6 +421,8 @@ const uploadFile = async (file: any, fileList: any) => {
uploadVideoList.value.push(file)
} else if (audioExtensions.includes(extension)) {
uploadAudioList.value.push(file)
+ } else if (otherExtensions.includes(extension)) {
+ uploadOtherList.value.push(file)
}
if (!chatId_context.value) {
@@ -434,6 +481,15 @@ const uploadFile = async (file: any, fileList: any) => {
file.file_id = f[0].file_id
}
})
+ uploadOtherList.value.forEach((file: any) => {
+ const f = response.data.filter(
+ (f: any) => f.name.replaceAll(' ', '') === file.name.replaceAll(' ', '')
+ )
+ if (f.length > 0) {
+ file.url = f[0].url
+ file.file_id = f[0].file_id
+ }
+ })
if (!inputValue.value && uploadImageList.value.length > 0) {
inputValue.value = t('chat.uploadFile.imageMessage')
}
@@ -499,6 +555,7 @@ const uploadImageList = ref<Array<any>>([])
const uploadDocumentList = ref<Array<any>>([])
const uploadVideoList = ref<Array<any>>([])
const uploadAudioList = ref<Array<any>>([])
+const uploadOtherList = ref<Array<any>>([])
const showDelete = ref('')
@@ -709,13 +766,15 @@ function autoSendMessage() {
image_list: uploadImageList.value,
document_list: uploadDocumentList.value,
audio_list: uploadAudioList.value,
- video_list: uploadVideoList.value
+ video_list: uploadVideoList.value,
+ other_list: uploadOtherList.value,
})
inputValue.value = ''
uploadImageList.value = []
uploadDocumentList.value = []
uploadAudioList.value = []
uploadVideoList.value = []
+ uploadOtherList.value = []
if (quickInputRef.value) {
quickInputRef.value.textareaStyle.height = '45px'
}
@@ -771,6 +830,8 @@ function deleteFile(index: number, val: string) {
uploadVideoList.value.splice(index, 1)
} else if (val === 'audio') {
uploadAudioList.value.splice(index, 1)
+ } else if (val === 'other') {
+ uploadOtherList.value.splice(index, 1)
}
}
diff --git a/ui/src/locales/lang/en-US/common.ts b/ui/src/locales/lang/en-US/common.ts
index 96afd9916..4e96b7069 100644
--- a/ui/src/locales/lang/en-US/common.ts
+++ b/ui/src/locales/lang/en-US/common.ts
@@ -45,7 +45,9 @@ export default {
document: 'Documents',
image: 'Image',
audio: 'Audio',
- video: 'Video'
+ video: 'Video',
+ other: 'Other file',
+ addExtensions: 'Add file extensions',
},
status: {
label: 'Status',
@@ -55,7 +57,7 @@ export default {
param: {
outputParam: 'Output Parameters',
inputParam: 'Input Parameters',
- initParam: 'Startup Parameters',
+ initParam: 'Startup Parameters'
},
inputPlaceholder: 'Please input',
diff --git a/ui/src/locales/lang/zh-CN/common.ts b/ui/src/locales/lang/zh-CN/common.ts
index 97e25b704..a3b3b8409 100644
--- a/ui/src/locales/lang/zh-CN/common.ts
+++ b/ui/src/locales/lang/zh-CN/common.ts
@@ -45,7 +45,9 @@ export default {
document: '文档',
image: '图片',
audio: '音频',
- video: '视频'
+ video: '视频',
+ other: '其他文件',
+ addExtensions: '添加文件扩展名',
},
status: {
label: '状态',
diff --git a/ui/src/locales/lang/zh-Hant/common.ts b/ui/src/locales/lang/zh-Hant/common.ts
index 0ccbb5c11..c1a1f93a0 100644
--- a/ui/src/locales/lang/zh-Hant/common.ts
+++ b/ui/src/locales/lang/zh-Hant/common.ts
@@ -45,7 +45,9 @@ export default {
document: '文檔',
image: '圖片',
audio: '音頻',
- video: '視頻'
+ video: '視頻',
+ other: '其他文件',
+ addExtensions: '添加文件擴展名'
},
status: {
label: '狀態',
diff --git a/ui/src/workflow/nodes/base-node/component/FileUploadSettingDialog.vue b/ui/src/workflow/nodes/base-node/component/FileUploadSettingDialog.vue
index ec4f9bebd..c22043fa3 100644
--- a/ui/src/workflow/nodes/base-node/component/FileUploadSettingDialog.vue
+++ b/ui/src/workflow/nodes/base-node/component/FileUploadSettingDialog.vue
@@ -57,10 +57,11 @@
{{ $t('common.fileUpload.document') }}(TXT、MD、DOCX、HTML、CSV、XLSX、XLS、PDF)
{{
- $t(
- 'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.documentText'
- )
- }}
+ $t(
+ 'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.documentText'
+ )
+ }}
+
{{
- $t(
- 'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.imageText'
- )
- }}
+ $t(
+ 'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.imageText'
+ )
+ }}
+
@@ -109,15 +111,58 @@
{{ $t('common.fileUpload.audio') }}(MP3、WAV、OGG、ACC、M4A)
{{
- $t(
- 'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.audioText'
- )
- }}
+ $t(
+ 'views.applicationWorkflow.nodes.baseNode.FileUploadSetting.fileUploadType.audioText'
+ )
+ }}
+
+
+
+
+
+
+
+
+ {{ $t('common.fileUpload.other') }}
+
+
+
+ {{ tag }}
+
+
+
+ + {{ $t('common.fileUpload.addExtensions') }}
+
+
+
+
+
+
+
@@ -133,20 +178,28 @@
diff --git a/ui/src/views/login/reset-password/index.vue b/ui/src/views/login/reset-password/index.vue
index 2c2ff0257..576e6f340 100644
--- a/ui/src/views/login/reset-password/index.vue
+++ b/ui/src/views/login/reset-password/index.vue
@@ -1,6 +1,10 @@
-
-
+
+
{{ $t('views.login.resetPassword') }}
- {{
- $t('common.confirm')
- }}
+ {{ $t('common.confirm') }}
+