diff --git a/apps/setting/models_provider/base_model_provider.py b/apps/setting/models_provider/base_model_provider.py
index dd1cdf1d5..622be703d 100644
--- a/apps/setting/models_provider/base_model_provider.py
+++ b/apps/setting/models_provider/base_model_provider.py
@@ -11,7 +11,7 @@ from enum import Enum
 from functools import reduce
 from typing import Dict, Iterator, Type, List

-from pydantic.v1 import BaseModel
+from pydantic import BaseModel

 from common.exception.app_exception import AppApiException
 from django.utils.translation import gettext_lazy as _
diff --git a/apps/setting/models_provider/impl/base_chat_open_ai.py b/apps/setting/models_provider/impl/base_chat_open_ai.py
index 6ad1baac6..54076b7ef 100644
--- a/apps/setting/models_provider/impl/base_chat_open_ai.py
+++ b/apps/setting/models_provider/impl/base_chat_open_ai.py
@@ -10,7 +10,6 @@
 from langchain_core.outputs import ChatGenerationChunk, ChatGeneration
 from langchain_core.runnables import RunnableConfig, ensure_config
 from langchain_core.utils.pydantic import is_basemodel_subclass
 from langchain_openai import ChatOpenAI
-from langchain_openai.chat_models.base import _convert_chunk_to_generation_chunk

 from common.config.tokenizer_manage_config import TokenizerManage
@@ -98,7 +97,7 @@
             if not isinstance(chunk, dict):
                 chunk = chunk.model_dump()
-            generation_chunk = _convert_chunk_to_generation_chunk(
+            generation_chunk = super()._convert_chunk_to_generation_chunk(
                 chunk,
                 default_chunk_class,
                 base_generation_info if is_first_chunk else {},
diff --git a/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py b/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py
index 93c06fc0e..4106cc1d6 100644
--- a/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py
+++ b/apps/setting/models_provider/impl/gemini_model_provider/model/llm.py
@@ -9,16 +9,19 @@
 from typing import List, Dict, Optional, Sequence, Union, Any, Iterator, cast

 from google.ai.generativelanguage_v1 import GenerateContentResponse
-from google.generativeai.responder import ToolDict
-from google.generativeai.types import FunctionDeclarationType, SafetySettingDict
-from google.generativeai.types import Tool as GoogleTool
+from google.ai.generativelanguage_v1beta.types import (
+    Tool as GoogleTool,
+)
 from langchain_core.callbacks import CallbackManagerForLLMRun
 from langchain_core.messages import BaseMessage
 from langchain_core.outputs import ChatGenerationChunk
 from langchain_google_genai import ChatGoogleGenerativeAI
-from langchain_google_genai._function_utils import _ToolConfigDict
-from langchain_google_genai.chat_models import _chat_with_retry, _response_to_result
-
+from langchain_google_genai._function_utils import _ToolConfigDict, _ToolDict
+from langchain_google_genai.chat_models import _chat_with_retry, _response_to_result, \
+    _FunctionDeclarationType
+from langchain_google_genai._common import (
+    SafetySettingDict,
+)

 from setting.models_provider.base_model_provider import MaxKBBaseModel
@@ -54,8 +57,8 @@ class GeminiChatModel(MaxKBBaseModel, ChatGoogleGenerativeAI):
             stop: Optional[List[str]] = None,
             run_manager: Optional[CallbackManagerForLLMRun] = None,
             *,
-            tools: Optional[Sequence[Union[ToolDict, GoogleTool]]] = None,
-            functions: Optional[Sequence[FunctionDeclarationType]] = None,
+            tools: Optional[Sequence[Union[_ToolDict, GoogleTool]]] = None,
+            functions: Optional[Sequence[_FunctionDeclarationType]] = None,
             safety_settings: Optional[SafetySettingDict] = None,
             tool_config: Optional[Union[Dict, _ToolConfigDict]] = None,
             generation_config: Optional[Dict[str, Any]] = None,
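The base_chat_open_ai.py hunk above relies on langchain-openai >= 0.2 having turned the former module-level helper into an instance method on the ChatOpenAI base class. A minimal sketch of the pattern, assuming only what the hunk itself shows (the `_handle_chunk` wrapper is hypothetical, for illustration):

from langchain_openai import ChatOpenAI


class PatchedChatOpenAI(ChatOpenAI):
    def _handle_chunk(self, chunk, default_chunk_class, base_generation_info):
        # Hypothetical wrapper for illustration. In langchain-openai >= 0.2,
        # _convert_chunk_to_generation_chunk is a method on the base class,
        # so a subclass reaches it via super() instead of importing the
        # removed module-level function from langchain_openai.chat_models.base.
        if not isinstance(chunk, dict):
            chunk = chunk.model_dump()
        return super()._convert_chunk_to_generation_chunk(
            chunk, default_chunk_class, base_generation_info
        )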
diff --git a/apps/setting/models_provider/impl/local_model_provider/model/embedding.py b/apps/setting/models_provider/impl/local_model_provider/model/embedding.py
index 820b93e5f..4d6c65b9f 100644
--- a/apps/setting/models_provider/impl/local_model_provider/model/embedding.py
+++ b/apps/setting/models_provider/impl/local_model_provider/model/embedding.py
@@ -10,7 +10,7 @@ from typing import Dict, List

 import requests
 from langchain_core.embeddings import Embeddings
-from langchain_core.pydantic_v1 import BaseModel
+from pydantic import BaseModel
 from langchain_huggingface import HuggingFaceEmbeddings

 from setting.models_provider.base_model_provider import MaxKBBaseModel
diff --git a/apps/setting/models_provider/impl/local_model_provider/model/reranker.py b/apps/setting/models_provider/impl/local_model_provider/model/reranker.py
index f5056b2cc..7d219c020 100644
--- a/apps/setting/models_provider/impl/local_model_provider/model/reranker.py
+++ b/apps/setting/models_provider/impl/local_model_provider/model/reranker.py
@@ -6,7 +6,7 @@
 @date:2024/9/2 16:42
 @desc:
 """
-from typing import Sequence, Optional, Dict, Any
+from typing import Sequence, Optional, Dict, Any, ClassVar

 import requests
 import torch
@@ -69,7 +69,7 @@ class LocalBaseReranker(MaxKBBaseModel, BaseDocumentCompressor):
     tokenizer: Any = None
     model: Optional[str] = None
     cache_dir: Optional[str] = None
-    model_kwargs = {}
+    model_kwargs: ClassVar = {}

     def __init__(self, model_name, cache_dir=None, **model_kwargs):
         super().__init__()
diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py b/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py
index f82c9a21a..9704537a5 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/reranker.py
@@ -1,10 +1,11 @@
 from typing import Sequence, Optional, Any, Dict
+
+from langchain_community.embeddings import OllamaEmbeddings
 from langchain_core.callbacks import Callbacks
 from langchain_core.documents import Document
-from langchain_community.embeddings import OllamaEmbeddings
 from setting.models_provider.base_model_provider import MaxKBBaseModel
 from sklearn.metrics.pairwise import cosine_similarity
-from pydantic.v1 import BaseModel, Field
+from pydantic import BaseModel, Field


 class OllamaReranker(MaxKBBaseModel, OllamaEmbeddings, BaseModel):
diff --git a/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py b/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py
index 7e913f9ad..9055c4cb1 100644
--- a/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py
+++ b/apps/setting/models_provider/impl/tencent_model_provider/model/hunyuan.py
@@ -18,7 +18,7 @@ from langchain_core.messages import (
     HumanMessageChunk,
     SystemMessage,
 )
 from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
-from langchain_core.pydantic_v1 import Field, SecretStr, root_validator
+from pydantic import Field, SecretStr, root_validator
 from langchain_core.utils import (
     convert_to_secret_str,
     get_from_dict_or_env,
@@ -137,7 +137,7 @@ class ChatHunyuan(BaseChatModel):
     class Config:
         """Configuration for this pydantic object."""

-        allow_population_by_field_name = True
+        validate_by_name = True

     @root_validator(pre=True)
     def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]:
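The four hunks above are the standard Pydantic v1 -> v2 moves: imports shift from pydantic.v1 / langchain_core.pydantic_v1 to plain pydantic, an un-annotated mutable class attribute must be marked ClassVar, and allow_population_by_field_name is renamed. A standalone sketch of both model-level changes (illustrative, not MaxKB code; note that the validate_by_name spelling is only recognized from Pydantic 2.11, older 2.x releases use populate_by_name):

from typing import ClassVar, Optional

from pydantic import BaseModel


class RerankerSettings(BaseModel):
    model: Optional[str] = None
    # Pydantic v2 rejects a bare `model_kwargs = {}` at class-definition
    # time ("non-annotated attribute"); ClassVar keeps it as plain class
    # state rather than turning it into a model field.
    model_kwargs: ClassVar = {}

    class Config:
        # v2 rename of v1's allow_population_by_field_name; requires
        # Pydantic >= 2.11 (use populate_by_name on older 2.x).
        validate_by_name = True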
diff --git a/pyproject.toml b/pyproject.toml
index 863cc3434..39e02a839 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -12,9 +12,16 @@ django = "4.2.18"
 djangorestframework = "^3.15.2"
 drf-yasg = "1.21.7"
 django-filter = "23.2"
-langchain = "0.2.16"
-langchain_community = "0.2.17"
-langchain-huggingface = "^0.0.3"
+langchain-openai = "^0.3.0"
+langchain-anthropic = "^0.3.0"
+langchain-community = "^0.3.0"
+langchain-deepseek = "^0.1.0"
+langchain-google-genai = "^2.0.9"
+langchain-mcp-adapters = "^0.0.5"
+langchain-huggingface = "^0.1.2"
+langchain-ollama = "^0.2.3"
+langgraph = "^0.3.0"
+mcp = "^1.4.1"
 psycopg2-binary = "2.9.10"
 jieba = "^0.42.1"
 diskcache = "^5.6.3"
@@ -28,8 +35,6 @@ qianfan = "^0.3.6.1"
 pycryptodome = "^3.19.0"
 beautifulsoup4 = "^4.12.2"
 html2text = "^2024.2.26"
-langchain-openai = "^0.1.8"
-langchain-ollama = "0.1.3"
 django-ipware = "^6.0.4"
 django-apscheduler = "^0.6.2"
 pymupdf = "1.24.9"
@@ -42,8 +47,6 @@ zhipuai = "^2.0.1"
 httpx = "^0.27.0"
 httpx-sse = "^0.4.0"
 websockets = "^13.0"
-langchain-google-genai = "^1.0.3"
-langchain-anthropic= "^0.1.0"
 openpyxl = "^3.1.2"
 xlrd = "^2.0.1"
 gunicorn = "^22.0.0"
@@ -55,7 +58,7 @@ psutil = "^6.0.0"
 celery = { extras = ["sqlalchemy"], version = "^5.4.0" }
 django-celery-beat = "^2.6.0"
 celery-once = "^3.0.1"
-anthropic = "^0.34.2"
+anthropic = "^0.39.0"
 pylint = "3.1.0"
 pydub = "^0.25.1"
 cffi = "^1.17.1"
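The pyproject changes consolidate every langchain-* package onto the 0.3 line, which is what pulls the stack onto Pydantic v2 and forces the source changes above, add the new langgraph/MCP dependencies, and raise the explicit anthropic pin alongside langchain-anthropic. A quick post-upgrade sanity check, assuming a resolved environment (package list copied from the pyproject above):

# Print the resolved versions of the packages this diff touches, so a
# stale lockfile is caught before runtime import errors are.
from importlib.metadata import PackageNotFoundError, version

PACKAGES = (
    "langchain-openai",
    "langchain-anthropic",
    "langchain-community",
    "langchain-google-genai",
    "langchain-huggingface",
    "langchain-ollama",
    "langgraph",
    "mcp",
    "pydantic",
    "anthropic",
)

for pkg in PACKAGES:
    try:
        print(f"{pkg}=={version(pkg)}")
    except PackageNotFoundError:
        print(f"{pkg}: NOT INSTALLED")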