diff --git a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
index 82c8df92e..8a764dea8 100644
--- a/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
+++ b/apps/setting/models_provider/impl/ollama_model_provider/model/image.py
@@ -1,4 +1,5 @@
 from typing import Dict
+from urllib.parse import urlparse, ParseResult
 
 from langchain_openai.chat_models import ChatOpenAI
 
@@ -11,14 +12,25 @@ def custom_get_token_ids(text: str):
     return tokenizer.encode(text)
 
 
+def get_base_url(url: str):
+    parse = urlparse(url)
+    result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path, params='',
+                             query='',
+                             fragment='').geturl()
+    return result_url[:-1] if result_url.endswith("/") else result_url
+
+
 class OllamaImage(MaxKBBaseModel, ChatOpenAI):
 
     @staticmethod
     def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
+        api_base = model_credential.get('api_base', '')
+        base_url = get_base_url(api_base)
+        base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
         optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
         return OllamaImage(
             model_name=model_name,
-            openai_api_base=model_credential.get('api_base'),
+            openai_api_base=base_url,
             openai_api_key=model_credential.get('api_key'),
             # stream_options={"include_usage": True},
             streaming=True,
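
For context, a minimal standalone sketch of the URL normalization introduced above. The `get_base_url` helper and the `/v1` suffix check mirror the lines added in this diff; the sample Ollama addresses are hypothetical and only illustrate the expected behavior:

```python
from urllib.parse import urlparse, ParseResult


def get_base_url(url: str):
    # Keep only scheme://netloc/path, dropping params, query, and fragment.
    parse = urlparse(url)
    result_url = ParseResult(scheme=parse.scheme, netloc=parse.netloc, path=parse.path,
                             params='', query='', fragment='').geturl()
    # Strip a single trailing slash so '/v1' can be appended cleanly.
    return result_url[:-1] if result_url.endswith("/") else result_url


# Hypothetical api_base values a user might enter in the model credential form.
for api_base in ("http://localhost:11434",
                 "http://localhost:11434/",
                 "http://localhost:11434/v1",
                 "http://localhost:11434/v1/?token=abc"):
    base_url = get_base_url(api_base)
    base_url = base_url if base_url.endswith('/v1') else (base_url + '/v1')
    print(api_base, "->", base_url)
# All four inputs resolve to: http://localhost:11434/v1
```

This way `openai_api_base` always points at the OpenAI-compatible `/v1` endpoint of the Ollama server, regardless of whether the user included the trailing slash or the `/v1` path themselves.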