diff --git a/apps/models_provider/impl/base_chat_open_ai.py b/apps/models_provider/impl/base_chat_open_ai.py index b4959f298..2d4119f55 100644 --- a/apps/models_provider/impl/base_chat_open_ai.py +++ b/apps/models_provider/impl/base_chat_open_ai.py @@ -16,7 +16,7 @@ from langchain_openai import ChatOpenAI from langchain_openai.chat_models.base import _create_usage_metadata from common.config.tokenizer_manage_config import TokenizerManage - +from common.utils.logger import maxkb_logger def custom_get_token_ids(text: str): tokenizer = TokenizerManage.get_tokenizer() @@ -103,13 +103,13 @@ class BaseChatOpenAI(ChatOpenAI): future = executor.submit(super().get_num_tokens_from_messages, messages, tools) try: response = future.result() - print("请求成功(未超时)") + maxkb_logger.info("请求成功(未超时)") return response except Exception as e: if isinstance(e, ReadTimeout): raise # 继续抛出 else: - print("except:", e) + maxkb_logger.error("except: %s", e) tokenizer = TokenizerManage.get_tokenizer() return sum([len(tokenizer.encode(get_buffer_string([m]))) for m in messages]) diff --git a/apps/models_provider/impl/xf_model_provider/model/stt.py b/apps/models_provider/impl/xf_model_provider/model/stt.py index b43320746..68f624961 100644 --- a/apps/models_provider/impl/xf_model_provider/model/stt.py +++ b/apps/models_provider/impl/xf_model_provider/model/stt.py @@ -159,7 +159,6 @@ class XFSparkSpeechToText(MaxKBBaseModel, BaseSpeechToText): "audio": str(base64.b64encode(buf), 'utf-8'), "encoding": "lame"} } - print(d) d = json.dumps(d) await ws.send(d) status = STATUS_CONTINUE_FRAME