diff --git a/apps/common/handle/impl/common_handle.py b/apps/common/handle/impl/common_handle.py index 48692f244..1a796fbe4 100644 --- a/apps/common/handle/impl/common_handle.py +++ b/apps/common/handle/impl/common_handle.py @@ -7,6 +7,8 @@ @desc: """ import io +import logging + import uuid_utils.compat as uuid from functools import reduce from io import BytesIO @@ -73,7 +75,7 @@ def handle_images(deps, archive: ZipFile) -> []: image_io = archive.read(dep.target) image = openpyxl_Image(BytesIO(image_io)) except Exception as e: - print(e) + logging.getLogger("max_kb_error").error(f"Error reading image {dep.target}: {e}") continue image.embed = dep.id # 文件rId image.target = dep.target # 文件地址 diff --git a/apps/common/handle/impl/text/doc_split_handle.py b/apps/common/handle/impl/text/doc_split_handle.py index 740f37c6c..2c7472b5d 100644 --- a/apps/common/handle/impl/text/doc_split_handle.py +++ b/apps/common/handle/impl/text/doc_split_handle.py @@ -7,6 +7,7 @@ @desc: """ import io +import logging import os import re import traceback @@ -78,7 +79,7 @@ def get_paragraph_element_txt(paragraph_element, doc: Document, images_list, get return paragraph_element.text return "" except Exception as e: - print(e) + logging.getLogger("max_kb_error").error(f'Error getting paragraph element text: {e}') return "" diff --git a/apps/common/utils/fork.py b/apps/common/utils/fork.py index 4405b9b76..0672e4f90 100644 --- a/apps/common/utils/fork.py +++ b/apps/common/utils/fork.py @@ -173,6 +173,6 @@ class Fork: def handler(base_url, response: Fork.Response): - print(base_url.url, base_url.tag.text if base_url.tag else None, response.content) + logging.getLogger("max_kb").info("%s %s %s", base_url.url, base_url.tag.text if base_url.tag else None, response.content) # ForkManage('https://bbs.fit2cloud.com/c/de/6', ['.md-content']).fork(3, set(), handler) diff --git a/apps/knowledge/serializers/knowledge.py b/apps/knowledge/serializers/knowledge.py index 05f606efc..693eae6bd 100644 --- 
a/apps/knowledge/serializers/knowledge.py +++ b/apps/knowledge/serializers/knowledge.py @@ -617,7 +617,7 @@ class KnowledgeSerializer(serializers.Serializer): document_name = child_link.tag.text if child_link.tag is not None and len( child_link.tag.text.strip()) > 0 else child_link.url paragraphs = get_split_model('web.md').parse(response.content) - print(child_link.url.strip()) + logging.getLogger("max_kb").info(child_link.url.strip()) first = QuerySet(Document).filter( meta__source_url=child_link.url.strip(), knowledge=knowledge diff --git a/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py b/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py index 903424fe1..cf91d1622 100644 --- a/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py +++ b/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/image.py @@ -48,8 +48,6 @@ class QwenVLModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py b/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py index d256d6ce8..5731b5a6d 100644 --- a/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py +++ b/apps/models_provider/impl/aliyun_bai_lian_model_provider/credential/tti.py @@ -109,7 +109,6 @@ class QwenTextToImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.check_auth() - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git 
a/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py b/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py index 48db77831..3e25f47c9 100644 --- a/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py +++ b/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tti.py @@ -6,10 +6,12 @@ from dashscope import ImageSynthesis from django.utils.translation import gettext from langchain_community.chat_models import ChatTongyi from langchain_core.messages import HumanMessage - +import logging from models_provider.base_model_provider import MaxKBBaseModel from models_provider.impl.base_tti import BaseTextToImage +max_kb_error = logging.getLogger("max_kb_error") + class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): api_key: str @@ -54,6 +56,6 @@ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage): for result in rsp.output.results: file_urls.append(result.url) else: - print('sync_call Failed, status_code: %s, code: %s, message: %s' % - (rsp.status_code, rsp.code, rsp.message)) + max_kb_error.error('sync_call Failed, status_code: %s, code: %s, message: %s' % + (rsp.status_code, rsp.code, rsp.message)) return file_urls diff --git a/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py b/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py index a42d1824c..cceabce41 100644 --- a/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py +++ b/apps/models_provider/impl/aliyun_bai_lian_model_provider/model/tts.py @@ -49,7 +49,6 @@ class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): if audio is None: raise Exception('Failed to generate audio') if type(audio) == str: - print(audio) raise Exception(audio) return audio diff --git a/apps/models_provider/impl/anthropic_model_provider/credential/image.py b/apps/models_provider/impl/anthropic_model_provider/credential/image.py index 50bec62ce..1d23e3688 100644 --- 
a/apps/models_provider/impl/anthropic_model_provider/credential/image.py +++ b/apps/models_provider/impl/anthropic_model_provider/credential/image.py @@ -31,8 +31,6 @@ class AnthropicImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext("Hello")}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py b/apps/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py index 6a7192a92..0bb5e52af 100644 --- a/apps/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py +++ b/apps/models_provider/impl/aws_bedrock_model_provider/credential/embedding.py @@ -32,7 +32,6 @@ class BedrockEmbeddingCredential(BaseForm, BaseModelCredential): try: model: BedrockEmbeddingModel = provider.get_model(model_type, model_name, model_credential) aa = model.embed_query(_('Hello')) - print(aa) except AppApiException: raise except Exception as e: diff --git a/apps/models_provider/impl/azure_model_provider/credential/image.py b/apps/models_provider/impl/azure_model_provider/credential/image.py index fb2e7c0fd..e0322e043 100644 --- a/apps/models_provider/impl/azure_model_provider/credential/image.py +++ b/apps/models_provider/impl/azure_model_provider/credential/image.py @@ -38,8 +38,6 @@ class AzureOpenAIImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/azure_model_provider/credential/tti.py 
b/apps/models_provider/impl/azure_model_provider/credential/tti.py index bd53d6574..a4eef6b61 100644 --- a/apps/models_provider/impl/azure_model_provider/credential/tti.py +++ b/apps/models_provider/impl/azure_model_provider/credential/tti.py @@ -66,7 +66,6 @@ class AzureOpenAITextToImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.check_auth() - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/gemini_model_provider/credential/image.py b/apps/models_provider/impl/gemini_model_provider/credential/image.py index a7919b230..a83551bca 100644 --- a/apps/models_provider/impl/gemini_model_provider/credential/image.py +++ b/apps/models_provider/impl/gemini_model_provider/credential/image.py @@ -30,8 +30,6 @@ class GeminiImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/gemini_model_provider/credential/llm.py b/apps/models_provider/impl/gemini_model_provider/credential/llm.py index 1bc6f5750..f369eb309 100644 --- a/apps/models_provider/impl/gemini_model_provider/credential/llm.py +++ b/apps/models_provider/impl/gemini_model_provider/credential/llm.py @@ -55,7 +55,6 @@ class GeminiLLMModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.invoke([HumanMessage(content=gettext('Hello'))]) - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/openai_model_provider/credential/image.py 
b/apps/models_provider/impl/openai_model_provider/credential/image.py index a3177f30b..85519a960 100644 --- a/apps/models_provider/impl/openai_model_provider/credential/image.py +++ b/apps/models_provider/impl/openai_model_provider/credential/image.py @@ -33,8 +33,6 @@ class OpenAIImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/openai_model_provider/credential/tti.py b/apps/models_provider/impl/openai_model_provider/credential/tti.py index a266d9b87..e999f385c 100644 --- a/apps/models_provider/impl/openai_model_provider/credential/tti.py +++ b/apps/models_provider/impl/openai_model_provider/credential/tti.py @@ -69,7 +69,6 @@ class OpenAITextToImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.check_auth() - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/siliconCloud_model_provider/credential/image.py b/apps/models_provider/impl/siliconCloud_model_provider/credential/image.py index 584daeaae..6aefdf50c 100644 --- a/apps/models_provider/impl/siliconCloud_model_provider/credential/image.py +++ b/apps/models_provider/impl/siliconCloud_model_provider/credential/image.py @@ -33,8 +33,6 @@ class SiliconCloudImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff 
--git a/apps/models_provider/impl/siliconCloud_model_provider/credential/tti.py b/apps/models_provider/impl/siliconCloud_model_provider/credential/tti.py index 7a29d6e44..c90cf135a 100644 --- a/apps/models_provider/impl/siliconCloud_model_provider/credential/tti.py +++ b/apps/models_provider/impl/siliconCloud_model_provider/credential/tti.py @@ -69,7 +69,6 @@ class SiliconCloudTextToImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.check_auth() - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/siliconCloud_model_provider/model/tts.py b/apps/models_provider/impl/siliconCloud_model_provider/model/tts.py index 9d3bb77f5..b5ddd6c86 100644 --- a/apps/models_provider/impl/siliconCloud_model_provider/model/tts.py +++ b/apps/models_provider/impl/siliconCloud_model_provider/model/tts.py @@ -81,7 +81,5 @@ class SiliconCloudTextToSpeech(MaxKBBaseModel, BaseTextToSpeech): response = requests.request("POST", url, json=payload, headers=headers) - print(response.text) - def is_cache_model(self): return False diff --git a/apps/models_provider/impl/tencent_model_provider/credential/image.py b/apps/models_provider/impl/tencent_model_provider/credential/image.py index 67701ae01..4ad6fa83b 100644 --- a/apps/models_provider/impl/tencent_model_provider/credential/image.py +++ b/apps/models_provider/impl/tencent_model_provider/credential/image.py @@ -35,8 +35,6 @@ class TencentVisionModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/tencent_model_provider/model/tti.py 
b/apps/models_provider/impl/tencent_model_provider/model/tti.py index d737f5f85..bad4433b8 100644 --- a/apps/models_provider/impl/tencent_model_provider/model/tti.py +++ b/apps/models_provider/impl/tencent_model_provider/model/tti.py @@ -1,6 +1,7 @@ # coding=utf-8 import json +import logging from typing import Dict from django.utils.translation import gettext as _ @@ -13,6 +14,7 @@ from tencentcloud.hunyuan.v20230901 import hunyuan_client, models from models_provider.base_model_provider import MaxKBBaseModel from models_provider.impl.base_tti import BaseTextToImage from models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan +max_kb_error = logging.getLogger("max_kb_error") class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage): @@ -82,11 +84,9 @@ class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage): # 返回的resp是一个TextToImageLiteResponse的实例,与请求对象对应 resp = client.TextToImageLite(req) - # 输出json格式的字符串回包 - print(resp.to_json_string()) file_urls = [] file_urls.append(resp.ResultImage) return file_urls except TencentCloudSDKException as err: - print(err) + max_kb_error.error(f"Tencent Text to Image API call failed: {err}") diff --git a/apps/models_provider/impl/vllm_model_provider/credential/image.py b/apps/models_provider/impl/vllm_model_provider/credential/image.py index 62ab56881..bcafece89 100644 --- a/apps/models_provider/impl/vllm_model_provider/credential/image.py +++ b/apps/models_provider/impl/vllm_model_provider/credential/image.py @@ -30,8 +30,6 @@ class VllmImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/volcanic_engine_model_provider/credential/image.py 
b/apps/models_provider/impl/volcanic_engine_model_provider/credential/image.py index 753664bc1..2670719c3 100644 --- a/apps/models_provider/impl/volcanic_engine_model_provider/credential/image.py +++ b/apps/models_provider/impl/volcanic_engine_model_provider/credential/image.py @@ -30,8 +30,6 @@ class VolcanicEngineImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/volcanic_engine_model_provider/credential/llm.py b/apps/models_provider/impl/volcanic_engine_model_provider/credential/llm.py index 70c373a35..bc15cff4c 100644 --- a/apps/models_provider/impl/volcanic_engine_model_provider/credential/llm.py +++ b/apps/models_provider/impl/volcanic_engine_model_provider/credential/llm.py @@ -55,7 +55,6 @@ class VolcanicEngineLLMModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.invoke([HumanMessage(content=gettext('Hello'))]) - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/volcanic_engine_model_provider/model/stt.py b/apps/models_provider/impl/volcanic_engine_model_provider/model/stt.py index 35e9167cf..35890251e 100644 --- a/apps/models_provider/impl/volcanic_engine_model_provider/model/stt.py +++ b/apps/models_provider/impl/volcanic_engine_model_provider/model/stt.py @@ -11,6 +11,7 @@ import base64 import gzip import hmac import json +import logging import os import ssl import uuid_utils.compat as uuid @@ -24,6 +25,7 @@ import websockets from models_provider.base_model_provider import MaxKBBaseModel from models_provider.impl.base_stt import BaseSpeechToText 
+max_kb_error = logging.getLogger("max_kb_error") audio_format = "mp3"  # wav 或者 mp3,根据实际音频格式设置 @@ -145,7 +147,7 @@ def parse_response(res): result['code'] = code payload_size = int.from_bytes(payload[4:8], "big", signed=False) payload_msg = payload[8:] - print(f"Error code: {code}, message: {payload_msg}") + max_kb_error.error(f"Error code: {code}, message: {payload_msg}") if payload_msg is None: return result if message_compression == GZIP: diff --git a/apps/models_provider/impl/volcanic_engine_model_provider/model/tti.py b/apps/models_provider/impl/volcanic_engine_model_provider/model/tti.py index 9b478b442..b07200a19 100644 --- a/apps/models_provider/impl/volcanic_engine_model_provider/model/tti.py +++ b/apps/models_provider/impl/volcanic_engine_model_provider/model/tti.py @@ -12,6 +12,7 @@ import datetime import hashlib import hmac import json +import logging import sys from typing import Dict @@ -34,6 +35,8 @@ req_key_dict = { 'anime_v1.3.1': 'high_aes', } +max_kb = logging.getLogger("max_kb") + def sign(key, msg): return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest() @@ -57,7 +60,7 @@ def formatQuery(parameters): def signV4Request(access_key, secret_key, service, req_query, req_body): if access_key is None or secret_key is None: - print('No access key is available.') + max_kb.error('No access key is available.') sys.exit() t = datetime.datetime.utcnow() @@ -74,47 +77,46 @@ def signV4Request(access_key, secret_key, service, req_query, req_body): '\n' + 'x-date:' + current_date + '\n' canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + \ '\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash - # print(canonical_request) + # max_kb.info(canonical_request) algorithm = 'HMAC-SHA256' credential_scope = datestamp + '/' + region + '/' + service + '/' + 'request' string_to_sign = algorithm + '\n' + current_date + '\n' + credential_scope + '\n' + hashlib.sha256( canonical_request.encode('utf-8')).hexdigest() - 
# print(string_to_sign) + # max_kb.info(string_to_sign) signing_key = getSignatureKey(secret_key, datestamp, region, service) - # print(signing_key) + # max_kb.info(signing_key) signature = hmac.new(signing_key, (string_to_sign).encode( 'utf-8'), hashlib.sha256).hexdigest() - # print(signature) + # max_kb.info(signature) authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + \ credential_scope + ', ' + 'SignedHeaders=' + \ signed_headers + ', ' + 'Signature=' + signature - # print(authorization_header) + # max_kb.info(authorization_header) headers = {'X-Date': current_date, 'Authorization': authorization_header, 'X-Content-Sha256': payload_hash, 'Content-Type': content_type } - # print(headers) + # max_kb.info(headers) # ************* SEND THE REQUEST ************* request_url = endpoint + '?' + canonical_querystring - print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++') - print('Request URL = ' + request_url) + max_kb.info('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++') + max_kb.info('Request URL = ' + request_url) try: r = requests.post(request_url, headers=headers, data=req_body) except Exception as err: - print(f'error occurred: {err}') + max_kb.error(f'error occurred: {err}') raise else: - print('\nRESPONSE++++++++++++++++++++++++++++++++++++') - print(f'Response code: {r.status_code}\n') + max_kb.info('\nRESPONSE++++++++++++++++++++++++++++++++++++') + max_kb.info(f'Response code: {r.status_code}\n') # 使用 replace 方法将 \u0026 替换为 & resp_str = r.text.replace("\\u0026", "&") if r.status_code != 200: raise Exception(f'Error: {resp_str}') - print(f'Response body: {resp_str}\n') return json.loads(resp_str)['data']['image_urls'] @@ -146,7 +148,6 @@ class VolcanicEngineTextToImage(MaxKBBaseModel, BaseTextToImage): def check_auth(self): res = self.generate_image('生成一张小猫图片') - print(res) def generate_image(self, prompt: str, negative_prompt: str = None): # 请求Query,按照接口文档中填入即可 diff --git 
a/apps/models_provider/impl/xinference_model_provider/credential/image.py b/apps/models_provider/impl/xinference_model_provider/credential/image.py index 0bbfb1622..e9ebbf7e2 100644 --- a/apps/models_provider/impl/xinference_model_provider/credential/image.py +++ b/apps/models_provider/impl/xinference_model_provider/credential/image.py @@ -30,8 +30,6 @@ class XinferenceImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: if isinstance(e, AppApiException): raise e diff --git a/apps/models_provider/impl/xinference_model_provider/credential/tti.py b/apps/models_provider/impl/xinference_model_provider/credential/tti.py index 20e899493..2abc4f691 100644 --- a/apps/models_provider/impl/xinference_model_provider/credential/tti.py +++ b/apps/models_provider/impl/xinference_model_provider/credential/tti.py @@ -67,7 +67,6 @@ class XinferenceTextToImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.check_auth() - print(res) except Exception as e: if isinstance(e, AppApiException): raise e diff --git a/apps/models_provider/impl/zhipu_model_provider/credential/image.py b/apps/models_provider/impl/zhipu_model_provider/credential/image.py index 34fd599bd..5e28ddc27 100644 --- a/apps/models_provider/impl/zhipu_model_provider/credential/image.py +++ b/apps/models_provider/impl/zhipu_model_provider/credential/image.py @@ -29,8 +29,6 @@ class ZhiPuImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])]) - for chunk in res: - print(chunk) except Exception as e: 
traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/models_provider/impl/zhipu_model_provider/credential/tti.py b/apps/models_provider/impl/zhipu_model_provider/credential/tti.py index 82cf3a483..9cca94dab 100644 --- a/apps/models_provider/impl/zhipu_model_provider/credential/tti.py +++ b/apps/models_provider/impl/zhipu_model_provider/credential/tti.py @@ -48,7 +48,6 @@ class ZhiPuTextToImageModelCredential(BaseForm, BaseModelCredential): try: model = provider.get_model(model_type, model_name, model_credential, **model_params) res = model.check_auth() - print(res) except Exception as e: traceback.print_exc() if isinstance(e, AppApiException): diff --git a/apps/ops/celery/logger.py b/apps/ops/celery/logger.py index 1b2843c2b..9ac47511b 100644 --- a/apps/ops/celery/logger.py +++ b/apps/ops/celery/logger.py @@ -1,3 +1,4 @@ +import logging from logging import StreamHandler from threading import get_ident @@ -208,7 +209,7 @@ class CeleryThreadTaskFileHandler(CeleryThreadingLoggerHandler): f.flush() def handle_task_start(self, task_id): - print('handle_task_start') + logging.getLogger("max_kb").info('handle_task_start') log_path = get_celery_task_log_path(task_id) thread_id = self.get_current_thread_id() self.task_id_thread_id_mapper[task_id] = thread_id