refactor: remove print

wxg0103 2025-06-24 15:30:42 +08:00
parent 69e60b5800
commit c253e8b696
31 changed files with 35 additions and 60 deletions
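Every hunk below applies the same pattern: stray print() calls are either deleted or routed through the project's named loggers. The sketch that follows is illustrative only; it assumes the "max_kb" and "max_kb_error" logger names seen in the diffs are configured in the application's logging settings, and the read_entry helper is a made-up example rather than code from this commit.

import logging
from zipfile import ZipFile

# Logger names used throughout this commit; handlers and levels are assumed
# to be configured elsewhere in the project's logging settings.
max_kb = logging.getLogger("max_kb")
max_kb_error = logging.getLogger("max_kb_error")

def read_entry(archive: ZipFile, name: str):
    """Hypothetical helper mirroring the commit's pattern: log failures instead of printing them."""
    try:
        data = archive.read(name)
        max_kb.info("Read %s (%d bytes)", name, len(data))  # was: print(...)
        return data
    except Exception as e:
        # was: print(e)
        max_kb_error.error("Error reading %s: %s", name, e)
        return None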

View File

@ -7,6 +7,8 @@
@desc:
"""
import io
import logging
import uuid_utils.compat as uuid
from functools import reduce
from io import BytesIO
@ -73,7 +75,7 @@ def handle_images(deps, archive: ZipFile) -> []:
image_io = archive.read(dep.target)
image = openpyxl_Image(BytesIO(image_io))
except Exception as e:
print(e)
logging.getLogger("max_kb_error").error(f"Error reading image {dep.target}: {e}")
continue
image.embed = dep.id # file rId
image.target = dep.target # file path within the archive

View File

@ -7,6 +7,7 @@
@desc:
"""
import io
import logging
import os
import re
import traceback
@ -78,7 +79,7 @@ def get_paragraph_element_txt(paragraph_element, doc: Document, images_list, get
return paragraph_element.text
return ""
except Exception as e:
print(e)
logging.getLogger("max_kb_error").error(f'Error getting paragraph element text: {e}')
return ""

View File

@ -173,6 +173,6 @@ class Fork:
def handler(base_url, response: Fork.Response):
print(base_url.url, base_url.tag.text if base_url.tag else None, response.content)
logging.getLogger("max_kb").info(base_url.url, base_url.tag.text if base_url.tag else None, response.content)
# ForkManage('https://bbs.fit2cloud.com/c/de/6', ['.md-content']).fork(3, set(), handler)

View File

@ -617,7 +617,7 @@ class KnowledgeSerializer(serializers.Serializer):
document_name = child_link.tag.text if child_link.tag is not None and len(
child_link.tag.text.strip()) > 0 else child_link.url
paragraphs = get_split_model('web.md').parse(response.content)
print(child_link.url.strip())
logging.getLogger("max_kb").info(child_link.url.strip())
first = QuerySet(Document).filter(
meta__source_url=child_link.url.strip(),
knowledge=knowledge

View File

@ -48,8 +48,6 @@ class QwenVLModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -109,7 +109,6 @@ class QwenTextToImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -6,10 +6,12 @@ from dashscope import ImageSynthesis
from django.utils.translation import gettext
from langchain_community.chat_models import ChatTongyi
from langchain_core.messages import HumanMessage
import logging
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tti import BaseTextToImage
max_kb_error = logging.getLogger("max_kb_error")
class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
api_key: str
@ -54,6 +56,6 @@ class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
for result in rsp.output.results:
file_urls.append(result.url)
else:
print('sync_call Failed, status_code: %s, code: %s, message: %s' %
(rsp.status_code, rsp.code, rsp.message))
max_kb_error.error('sync_call Failed, status_code: %s, code: %s, message: %s' %
(rsp.status_code, rsp.code, rsp.message))
return file_urls

View File

@ -49,7 +49,6 @@ class AliyunBaiLianTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
if audio is None:
raise Exception('Failed to generate audio')
if type(audio) == str:
print(audio)
raise Exception(audio)
return audio

View File

@ -31,8 +31,6 @@ class AnthropicImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext("Hello")}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -32,7 +32,6 @@ class BedrockEmbeddingCredential(BaseForm, BaseModelCredential):
try:
model: BedrockEmbeddingModel = provider.get_model(model_type, model_name, model_credential)
aa = model.embed_query(_('Hello'))
print(aa)
except AppApiException:
raise
except Exception as e:

View File

@ -38,8 +38,6 @@ class AzureOpenAIImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -66,7 +66,6 @@ class AzureOpenAITextToImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -30,8 +30,6 @@ class GeminiImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -55,7 +55,6 @@ class GeminiLLMModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.invoke([HumanMessage(content=gettext('Hello'))])
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -33,8 +33,6 @@ class OpenAIImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -69,7 +69,6 @@ class OpenAITextToImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -33,8 +33,6 @@ class SiliconCloudImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -69,7 +69,6 @@ class SiliconCloudTextToImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -81,7 +81,5 @@ class SiliconCloudTextToSpeech(MaxKBBaseModel, BaseTextToSpeech):
response = requests.request("POST", url, json=payload, headers=headers)
print(response.text)
def is_cache_model(self):
return False

View File

@ -35,8 +35,6 @@ class TencentVisionModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -1,6 +1,7 @@
# coding=utf-8
import json
import logging
from typing import Dict
from django.utils.translation import gettext as _
@ -13,6 +14,7 @@ from tencentcloud.hunyuan.v20230901 import hunyuan_client, models
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tti import BaseTextToImage
from models_provider.impl.tencent_model_provider.model.hunyuan import ChatHunyuan
max_kb_error = logging.getLogger("max_kb_error")
class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage):
@ -82,11 +84,9 @@ class TencentTextToImageModel(MaxKBBaseModel, BaseTextToImage):
# The returned resp is a TextToImageLiteResponse instance corresponding to the request object
resp = client.TextToImageLite(req)
# Output the response as a JSON-formatted string
print(resp.to_json_string())
file_urls = []
file_urls.append(resp.ResultImage)
return file_urls
except TencentCloudSDKException as err:
print(err)
max_kb_error.error(f"Tencent Text to Image API call failed: {err}")

View File

@ -30,8 +30,6 @@ class VllmImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": "你好"}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -30,8 +30,6 @@ class VolcanicEngineImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -55,7 +55,6 @@ class VolcanicEngineLLMModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.invoke([HumanMessage(content=gettext('Hello'))])
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -11,6 +11,7 @@ import base64
import gzip
import hmac
import json
import logging
import os
import ssl
import uuid_utils.compat as uuid
@ -24,6 +25,7 @@ import websockets
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_stt import BaseSpeechToText
max_kb_error = logging.getLogger("max_kb_error")
audio_format = "mp3" # wav 或者 mp3根据实际音频格式设置
@ -145,7 +147,7 @@ def parse_response(res):
result['code'] = code
payload_size = int.from_bytes(payload[4:8], "big", signed=False)
payload_msg = payload[8:]
print(f"Error code: {code}, message: {payload_msg}")
max_kb_error.error(f"Error code: {code}, message: {payload_msg}")
if payload_msg is None:
return result
if message_compression == GZIP:

View File

@ -12,6 +12,7 @@ import datetime
import hashlib
import hmac
import json
import logging
import sys
from typing import Dict
@ -34,6 +35,8 @@ req_key_dict = {
'anime_v1.3.1': 'high_aes',
}
max_kb = logging.getLogger("max_kb")
def sign(key, msg):
return hmac.new(key, msg.encode('utf-8'), hashlib.sha256).digest()
@ -57,7 +60,7 @@ def formatQuery(parameters):
def signV4Request(access_key, secret_key, service, req_query, req_body):
if access_key is None or secret_key is None:
print('No access key is available.')
max_kb.info('No access key is available.')
sys.exit()
t = datetime.datetime.utcnow()
@ -74,47 +77,46 @@ def signV4Request(access_key, secret_key, service, req_query, req_body):
'\n' + 'x-date:' + current_date + '\n'
canonical_request = method + '\n' + canonical_uri + '\n' + canonical_querystring + \
'\n' + canonical_headers + '\n' + signed_headers + '\n' + payload_hash
# print(canonical_request)
# max_kb.info(canonical_request)
algorithm = 'HMAC-SHA256'
credential_scope = datestamp + '/' + region + '/' + service + '/' + 'request'
string_to_sign = algorithm + '\n' + current_date + '\n' + credential_scope + '\n' + hashlib.sha256(
canonical_request.encode('utf-8')).hexdigest()
# print(string_to_sign)
# max_kb.info(string_to_sign)
signing_key = getSignatureKey(secret_key, datestamp, region, service)
# print(signing_key)
# max_kb.info(signing_key)
signature = hmac.new(signing_key, (string_to_sign).encode(
'utf-8'), hashlib.sha256).hexdigest()
# print(signature)
# max_kb.info(signature)
authorization_header = algorithm + ' ' + 'Credential=' + access_key + '/' + \
credential_scope + ', ' + 'SignedHeaders=' + \
signed_headers + ', ' + 'Signature=' + signature
# print(authorization_header)
# max_kb.info(authorization_header)
headers = {'X-Date': current_date,
'Authorization': authorization_header,
'X-Content-Sha256': payload_hash,
'Content-Type': content_type
}
# print(headers)
# max_kb.info(headers)
# ************* SEND THE REQUEST *************
request_url = endpoint + '?' + canonical_querystring
print('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++')
print('Request URL = ' + request_url)
max_kb.info('\nBEGIN REQUEST++++++++++++++++++++++++++++++++++++')
max_kb.info('Request URL = ' + request_url)
try:
r = requests.post(request_url, headers=headers, data=req_body)
except Exception as err:
print(f'error occurred: {err}')
max_kb.info(f'error occurred: {err}')
raise
else:
print('\nRESPONSE++++++++++++++++++++++++++++++++++++')
print(f'Response code: {r.status_code}\n')
max_kb.info('\nRESPONSE++++++++++++++++++++++++++++++++++++')
max_kb.info(f'Response code: {r.status_code}\n')
# Use replace to convert \u0026 back to &
resp_str = r.text.replace("\\u0026", "&")
if r.status_code != 200:
raise Exception(f'Error: {resp_str}')
print(f'Response body: {resp_str}\n')
return json.loads(resp_str)['data']['image_urls']
@ -146,7 +148,6 @@ class VolcanicEngineTextToImage(MaxKBBaseModel, BaseTextToImage):
def check_auth(self):
res = self.generate_image('生成一张小猫图片')
print(res)
def generate_image(self, prompt: str, negative_prompt: str = None):
# Fill in the request Query according to the API documentation

View File

@ -30,8 +30,6 @@ class XinferenceImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
if isinstance(e, AppApiException):
raise e

View File

@ -67,7 +67,6 @@ class XinferenceTextToImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
if isinstance(e, AppApiException):
raise e

View File

@ -29,8 +29,6 @@ class ZhiPuImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -48,7 +48,6 @@ class ZhiPuTextToImageModelCredential(BaseForm, BaseModelCredential):
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):

View File

@ -1,3 +1,4 @@
import logging
from logging import StreamHandler
from threading import get_ident
@ -208,7 +209,7 @@ class CeleryThreadTaskFileHandler(CeleryThreadingLoggerHandler):
f.flush()
def handle_task_start(self, task_id):
print('handle_task_start')
logging.getLogger("max_kb").info('handle_task_start')
log_path = get_celery_task_log_path(task_id)
thread_id = self.get_current_thread_id()
self.task_id_thread_id_mapper[task_id] = thread_id