feat: remove qwen

wxg0103 2025-04-22 14:35:09 +08:00
parent 6d4f5dc1c3
commit 55705593a9
10 changed files with 0 additions and 444 deletions

View File

@@ -12,7 +12,6 @@ from models_provider.impl.kimi_model_provider.kimi_model_provider import KimiModelProvider
from models_provider.impl.local_model_provider.local_model_provider import LocalModelProvider
from models_provider.impl.ollama_model_provider.ollama_model_provider import OllamaModelProvider
from models_provider.impl.openai_model_provider.openai_model_provider import OpenAIModelProvider
from models_provider.impl.qwen_model_provider.qwen_model_provider import QwenModelProvider
from models_provider.impl.siliconCloud_model_provider.siliconCloud_model_provider import SiliconCloudModelProvider
from models_provider.impl.tencent_cloud_model_provider.tencent_cloud_model_provider import TencentCloudModelProvider
from models_provider.impl.tencent_model_provider.tencent_model_provider import TencentModelProvider
@@ -31,7 +30,6 @@ class ModelProvideConstants(Enum):
model_ollama_provider = OllamaModelProvider()
model_openai_provider = OpenAIModelProvider()
model_kimi_provider = KimiModelProvider()
model_qwen_provider = QwenModelProvider()
model_zhipu_provider = ZhiPuModelProvider()
model_xf_provider = XunFeiModelProvider()
model_deepseek_provider = DeepSeekModelProvider()
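
Note: after this hunk, model_qwen_provider no longer exists as an enum member. For context, a minimal sketch of the lookup this enum serves (the member-name string and the surrounding call site are illustrative assumptions, not part of this diff):

    # Providers are resolved by enum member name; .value is the provider instance.
    provider = ModelProvideConstants['model_openai_provider'].value
    # ModelProvideConstants['model_qwen_provider'] would now raise KeyError, which is
    # why every remaining Qwen reference is deleted together with this entry.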

View File: apps/models_provider/impl/qwen_model_provider/__init__.py

@@ -1,8 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: __init__.py
@date: 2023/10/31 17:16
@desc:
"""

View File: apps/models_provider/impl/qwen_model_provider/credential/image.py

@@ -1,78 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: llm.py
@date: 2024/7/11 18:41
@desc:
"""
import traceback
from typing import Dict
from django.utils.translation import gettext_lazy as _, gettext
from langchain_core.messages import HumanMessage
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from models_provider.base_model_provider import BaseModelCredential, ValidCode
class QwenModelParams(BaseForm):
temperature = forms.SliderField(TooltipLabel(_('Temperature'),
_('Higher values make the output more random, while lower values make it more focused and deterministic')),
required=True, default_value=1.0,
_min=0.1,
_max=1.9,
_step=0.01,
precision=2)
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=800,
_min=1,
_max=100000,
_step=1,
precision=0)
class QwenVLModelCredential(BaseForm, BaseModelCredential):
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=False):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
gettext('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.stream([HumanMessage(content=[{"type": "text", "text": gettext('Hello')}])])
for chunk in res:
print(chunk)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
gettext(
'Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
api_key = forms.PasswordInputField('API Key', required=True)
def get_model_params_setting_form(self, model_name):
return QwenModelParams()
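
A hedged usage sketch of the credential check above; the provider instance and all argument values are assumptions for illustration:

    # Assumes a provider exposing get_model_type_list()/get_model(),
    # e.g. the QwenModelProvider defined at the end of this diff.
    credential = QwenVLModelCredential()
    ok = credential.is_valid(
        'IMAGE',                                  # model_type (assumed value)
        'qwen-vl-max',                            # a model name registered in the provider file below
        {'api_key': 'sk-xxxx'},                   # model_credential with a placeholder key
        {'temperature': 1.0, 'max_tokens': 800},  # model_params matching the QwenModelParams defaults
        provider,                                 # assumed provider instance
        raise_exception=False,                    # return False instead of raising on bad credentials
    )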

View File: apps/models_provider/impl/qwen_model_provider/credential/llm.py

@@ -1,76 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: llm.py
@date: 2024/7/11 18:41
@desc:
"""
import traceback
from typing import Dict
from django.utils.translation import gettext_lazy as _, gettext
from langchain_core.messages import HumanMessage
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from models_provider.base_model_provider import BaseModelCredential, ValidCode
class QwenModelParams(BaseForm):
temperature = forms.SliderField(TooltipLabel(_('Temperature'),
_('Higher values make the output more random, while lower values make it more focused and deterministic')),
required=True, default_value=1.0,
_min=0.1,
_max=1.9,
_step=0.01,
precision=2)
max_tokens = forms.SliderField(
TooltipLabel(_('Output the maximum Tokens'),
_('Specify the maximum number of tokens that the model can generate')),
required=True, default_value=800,
_min=1,
_max=100000,
_step=1,
precision=0)
class OpenAILLMModelCredential(BaseForm, BaseModelCredential):
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=False):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
gettext('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
model.invoke([HumanMessage(content=gettext('Hello'))])
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
gettext(
'Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
api_key = forms.PasswordInputField('API Key', required=True)
def get_model_params_setting_form(self, model_name):
return QwenModelParams()

View File: apps/models_provider/impl/qwen_model_provider/credential/tti.py

@@ -1,98 +0,0 @@
# coding=utf-8
"""
@project: MaxKB
@Author
@file: llm.py
@date: 2024/7/11 18:41
@desc:
"""
import traceback
from typing import Dict
from django.utils.translation import gettext_lazy as _, gettext
from common import forms
from common.exception.app_exception import AppApiException
from common.forms import BaseForm, TooltipLabel
from models_provider.base_model_provider import BaseModelCredential, ValidCode
class QwenModelParams(BaseForm):
size = forms.SingleSelect(
TooltipLabel(_('Image size'), _('Specify the size of the generated image, such as: 1024x1024')),
required=True,
default_value='1024*1024',
option_list=[
{'value': '1024*1024', 'label': '1024*1024'},
{'value': '720*1280', 'label': '720*1280'},
{'value': '768*1152', 'label': '768*1152'},
{'value': '1280*720', 'label': '1280*720'},
],
text_field='label',
value_field='value')
n = forms.SliderField(
TooltipLabel(_('Number of pictures'), _('Specify the number of generated images')),
required=True, default_value=1,
_min=1,
_max=4,
_step=1,
precision=0)
style = forms.SingleSelect(
TooltipLabel(_('Style'), _('Specify the style of generated images')),
required=True,
default_value='<auto>',
option_list=[
{'value': '<auto>', 'label': _('Default value, the image style is randomly output by the model')},
{'value': '<photography>', 'label': _('photography')},
{'value': '<portrait>', 'label': _('Portraits')},
{'value': '<3d cartoon>', 'label': _('3D cartoon')},
{'value': '<anime>', 'label': _('animation')},
{'value': '<oil painting>', 'label': _('painting')},
{'value': '<watercolor>', 'label': _('watercolor')},
{'value': '<sketch>', 'label': _('sketch')},
{'value': '<chinese painting>', 'label': _('Chinese painting')},
{'value': '<flat illustration>', 'label': _('flat illustration')},
],
text_field='label',
value_field='value'
)
class QwenTextToImageModelCredential(BaseForm, BaseModelCredential):
def is_valid(self, model_type: str, model_name, model_credential: Dict[str, object], model_params, provider,
raise_exception=False):
model_type_list = provider.get_model_type_list()
if not any(list(filter(lambda mt: mt.get('value') == model_type, model_type_list))):
raise AppApiException(ValidCode.valid_error.value,
gettext('{model_type} Model type is not supported').format(model_type=model_type))
for key in ['api_key']:
if key not in model_credential:
if raise_exception:
raise AppApiException(ValidCode.valid_error.value, gettext('{key} is required').format(key=key))
else:
return False
try:
model = provider.get_model(model_type, model_name, model_credential, **model_params)
res = model.check_auth()
print(res)
except Exception as e:
traceback.print_exc()
if isinstance(e, AppApiException):
raise e
if raise_exception:
raise AppApiException(ValidCode.valid_error.value,
gettext(
'Verification failed, please check whether the parameters are correct: {error}').format(
error=str(e)))
else:
return False
return True
def encryption_dict(self, model: Dict[str, object]):
return {**model, 'api_key': super().encryption(model.get('api_key', ''))}
api_key = forms.PasswordInputField('API Key', required=True)
def get_model_params_setting_form(self, model_name):
return QwenModelParams()

File diff suppressed because one or more lines are too long

[Image removed: apps/models_provider/impl/qwen_model_provider/icon/qwen_icon_svg, 14 KiB]

View File: apps/models_provider/impl/qwen_model_provider/model/image.py

@@ -1,26 +0,0 @@
# coding=utf-8
from typing import Dict
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class QwenVLChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
chat_tong_yi = QwenVLChatModel(
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
# stream_options={"include_usage": True},
streaming=True,
stream_usage=True,
**optional_params,
)
return chat_tong_yi

View File: apps/models_provider/impl/qwen_model_provider/model/llm.py

@@ -1,31 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: llm.py
@date: 2024/4/28 11:44
@desc:
"""
from typing import Dict
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_chat_open_ai import BaseChatOpenAI
class QwenChatModel(MaxKBBaseModel, BaseChatOpenAI):
@staticmethod
def is_cache_model():
return False
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = MaxKBBaseModel.filter_optional_params(model_kwargs)
chat_tong_yi = QwenChatModel(
model_name=model_name,
openai_api_key=model_credential.get('api_key'),
openai_api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
streaming=True,
stream_usage=True,
**optional_params,
)
return chat_tong_yi
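
For illustration, a minimal sketch of calling this factory (argument values are assumptions; the class talks to DashScope's OpenAI-compatible endpoint configured above):

    # Returns a streaming chat model; extra kwargs pass through
    # MaxKBBaseModel.filter_optional_params before reaching the constructor.
    model = QwenChatModel.new_instance(
        'LLM',                   # model_type (assumed value)
        'qwen-max',              # a model name registered in the provider file below
        {'api_key': 'sk-xxxx'},  # placeholder credential
        temperature=0.7, max_tokens=800,
    )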

View File: apps/models_provider/impl/qwen_model_provider/model/tti.py

@@ -1,59 +0,0 @@
# coding=utf-8
from http import HTTPStatus
from typing import Dict
from dashscope import ImageSynthesis
from django.utils.translation import gettext
from langchain_community.chat_models import ChatTongyi
from langchain_core.messages import HumanMessage
from models_provider.base_model_provider import MaxKBBaseModel
from models_provider.impl.base_tti import BaseTextToImage
class QwenTextToImageModel(MaxKBBaseModel, BaseTextToImage):
api_key: str
model_name: str
params: dict
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.api_key = kwargs.get('api_key')
self.model_name = kwargs.get('model_name')
self.params = kwargs.get('params')
@staticmethod
def new_instance(model_type, model_name, model_credential: Dict[str, object], **model_kwargs):
optional_params = {'params': {'size': '1024*1024', 'style': '<auto>', 'n': 1}}
for key, value in model_kwargs.items():
if key not in ['model_id', 'use_local', 'streaming']:
optional_params['params'][key] = value
chat_tong_yi = QwenTextToImageModel(
model_name=model_name,
api_key=model_credential.get('api_key'),
**optional_params,
)
return chat_tong_yi
def is_cache_model(self):
return False
def check_auth(self):
chat = ChatTongyi(api_key=self.api_key, model_name='qwen-max')
chat.invoke([HumanMessage([{"type": "text", "text": gettext('Hello')}])])
def generate_image(self, prompt: str, negative_prompt: str = None):
# api_base='https://dashscope.aliyuncs.com/compatible-mode/v1',
rsp = ImageSynthesis.call(api_key=self.api_key,
model=self.model_name,
prompt=prompt,
negative_prompt=negative_prompt,
**self.params)
file_urls = []
if rsp.status_code == HTTPStatus.OK:
for result in rsp.output.results:
file_urls.append(result.url)
else:
print('sync_call Failed, status_code: %s, code: %s, message: %s' %
(rsp.status_code, rsp.code, rsp.message))
return file_urls
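
A usage sketch for the class above (the prompt and parameter values are illustrative assumptions; kwargs other than model_id/use_local/streaming are merged into self.params and forwarded to ImageSynthesis.call):

    tti = QwenTextToImageModel.new_instance(
        'TTI',                   # model_type (assumed value)
        'wanx-v1',               # the model name registered in the provider file below
        {'api_key': 'sk-xxxx'},  # placeholder credential
        size='1024*1024', n=1, style='<auto>',  # overrides merged into the default params dict
    )
    urls = tti.generate_image(prompt='a watercolor cat')  # list of image URLs; empty on failure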

View File: apps/models_provider/impl/qwen_model_provider/qwen_model_provider.py

@@ -1,65 +0,0 @@
# coding=utf-8
"""
@project: maxkb
@Author
@file: qwen_model_provider.py
@date: 2023/10/31 16:19
@desc:
"""
import os
from common.utils.common import get_file_content
from models_provider.base_model_provider import ModelProvideInfo, ModelTypeConst, ModelInfo, IModelProvider, \
ModelInfoManage
from models_provider.impl.qwen_model_provider.credential.image import QwenVLModelCredential
from models_provider.impl.qwen_model_provider.credential.llm import OpenAILLMModelCredential
from models_provider.impl.qwen_model_provider.credential.tti import QwenTextToImageModelCredential
from models_provider.impl.qwen_model_provider.model.image import QwenVLChatModel
from models_provider.impl.qwen_model_provider.model.llm import QwenChatModel
from models_provider.impl.qwen_model_provider.model.tti import QwenTextToImageModel
from maxkb.conf import PROJECT_DIR
from django.utils.translation import gettext as _
qwen_model_credential = OpenAILLMModelCredential()
qwenvl_model_credential = QwenVLModelCredential()
qwentti_model_credential = QwenTextToImageModelCredential()
module_info_list = [
ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel),
ModelInfo('qwen-plus', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel),
ModelInfo('qwen-max', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel)
]
module_info_vl_list = [
ModelInfo('qwen-vl-max', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
ModelInfo('qwen-vl-max-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
ModelInfo('qwen-vl-plus-0809', '', ModelTypeConst.IMAGE, qwenvl_model_credential, QwenVLChatModel),
]
module_info_tti_list = [
ModelInfo('wanx-v1',
_('Tongyi Wanxiang - a large model for text-to-image generation; supports bilingual input in Chinese and English, and supports reference pictures as input for content or style migration. Key styles include but are not limited to watercolor, oil painting, Chinese painting, sketch, flat illustration, two-dimensional, and 3D cartoon.'),
ModelTypeConst.TTI, qwentti_model_credential, QwenTextToImageModel),
]
model_info_manage = (
ModelInfoManage.builder()
.append_model_info_list(module_info_list)
.append_default_model_info(
ModelInfo('qwen-turbo', '', ModelTypeConst.LLM, qwen_model_credential, QwenChatModel))
.append_model_info_list(module_info_vl_list)
.append_default_model_info(module_info_vl_list[0])
.append_model_info_list(module_info_tti_list)
.append_default_model_info(module_info_tti_list[0])
.build()
)
class QwenModelProvider(IModelProvider):
def get_model_info_manage(self):
return model_info_manage
def get_model_provide_info(self):
return ModelProvideInfo(provider='model_qwen_provider', name=_('Tongyi Qianwen'), icon=get_file_content(
os.path.join(PROJECT_DIR, "apps", 'models_provider', 'impl', 'qwen_model_provider', 'icon',
'qwen_icon_svg')))
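
For context, a sketch of how this provider class is consumed (the method names are those defined above; the registry behavior described in the comments is an assumption):

    provider = QwenModelProvider()
    info = provider.get_model_provide_info()   # provider key 'model_qwen_provider', display name, icon
    manage = provider.get_model_info_manage()  # ModelInfoManage holding the LLM/IMAGE/TTI entries built above
    # Deleting the ModelProvideConstants entry in the first hunk is what deregisters
    # the provider; these classes become unreachable and are removed with it.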