diff --git a/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py b/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py
index 8d59deaa7..1aaf7b56c 100644
--- a/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py
+++ b/apps/application/chat_pipeline/step/chat_step/impl/base_chat_step.py
@@ -170,7 +170,7 @@ class BaseChatStep(IChatStep):
             return iter(
                 [AIMessageChunk(content=no_references_setting.get('value').replace('{question}', problem_text))]), False
         if chat_model is None:
-            return iter([AIMessageChunk('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。')]), False
+            return iter([AIMessageChunk('抱歉,没有配置 AI 模型,请先去应用中设置 AI 模型。')]), False
         else:
             return chat_model.stream(message_list), True
 
@@ -214,7 +214,7 @@ class BaseChatStep(IChatStep):
                 'status') == 'designated_answer':
             return AIMessage(no_references_setting.get('value').replace('{question}', problem_text)), False
         if chat_model is None:
-            return AIMessage('抱歉,没有配置 AI 模型,无法优化引用分段,请先去应用中设置 AI 模型。'), False
+            return AIMessage('抱歉,没有配置 AI 模型,请先去应用中设置 AI 模型。'), False
         else:
             return chat_model.invoke(message_list), True
 
diff --git a/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py b/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py
index e12fd082d..920cf626f 100644
--- a/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py
+++ b/apps/application/chat_pipeline/step/reset_problem_step/i_reset_problem_step.py
@@ -45,8 +45,9 @@ class IResetProblemStep(IBaseChatPipelineStep):
         manage.context['problem_text'] = source_problem_text
         manage.context['padding_problem_text'] = padding_problem
         # 累加tokens
-        manage.context['message_tokens'] = manage.context['message_tokens'] + self.context.get('message_tokens')
-        manage.context['answer_tokens'] = manage.context['answer_tokens'] + self.context.get('answer_tokens')
+        manage.context['message_tokens'] = manage.context.get('message_tokens', 0) + self.context.get('message_tokens',
+                                                                                                      0)
+        manage.context['answer_tokens'] = manage.context.get('answer_tokens', 0) + self.context.get('answer_tokens', 0)
 
     @abstractmethod
     def execute(self, problem_text: str, history_chat_record: List[ChatRecord] = None, model_id: str = None,
diff --git a/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py b/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py
index 2d631e076..5e64f8b1f 100644
--- a/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py
+++ b/apps/application/chat_pipeline/step/reset_problem_step/impl/base_reset_problem_step.py
@@ -25,6 +25,8 @@ class BaseResetProblemStep(IResetProblemStep):
                 user_id=None, **kwargs) -> str:
         chat_model = get_model_instance_by_model_user_id(model_id, user_id) if model_id is not None else None
+        if chat_model is None:
+            return problem_text
         start_index = len(history_chat_record) - 3
         history_message = [[history_chat_record[index].get_human_message(),
                             history_chat_record[index].get_ai_message()]
                            for index in
@@ -57,8 +59,8 @@ class BaseResetProblemStep(IResetProblemStep):
             'step_type': 'problem_padding',
             'run_time': self.context['run_time'],
             'model_id': str(manage.context['model_id']) if 'model_id' in manage.context else None,
-            'message_tokens': self.context['message_tokens'],
-            'answer_tokens': self.context['answer_tokens'],
+            'message_tokens': self.context.get('message_tokens', 0),
+            'answer_tokens': self.context.get('answer_tokens', 0),
             'cost': 0,
             'padding_problem_text': self.context.get('padding_problem_text'),
             'problem_text': self.context.get("step_args").get('problem_text'),
diff --git a/apps/application/serializers/application_serializers.py b/apps/application/serializers/application_serializers.py
index 41ba5050c..01a82a841 100644
--- a/apps/application/serializers/application_serializers.py
+++ b/apps/application/serializers/application_serializers.py
@@ -716,10 +716,11 @@ class ApplicationSerializer(serializers.Serializer):
                                desc=application.get('desc'), prologue=application.get('prologue'),
                                dialogue_number=application.get('dialogue_number'),
                                dataset_setting=application.get('dataset_setting'),
+                               model_setting=application.get('model_setting'),
                                model_params_setting=application.get('model_params_setting'),
                                tts_model_params_setting=application.get('tts_model_params_setting'),
                                problem_optimization=application.get('problem_optimization'),
-                               icon=application.get('icon'),
+                               icon="/ui/favicon.ico",
                                work_flow=work_flow,
                                type=application.get('type'),
                                problem_optimization_prompt=application.get('problem_optimization_prompt'),