liqiang-fit2cloud 2024-12-05 13:49:16 +08:00
commit d6a629369f
3 changed files with 34 additions and 18 deletions

View File

@@ -57,13 +57,20 @@ def write_context(node_variable: Dict, workflow_variable: Dict, node: INode, wor
     _write_context(node_variable, workflow_variable, node, workflow, answer)

+def file_id_to_base64(file_id: str):
+    file = QuerySet(File).filter(id=file_id).first()
+    base64_image = base64.b64encode(file.get_byte()).decode("utf-8")
+    return base64_image

 class BaseImageUnderstandNode(IImageUnderstandNode):
     def save_context(self, details, workflow_manage):
         self.context['answer'] = details.get('answer')
         self.context['question'] = details.get('question')
         self.answer_text = details.get('answer')

-    def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id, chat_record_id,
+    def execute(self, model_id, system, prompt, dialogue_number, dialogue_type, history_chat_record, stream, chat_id,
+                chat_record_id,
                 image,
                 **kwargs) -> NodeResult:
         # Handle invalid parameters
@@ -72,12 +79,13 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
         image_model = get_model_instance_by_model_user_id(model_id, self.flow_params_serializer.data.get('user_id'))
         # History messages shown in the execution details do not need the image content
-        history_message =self.get_history_message_for_details(history_chat_record, dialogue_number)
+        history_message = self.get_history_message_for_details(history_chat_record, dialogue_number)
         self.context['history_message'] = history_message
         question = self.generate_prompt_question(prompt)
         self.context['question'] = question.content
         # Build the message list with the real history_message
-        message_list = self.generate_message_list(image_model, system, prompt, self.get_history_message(history_chat_record, dialogue_number), image)
+        message_list = self.generate_message_list(image_model, system, prompt,
+                                                  self.get_history_message(history_chat_record, dialogue_number), image)
         self.context['message_list'] = message_list
         self.context['image_list'] = image
         self.context['dialogue_type'] = dialogue_type
@@ -92,11 +100,11 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
                            'history_message': history_message, 'question': question.content}, {},
                           _write_context=write_context)

     def get_history_message_for_details(self, history_chat_record, dialogue_number):
         start_index = len(history_chat_record) - dialogue_number
         history_message = reduce(lambda x, y: [*x, *y], [
-            [self.generate_history_human_message_for_details(history_chat_record[index]), self.generate_history_ai_message(history_chat_record[index])]
+            [self.generate_history_human_message_for_details(history_chat_record[index]),
+             self.generate_history_ai_message(history_chat_record[index])]
             for index in
             range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
         return history_message
@@ -115,17 +123,19 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
             image_list = data['image_list']
             if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                 return HumanMessage(content=chat_record.problem_text)
-            file_id = image_list[0]['file_id']
+            file_id_list = [image.get('file_id') for image in image_list]
             return HumanMessage(content=[
-                {'type': 'text', 'text': data['question']},
-                {'type': 'image_url', 'image_url': {'url': f'/api/file/{file_id}'}},
-            ])
+                {'type': 'text', 'text': data['question']},
+                *[{'type': 'image_url', 'image_url': {'url': f'/api/file/{file_id}'}} for file_id in file_id_list]
+            ])
         return HumanMessage(content=chat_record.problem_text)

     def get_history_message(self, history_chat_record, dialogue_number):
         start_index = len(history_chat_record) - dialogue_number
         history_message = reduce(lambda x, y: [*x, *y], [
-            [self.generate_history_human_message(history_chat_record[index]), self.generate_history_ai_message(history_chat_record[index])]
+            [self.generate_history_human_message(history_chat_record[index]),
+             self.generate_history_ai_message(history_chat_record[index])]
             for index in
             range(start_index if start_index > 0 else 0, len(history_chat_record))], [])
         return history_message
@@ -137,13 +147,12 @@ class BaseImageUnderstandNode(IImageUnderstandNode):
             image_list = data['image_list']
             if len(image_list) == 0 or data['dialogue_type'] == 'WORKFLOW':
                 return HumanMessage(content=chat_record.problem_text)
-            file_id = image_list[0]['file_id']
-            file = QuerySet(File).filter(id=file_id).first()
-            base64_image = base64.b64encode(file.get_byte()).decode("utf-8")
+            image_base64_list = [file_id_to_base64(image.get('file_id')) for image in image_list]
             return HumanMessage(
                 content=[
                     {'type': 'text', 'text': data['question']},
-                    {'type': 'image_url', 'image_url': {'url': f'data:image/jpeg;base64,{base64_image}'}},
+                    *[{'type': 'image_url', 'image_url': {'url': f'data:image/jpeg;base64,{base64_image}'}} for
+                      base64_image in image_base64_list]
                 ])
         return HumanMessage(content=chat_record.problem_text)
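Taken together, the changes in this file let the image-understand node send every uploaded image to the model instead of only the first one: the execution-details view keeps lightweight `/api/file/{file_id}` URLs, while the real model call inlines each image as a base64 data URL via the new `file_id_to_base64` helper. A minimal sketch of that multi-image message construction, assuming langchain_core's `HumanMessage`; the function and variable names below are illustrative, not the project's API:

```python
# Sketch only: send the question plus every uploaded image in one message.
import base64
from typing import List

from langchain_core.messages import HumanMessage


def build_multi_image_message(question: str, images: List[bytes]) -> HumanMessage:
    # One text part followed by one image_url part per image,
    # each inlined as a base64 data URL, as the node does for the model call.
    image_parts = [
        {'type': 'image_url',
         'image_url': {'url': f'data:image/jpeg;base64,{base64.b64encode(raw).decode("utf-8")}'}}
        for raw in images
    ]
    return HumanMessage(content=[{'type': 'text', 'text': question}, *image_parts])
```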

View File

@@ -653,7 +653,13 @@ class DocumentSerializers(ApiMixin, serializers.Serializer):
                 document_id=document_id).values('id'),
                 TaskType(instance.get('type')),
                 State.REVOKE)
-            ListenerManagement.update_status(QuerySet(Document).filter(id=document_id), TaskType(instance.get('type')),
+            ListenerManagement.update_status(QuerySet(Document).annotate(
+                reversed_status=Reverse('status'),
+                task_type_status=Substr('reversed_status', TaskType(instance.get('type')).value,
+                                        TaskType(instance.get('type')).value),
+            ).filter(task_type_status__in=[State.PENDING.value, State.STARTED.value]).filter(
+                id=document_id).values('id'),
+                TaskType(instance.get('type')),
                 State.REVOKE)
             return True
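The serializer change narrows the revoke: `Document.status` appears to pack one state character per task type, so the query reverses the string, extracts the character for the current `TaskType`, and only revokes documents whose state for that task is still PENDING or STARTED. A hedged sketch of the annotation pattern, assuming a single state character per task type; the function name, default state values, and substring length of 1 are illustrative (the commit itself passes `TaskType(...).value` for the length):

```python
# Sketch only: keep Document rows still pending/started for one task type
# before marking them revoked.
from django.db.models.functions import Reverse, Substr


def revocable_documents(documents, task_type_value, pending='0', started='1'):
    # Reverse the packed status string so position task_type_value (1-based)
    # addresses the character encoding this task type's state.
    return documents.annotate(
        reversed_status=Reverse('status'),
        task_type_status=Substr('reversed_status', task_type_value, 1),
    ).filter(task_type_status__in=[pending, started])
```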

View File

@@ -393,10 +393,11 @@
               :src="h.image_url.url"
               alt=""
               fit="cover"
-              style="width: 40px; height: 40px; display: block"
-              class="border-r-4"
+              style="width: 40px; height: 40px; display: inline-block"
+              class="border-r-4 mr-8"
             />
-            <span v-else>{{ h.text }}</span>
+            <span v-else>{{ h.text }}<br /></span>
           </template>
         </span>