chore: strip reasoning from chatflow answers and persist generation details

Novice 2025-12-25 13:59:38 +08:00
parent 7fc25cafb2
commit f55faae31b
2 changed files with 38 additions and 5 deletions


@@ -529,7 +529,9 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
                 match chunk_type:
                     case ChunkType.TEXT:
                         self._stream_buffer.record_text_chunk(delta_text)
+                        self._task_state.answer += delta_text
                     case ChunkType.THOUGHT:
+                        # Reasoning should not be part of final answer text
                         self._stream_buffer.record_thought_chunk(delta_text)
                     case ChunkType.TOOL_CALL:
                         self._stream_buffer.record_tool_call(
@@ -542,8 +544,8 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
                             tool_call_id=tool_call_id,
                             result=delta_text,
                         )
-                self._task_state.answer += delta_text
                 yield self._message_cycle_manager.message_to_stream_response(
                     answer=delta_text,
                     message_id=self._message_id,
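
For context, a minimal standalone sketch of the accumulation rule these two hunks establish (only ChunkType and the delta handling mirror the diff; the harness around them is illustrative): THOUGHT and tool deltas are still streamed to the client, but after this change only TEXT deltas reach the persisted answer.

    from enum import Enum

    class ChunkType(Enum):
        TEXT = "text"
        THOUGHT = "thought"
        TOOL_CALL = "tool_call"

    def accumulate_answer(chunks: list[tuple[ChunkType, str]]) -> str:
        """Mimics the new behavior: only TEXT deltas join the stored answer."""
        answer = ""
        for chunk_type, delta_text in chunks:
            if chunk_type is ChunkType.TEXT:
                answer += delta_text  # persisted as well as streamed
            # THOUGHT / TOOL_CALL deltas are streamed only, never persisted
        return answer

    assert accumulate_answer(
        [(ChunkType.THOUGHT, "reasoning..."), (ChunkType.TEXT, "Hello"), (ChunkType.TEXT, "!")]
    ) == "Hello!"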
@@ -920,6 +922,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
         # If there are assistant files, remove markdown image links from answer
         answer_text = self._task_state.answer
+        answer_text = self._strip_think_blocks(answer_text)
         if self._recorded_files:
             # Remove markdown image links since we're storing files separately
             answer_text = re.sub(r"!\[.*?\]\(.*?\)", "", answer_text).strip()
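
The image-link regex in the final context line is reused unchanged; the new call simply runs the think-block stripper first. A quick illustration of what the image regex removes (the sample string is made up):

    import re

    raw = "See the plot: ![plot](files/plot.png)"
    assert re.sub(r"!\[.*?\]\(.*?\)", "", raw).strip() == "See the plot:"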
@@ -971,6 +974,19 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
             ]
             session.add_all(message_files)
+
+        # Save generation detail (reasoning/tool calls/sequence) from stream buffer
+        self._save_generation_detail(session=session, message=message)
+
+    @staticmethod
+    def _strip_think_blocks(text: str) -> str:
+        """Remove <think>...</think> blocks (including their content) from text."""
+        if not text or "<think" not in text.lower():
+            return text
+        clean_text = re.sub(r"<think[^>]*>.*?</think>", "", text, flags=re.IGNORECASE | re.DOTALL)
+        clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip()
+        return clean_text
+
     def _save_generation_detail(self, *, session: Session, message: Message) -> None:
         """
         Save LLM generation detail for Chatflow using stream event buffer.
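
The new helper is self-contained; copied out of the diff with only an import re added, it behaves like this (sample strings are made up):

    import re

    def _strip_think_blocks(text: str) -> str:
        """Remove <think>...</think> blocks (including their content) from text."""
        if not text or "<think" not in text.lower():
            return text
        clean_text = re.sub(r"<think[^>]*>.*?</think>", "", text, flags=re.IGNORECASE | re.DOTALL)
        clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip()
        return clean_text

    raw = "<think>\nplan the reply\n</think>\n\nThe capital of France is Paris."
    assert _strip_think_blocks(raw) == "The capital of France is Paris."

    # An unterminated <think> tag passes through unchanged: the non-greedy
    # match requires a closing </think>, so nothing is removed.
    assert _strip_think_blocks("<think>dangling") == "<think>dangling"

The second regex pass only collapses runs of blank lines left behind by the removal; it never touches non-whitespace content.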


@@ -441,6 +441,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
             if agent_thoughts:
                 # Agent-Chat mode: merge MessageAgentThought records
                 content_pos = 0
+                cleaned_answer_parts: list[str] = []
                 for thought in agent_thoughts:
                     # Add thought/reasoning
                     if thought.thought:
@@ -466,10 +467,26 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
                     # Add answer content if present
                     if thought.answer:
-                        start = content_pos
-                        end = content_pos + len(thought.answer)
-                        sequence.append({"type": "content", "start": start, "end": end})
-                        content_pos = end
+                        content_text = thought.answer
+                        if "<think" in content_text.lower():
+                            clean_answer, extracted_reasoning = self._split_reasoning_from_answer(content_text)
+                            if extracted_reasoning:
+                                reasoning_list.append(extracted_reasoning)
+                                sequence.append({"type": "reasoning", "index": len(reasoning_list) - 1})
+                                content_text = clean_answer
+                            thought.answer = clean_answer or content_text
+                        if content_text:
+                            start = content_pos
+                            end = content_pos + len(content_text)
+                            sequence.append({"type": "content", "start": start, "end": end})
+                            content_pos = end
+                            cleaned_answer_parts.append(content_text)
+                if cleaned_answer_parts:
+                    merged_answer = "".join(cleaned_answer_parts)
+                    message.answer = merged_answer
+                    llm_result.message.content = merged_answer
             else:
                 # Completion/Chat mode: use reasoning_content from llm_result
                 reasoning_content = llm_result.reasoning_content
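
_split_reasoning_from_answer is called above but defined outside this diff. A plausible reconstruction, assuming it follows the same <think> convention as _strip_think_blocks (the signature and body below are guesses, not the committed code):

    import re

    def _split_reasoning_from_answer(text: str) -> tuple[str, str]:
        """Hypothetical sketch: split a <think>-tagged answer into
        (clean_answer, extracted_reasoning)."""
        pattern = r"<think[^>]*>(.*?)</think>"
        blocks = re.findall(pattern, text, flags=re.IGNORECASE | re.DOTALL)
        clean = re.sub(pattern, "", text, flags=re.IGNORECASE | re.DOTALL).strip()
        return clean, "\n".join(block.strip() for block in blocks)

    clean, reasoning = _split_reasoning_from_answer("<think>recall capitals</think>Paris.")
    assert clean == "Paris."
    assert reasoning == "recall capitals"

For that input, the hunk above would append the reasoning to reasoning_list and then index the cleaned content, producing a sequence like [{"type": "reasoning", "index": 0}, {"type": "content", "start": 0, "end": 6}], where the offsets point into the merged cleaned answer ("Paris.", length 6). Overwriting both message.answer and llm_result.message.content with the merged cleaned parts keeps downstream consumers from ever seeing the raw reasoning text.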