From f55faae31b0e9e4b308056c6ad956737f3abd7a0 Mon Sep 17 00:00:00 2001 From: Novice Date: Thu, 25 Dec 2025 13:59:38 +0800 Subject: [PATCH] chore: strip reasoning from chatflow answers and persist generation details --- .../advanced_chat/generate_task_pipeline.py | 18 ++++++++++++- .../easy_ui_based_generate_task_pipeline.py | 25 ++++++++++++++++--- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 0d5e1e5dfd..53fa27cca7 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -529,7 +529,9 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): match chunk_type: case ChunkType.TEXT: self._stream_buffer.record_text_chunk(delta_text) + self._task_state.answer += delta_text case ChunkType.THOUGHT: + # Reasoning should not be part of final answer text self._stream_buffer.record_thought_chunk(delta_text) case ChunkType.TOOL_CALL: self._stream_buffer.record_tool_call( @@ -542,8 +544,8 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): tool_call_id=tool_call_id, result=delta_text, ) + self._task_state.answer += delta_text - self._task_state.answer += delta_text yield self._message_cycle_manager.message_to_stream_response( answer=delta_text, message_id=self._message_id, @@ -920,6 +922,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): # If there are assistant files, remove markdown image links from answer answer_text = self._task_state.answer + answer_text = self._strip_think_blocks(answer_text) if self._recorded_files: # Remove markdown image links since we're storing files separately answer_text = re.sub(r"!\[.*?\]\(.*?\)", "", answer_text).strip() @@ -971,6 +974,19 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): ] session.add_all(message_files) + # Save generation detail 
(reasoning/tool calls/sequence) from stream buffer + self._save_generation_detail(session=session, message=message) + + @staticmethod + def _strip_think_blocks(text: str) -> str: + """Remove <think>...</think> blocks (including their content) from text.""" + if not text or "<think" not in text.lower(): + return text + clean_text = re.sub(r"<think[^>]*>.*?</think>", "", text, flags=re.IGNORECASE | re.DOTALL) + clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip() + return clean_text + def _save_generation_detail(self, *, session: Session, message: Message) -> None: + """ + Save LLM generation detail for Chatflow using stream event buffer. diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 6cbd48e27b..c4ea428270 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -441,6 +441,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): if agent_thoughts: # Agent-Chat mode: merge MessageAgentThought records content_pos = 0 + cleaned_answer_parts: list[str] = [] for thought in agent_thoughts: # Add thought/reasoning if thought.thought: @@ -466,10 +467,26 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): # Add answer content if present if thought.answer: - start = content_pos - end = content_pos + len(thought.answer) - sequence.append({"type": "content", "start": start, "end": end}) - content_pos = end + content_text = thought.answer + if "<think" in content_text: