From 482a004efe603c974da608b7636ba0aef90ecff2 Mon Sep 17 00:00:00 2001
From: Yansong Zhang <916125788@qq.com>
Date: Thu, 9 Apr 2026 12:02:43 +0800
Subject: [PATCH] fix(api): fix duplicate answer and completion app upgrade
 issues

1. Remove StreamChunkEvent from AgentV2Node._run_without_tools():
   The agent-v2 node was yielding StreamChunkEvent during LLM streaming,
   AND the downstream answer node was outputting the same text via
   {{#agent.text#}} variable reference, causing "FourFour" duplication.
   Now text only flows through outputs.text -> answer node (single path).

2. Map inputs to query for completion app transparent upgrade:
   Completion apps send {inputs: {query: "..."}} not {query: "..."}.
   VirtualWorkflowSynthesizer route now extracts query from inputs when
   the top-level query is missing.

Verified:
- Old chat app: "What is 2+2?" -> "Four" (was "FourFour")
- Old completion app: {inputs: {query: "What is 3+3?"}} -> "3 + 3 = 6" (was failing)
- Old agent-chat app: still works

Made-with: Cursor
---
 api/core/workflow/nodes/agent_v2/node.py | 4 ----
 api/services/app_generate_service.py     | 6 ++++++
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/api/core/workflow/nodes/agent_v2/node.py b/api/core/workflow/nodes/agent_v2/node.py
index 6b9a373aa8..af214d44e1 100644
--- a/api/core/workflow/nodes/agent_v2/node.py
+++ b/api/core/workflow/nodes/agent_v2/node.py
@@ -170,10 +170,6 @@ class AgentV2Node(Node[AgentV2NodeData]):
                 chunk_text = self._extract_chunk_text(chunk)
                 if chunk_text:
                     full_text += chunk_text
-                    yield StreamChunkEvent(
-                        selector=[self._node_id, "text"],
-                        chunk=chunk_text,
-                    )
 
                 if chunk.delta.usage:
                     usage = chunk.delta.usage
diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py
index 06fc51bddc..520cb4f9cf 100644
--- a/api/services/app_generate_service.py
+++ b/api/services/app_generate_service.py
@@ -141,6 +141,12 @@ class AppGenerateService:
                     workflow.id,
                 )
 
+                upgraded_args = dict(args)
+                if "query" not in upgraded_args or not upgraded_args.get("query"):
+                    inputs = upgraded_args.get("inputs", {})
+                    upgraded_args["query"] = inputs.get("query", "") or inputs.get("input", "") or str(inputs)
+                args = upgraded_args
+
                 if streaming:
                     with rate_limit_context(rate_limit, request_id):
                         payload = AppExecutionParams.new(