mirror of
https://github.com/langgenius/dify.git
synced 2026-05-09 21:28:25 +08:00
fix(api): fix duplicate answer and completion app upgrade issues
1. Remove StreamChunkEvent from AgentV2Node._run_without_tools():
The agent-v2 node was yielding StreamChunkEvent during LLM streaming,
AND the downstream answer node was outputting the same text via
{{#agent.text#}} variable reference, causing "FourFour" duplication.
Now text only flows through outputs.text -> answer node (single path).
2. Map inputs.query to the top-level query for a transparent completion app upgrade:
Completion apps send {inputs: {query: "..."}}, not a top-level {query: "..."}.
The VirtualWorkflowSynthesizer route now extracts query from inputs
when the top-level query is missing or empty.
Verified:
- Old chat app: "What is 2+2?" -> "Four" (was "FourFour")
- Old completion app: {inputs: {query: "What is 3+3?"}} -> "3 + 3 = 6" (was failing)
- Old agent-chat app: still works
Made-with: Cursor
This commit is contained in:
parent
7052257c8d
commit
482a004efe
@ -170,10 +170,6 @@ class AgentV2Node(Node[AgentV2NodeData]):
|
||||
chunk_text = self._extract_chunk_text(chunk)
|
||||
if chunk_text:
|
||||
full_text += chunk_text
|
||||
yield StreamChunkEvent(
|
||||
selector=[self._node_id, "text"],
|
||||
chunk=chunk_text,
|
||||
)
|
||||
|
||||
if chunk.delta.usage:
|
||||
usage = chunk.delta.usage
|
||||
|
||||
@ -141,6 +141,12 @@ class AppGenerateService:
|
||||
workflow.id,
|
||||
)
|
||||
|
||||
upgraded_args = dict(args)
|
||||
if "query" not in upgraded_args or not upgraded_args.get("query"):
|
||||
inputs = upgraded_args.get("inputs", {})
|
||||
upgraded_args["query"] = inputs.get("query", "") or inputs.get("input", "") or str(inputs)
|
||||
args = upgraded_args
|
||||
|
||||
if streaming:
|
||||
with rate_limit_context(rate_limit, request_id):
|
||||
payload = AppExecutionParams.new(
|
||||
|
||||
Loading…
Reference in New Issue
Block a user