mirror of https://github.com/langgenius/dify.git
fix: support structured output in streaming mode for LLM node (#27089)
parent 2382229c7d
commit f811471b18
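Before this change, structured output was not propagated when the LLM node ran in streaming mode. The diff below adds a collector that captures structured_output from streaming chunks as they pass through and attaches it to the node's final result; hedged illustrative sketches follow each hunk.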
@@ -441,10 +441,14 @@ class LLMNode(Node):
        usage = LLMUsage.empty_usage()
        finish_reason = None
        full_text_buffer = io.StringIO()
        collected_structured_output = None  # Collect structured_output from streaming chunks
        # Consume the invoke result and handle generator exception
        try:
            for result in invoke_result:
                if isinstance(result, LLMResultChunkWithStructuredOutput):
                    # Collect structured_output from the chunk
                    if result.structured_output is not None:
                        collected_structured_output = dict(result.structured_output)
                    yield result
                if isinstance(result, LLMResultChunk):
                    contents = result.delta.message.content
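To make the first hunk's pattern concrete, here is a minimal, self-contained sketch of the same collect-while-forwarding logic. `Chunk`, `StructuredChunk`, and `collect_stream` are hypothetical stand-ins invented for illustration; dify's real types are `LLMResultChunk` and `LLMResultChunkWithStructuredOutput`, and the real node re-yields workflow events rather than returning a tuple.

import io
from dataclasses import dataclass
from typing import Any, Iterable


@dataclass
class Chunk:
    """Stand-in for a plain streaming chunk carrying a text delta."""
    text: str


@dataclass
class StructuredChunk(Chunk):
    """Stand-in for a chunk that also carries parsed structured output."""
    structured_output: dict[str, Any] | None = None


def collect_stream(chunks: Iterable[Chunk]) -> tuple[str, dict[str, Any] | None]:
    """Buffer text deltas and capture structured output as chunks pass through."""
    collected: dict[str, Any] | None = None
    buffer = io.StringIO()
    for chunk in chunks:
        if isinstance(chunk, StructuredChunk) and chunk.structured_output is not None:
            # Last non-None value wins: the parsed object typically arrives
            # once, near the end of the stream.
            collected = dict(chunk.structured_output)
        buffer.write(chunk.text)  # the real node also re-yields each chunk here
    return buffer.getvalue(), collected


text, structured = collect_stream(
    [Chunk(text='{"answer"'), StructuredChunk(text=": 42}", structured_output={"answer": 42})]
)
assert text == '{"answer": 42}'
assert structured == {"answer": 42}

Note the design choice visible in the hunk: the structured-output branch collects the mapping and forwards the chunk with `yield result`, and the separate `if isinstance(result, LLMResultChunk)` check still runs afterwards, so the ordinary text handling is not short-circuited.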
@@ -492,6 +496,8 @@ class LLMNode(Node):
            finish_reason=finish_reason,
            # Reasoning content for workflow variables and downstream nodes
            reasoning_content=reasoning_content,
            # Pass structured output if collected from streaming chunks
            structured_output=collected_structured_output,
        )

    @staticmethod
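The second hunk simply threads the collected mapping into the node's final result alongside `finish_reason` and `reasoning_content`. A minimal sketch of that shape, with a hypothetical `NodeResult` dataclass standing in for dify's actual result object:

from dataclasses import dataclass
from typing import Any


@dataclass
class NodeResult:
    """Hypothetical stand-in for the node's final result object."""
    text: str
    finish_reason: str | None
    reasoning_content: str | None
    structured_output: dict[str, Any] | None


# With the fix, a streamed run can populate structured_output instead of
# leaving it None; downstream nodes read it the same way as in blocking mode.
result = NodeResult(
    text='{"answer": 42}',
    finish_reason="stop",
    reasoning_content=None,
    structured_output={"answer": 42},  # collected while the chunks streamed
)
assert result.structured_output == {"answer": 42}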