Support returning structured output when using non-streaming LLM API invocation (#26451)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
goofy 2025-09-30 10:58:35 +08:00 committed by GitHub
parent 8d803a26eb
commit 86c3c58e64
2 changed files with 12 additions and 1 deletion


@@ -20,6 +20,7 @@ class ModelInvokeCompletedEvent(NodeEventBase):
     usage: LLMUsage
     finish_reason: str | None = None
     reasoning_content: str | None = None
+    structured_output: dict | None = None


 class RunRetryEvent(NodeEventBase):
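A minimal sketch of how a consumer might read the new field, using a dataclass stand-in for `ModelInvokeCompletedEvent` (field names follow the diff above; the real event also carries an `LLMUsage` object, omitted here to keep the sketch runnable):

```python
from dataclasses import dataclass

# Stand-in mirroring the fields shown in the hunk above; illustrative only.
@dataclass
class ModelInvokeCompletedEventSketch:
    finish_reason: str | None = None
    reasoning_content: str | None = None
    structured_output: dict | None = None

def read_structured_output(event: ModelInvokeCompletedEventSketch) -> dict | None:
    # The field defaults to None, so consumers must guard before use.
    return event.structured_output

event = ModelInvokeCompletedEventSketch(structured_output={"answer": 42})
print(read_structured_output(event))  # {'answer': 42}
```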


@@ -23,6 +23,7 @@ from core.model_runtime.entities.llm_entities import (
     LLMResult,
     LLMResultChunk,
     LLMResultChunkWithStructuredOutput,
+    LLMResultWithStructuredOutput,
     LLMStructuredOutput,
     LLMUsage,
 )
@@ -278,6 +279,13 @@ class LLMNode(Node):
                 # Extract clean text from <think> tags
                 clean_text, _ = LLMNode._split_reasoning(result_text, self._node_data.reasoning_format)

+                # Process structured output if available from the event.
+                structured_output = (
+                    LLMStructuredOutput(structured_output=event.structured_output)
+                    if event.structured_output
+                    else None
+                )
+
                 # deduct quota
                 llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage)
                 break
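A runnable sketch of the wrapping step above, with a minimal stand-in for `LLMStructuredOutput` (the real class is imported from `core.model_runtime.entities.llm_entities`):

```python
from dataclasses import dataclass

@dataclass
class LLMStructuredOutputSketch:
    # Mirrors the single field the node wraps the raw dict into.
    structured_output: dict

def wrap_structured_output(raw: dict | None) -> LLMStructuredOutputSketch | None:
    # Same truthiness check as the node code above: None (and an empty
    # dict) fall through as None instead of being wrapped.
    return LLMStructuredOutputSketch(structured_output=raw) if raw else None

print(wrap_structured_output({"title": "hello"}))  # wrapped dataclass
print(wrap_structured_output(None))                # None
```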
@@ -1048,7 +1056,7 @@ class LLMNode(Node):
     @staticmethod
     def handle_blocking_result(
         *,
-        invoke_result: LLMResult,
+        invoke_result: LLMResult | LLMResultWithStructuredOutput,
         saver: LLMFileSaver,
         file_outputs: list["File"],
         reasoning_format: Literal["separated", "tagged"] = "tagged",
@@ -1079,6 +1087,8 @@
             finish_reason=None,
             # Reasoning content for workflow variables and downstream nodes
             reasoning_content=reasoning_content,
+            # Pass structured output if enabled
+            structured_output=getattr(invoke_result, "structured_output", None),
         )

     @staticmethod
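To illustrate why `getattr` with a default is used above: plain `LLMResult` instances have no `structured_output` attribute, while `LLMResultWithStructuredOutput` instances do. A sketch with stand-in classes:

```python
class PlainResultSketch:
    """Stand-in for LLMResult: no structured_output attribute."""
    text = "plain"

class StructuredResultSketch:
    """Stand-in for LLMResultWithStructuredOutput."""
    text = "structured"
    structured_output = {"score": 0.9}

for result in (PlainResultSketch(), StructuredResultSketch()):
    # getattr with a None default avoids AttributeError on plain results,
    # so a single code path handles both invoke_result types.
    print(getattr(result, "structured_output", None))
# Output:
# None
# {'score': 0.9}
```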