fix: preserve structured_output_enabled through the second node config validation (#35747)

Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com>
Co-authored-by: Yunlu Wen <yunlu.wen@dify.ai>
wangxiaolei 2026-04-30 18:34:33 +08:00 committed by fatelei
parent 9d96e6e520
commit 29388b2a89
4 changed files with 222 additions and 9 deletions
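
The failure mode behind this fix, as a minimal self-contained sketch (the model and field names below are illustrative stand-ins, not the real Dify LLMNodeData): a pydantic field populated through an alias silently falls back to its default when the model is dumped by field name and then validated a second time. Dumping with by_alias=True before the second validation, as the factory change below does, preserves the flag.

    # Minimal sketch of the bug class, assuming pydantic v2 semantics;
    # "NodeData" and its field are hypothetical stand-ins.
    from pydantic import BaseModel, Field

    class NodeData(BaseModel):
        # The workflow JSON supplies the flag under the alias
        # "structured_output_enabled"; the Python field name differs.
        structured_output_switch_on: bool = Field(default=False, alias="structured_output_enabled")

    first = NodeData.model_validate({"structured_output_enabled": True})
    assert first.structured_output_switch_on is True

    # model_dump() emits field names by default; without populate_by_name the
    # model only accepts the alias, so a second validation drops the flag.
    second = NodeData.model_validate(first.model_dump(mode="python"))
    assert second.structured_output_switch_on is False  # the reported bug

    # Dumping by alias round-trips cleanly, which is what the fix does.
    third = NodeData.model_validate(first.model_dump(mode="python", by_alias=True))
    assert third.structured_output_switch_on is True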


@@ -365,7 +365,8 @@ class DifyNodeFactory(NodeFactory):
             (including pydantic ValidationError, which subclasses ValueError),
             if node type is unknown, or if no implementation exists for the resolved version
         """
-        typed_node_config = NodeConfigDictAdapter.validate_python(adapt_node_config_for_graph(node_config))
+        adapted_node_config = adapt_node_config_for_graph(node_config)
+        typed_node_config = NodeConfigDictAdapter.validate_python(adapted_node_config)
         node_id = typed_node_config["id"]
         node_data = typed_node_config["data"]
         node_class = self._resolve_node_class(node_type=node_data.type, node_version=str(node_data.version))
@@ -373,6 +374,11 @@ class DifyNodeFactory(NodeFactory):
         # Re-validate using the resolved node class so workflow-local node schemas
         # stay explicit and constructors receive the concrete typed payload.
         resolved_node_data = self._validate_resolved_node_data(node_class, node_data)
+        config_for_node_init: BaseNodeData | dict[str, Any]
+        if isinstance(resolved_node_data, BaseNodeData):
+            config_for_node_init = resolved_node_data.model_dump(mode="python", by_alias=True)
+        else:
+            config_for_node_init = resolved_node_data
         node_type = node_data.type
         node_init_kwargs_factories: Mapping[NodeType, Callable[[], dict[str, object]]] = {
             BuiltinNodeTypes.CODE: lambda: {
@@ -442,7 +448,7 @@ class DifyNodeFactory(NodeFactory):
         node_init_kwargs = node_init_kwargs_factories.get(node_type, lambda: {})()
         return node_class(
             node_id=node_id,
-            config=resolved_node_data,
+            config=config_for_node_init,
             graph_init_params=self.graph_init_params,
             graph_runtime_state=self.graph_runtime_state,
             **node_init_kwargs,
@@ -474,10 +480,7 @@ class DifyNodeFactory(NodeFactory):
         include_retriever_attachment_loader: bool,
         include_jinja2_template_renderer: bool,
     ) -> dict[str, object]:
-        validated_node_data = cast(
-            LLMCompatibleNodeData,
-            self._validate_resolved_node_data(node_class=node_class, node_data=node_data),
-        )
+        validated_node_data = cast(LLMCompatibleNodeData, node_data)
         model_instance = self._build_model_instance_for_llm_node(validated_node_data)
         node_init_kwargs: dict[str, object] = {
             "credentials_provider": self._llm_credentials_provider,


@@ -5,6 +5,7 @@ from graphon.graph_events import (
     NodeRunStreamChunkEvent,
 )
+from .test_mock_config import MockConfigBuilder
 from .test_table_runner import TableTestRunner
@@ -44,3 +45,51 @@ def test_tool_in_chatflow():
     assert stream_chunk_events[0].chunk == "hello, dify!", (
         f"Expected chunk to be 'hello, dify!', but got {stream_chunk_events[0].chunk}"
     )
+
+
+def test_answer_can_render_llm_structured_output_in_chatflow():
+    runner = TableTestRunner()
+    fixture_data = runner.workflow_runner.load_fixture("basic_chatflow")
+
+    nodes = fixture_data["workflow"]["graph"]["nodes"]
+    answer_node = next(node for node in nodes if node["id"] == "answer")
+    answer_node["data"]["answer"] = "{{#llm.structured_output#}}"
+
+    mock_config = (
+        MockConfigBuilder()
+        .with_node_output(
+            "llm",
+            {
+                "text": "plain text",
+                "structured_output": {"type": "greeting"},
+                "usage": {
+                    "prompt_tokens": 10,
+                    "completion_tokens": 5,
+                    "total_tokens": 15,
+                },
+                "finish_reason": "stop",
+            },
+        )
+        .build()
+    )
+
+    graph, graph_runtime_state = runner.workflow_runner.create_graph_from_fixture(
+        fixture_data=fixture_data,
+        query="hello",
+        use_mock_factory=True,
+        mock_config=mock_config,
+    )
+    engine = GraphEngine(
+        workflow_id="test_workflow",
+        graph=graph,
+        graph_runtime_state=graph_runtime_state,
+        command_channel=InMemoryChannel(),
+        config=GraphEngineConfig(),
+    )
+
+    events = list(engine.run())
+    success_events = [e for e in events if isinstance(e, GraphRunSucceededEvent)]
+    assert success_events, "Workflow should complete successfully"
+    assert success_events[-1].outputs["answer"] == '{\n "type": "greeting"\n}'
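The chatflow test above relies on the Answer node serializing an object-valued variable to pretty-printed JSON. A minimal sketch of that rendering step, assuming the renderer applies json.dumps to mapping values (the function name is hypothetical; the real implementation lives in the Answer node):

    import json

    def render_answer_value(value: object) -> str:
        # Objects and arrays are rendered as indented JSON; everything
        # else falls back to plain string conversion.
        if isinstance(value, (dict, list)):
            return json.dumps(value, indent=2, ensure_ascii=False)
        return str(value)

    print(render_answer_value({"type": "greeting"}))
    # {
    #   "type": "greeting"
    # }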


@@ -86,3 +86,80 @@ def test_execute_answer():
     assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
     assert result.outputs["answer"] == "Today's weather is sunny\nYou are a helpful AI.\n{{img}}\nFin."
+
+
+def test_execute_answer_renders_structured_output_object_as_json() -> None:
+    init_params = build_test_graph_init_params(
+        workflow_id="1",
+        graph_config={"nodes": [], "edges": []},
+        tenant_id="1",
+        app_id="1",
+        user_id="1",
+        user_from=UserFrom.ACCOUNT,
+        invoke_from=InvokeFrom.DEBUGGER,
+        call_depth=0,
+    )
+    variable_pool = VariablePool(
+        system_variables=build_system_variables(user_id="aaa", files=[]),
+        user_inputs={},
+        environment_variables=[],
+        conversation_variables=[],
+    )
+    variable_pool.add(["1777539038857", "structured_output"], {"type": "greeting"})
+    graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter())
+
+    node = AnswerNode(
+        node_id=str(uuid.uuid4()),
+        graph_init_params=init_params,
+        graph_runtime_state=graph_runtime_state,
+        config=AnswerNodeData(
+            title="123",
+            type="answer",
+            answer="{{#1777539038857.structured_output#}}",
+        ),
+    )
+
+    result = node._run()
+
+    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
+    assert result.outputs["answer"] == '{\n "type": "greeting"\n}'
+
+
+def test_execute_answer_falls_back_to_plain_selector_text_when_structured_output_missing() -> None:
+    init_params = build_test_graph_init_params(
+        workflow_id="1",
+        graph_config={"nodes": [], "edges": []},
+        tenant_id="1",
+        app_id="1",
+        user_id="1",
+        user_from=UserFrom.ACCOUNT,
+        invoke_from=InvokeFrom.DEBUGGER,
+        call_depth=0,
+    )
+    variable_pool = VariablePool(
+        system_variables=build_system_variables(user_id="aaa", files=[]),
+        user_inputs={},
+        environment_variables=[],
+        conversation_variables=[],
+    )
+    graph_runtime_state = GraphRuntimeState(variable_pool=variable_pool, start_at=time.perf_counter())
+
+    node = AnswerNode(
+        node_id=str(uuid.uuid4()),
+        graph_init_params=init_params,
+        graph_runtime_state=graph_runtime_state,
+        config=AnswerNodeData(
+            title="123",
+            type="answer",
+            answer="{{#1777539038857.structured_output#}}",
+        ),
+    )
+
+    result = node._run()
+
+    assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED
+    assert result.outputs["answer"] == "1777539038857.structured_output"
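The second unit test pins down the fallback: when the selector {{#1777539038857.structured_output#}} resolves to no variable in the pool, the rendered answer is the selector path itself rather than an error. A sketch of that behavior, with a hypothetical lookup helper standing in for the real VariablePool API:

    import json

    def render_selector(pool: dict[tuple[str, str], object], node_id: str, name: str) -> str:
        # Hypothetical helper; the real VariablePool API differs. This only
        # illustrates the observed fallback-to-selector-text behavior.
        value = pool.get((node_id, name))
        if value is None:
            return f"{node_id}.{name}"
        if isinstance(value, (dict, list)):
            return json.dumps(value, indent=2, ensure_ascii=False)
        return str(value)

    assert render_selector({}, "1777539038857", "structured_output") == "1777539038857.structured_output"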


@@ -10,14 +10,20 @@ from core.workflow.nodes.knowledge_index import KNOWLEDGE_INDEX_NODE_TYPE
 from graphon.entities.base_node_data import BaseNodeData
 from graphon.enums import BuiltinNodeTypes, NodeType
 from graphon.nodes.code.entities import CodeLanguage
+from graphon.nodes.llm.entities import LLMNodeData
 from graphon.variables.segments import StringSegment


 def _assert_typed_node_config(config, *, node_id: str, node_type: NodeType, version: str = "1") -> None:
     _ = node_id
-    assert isinstance(config, BaseNodeData)
-    assert config.type == node_type
-    assert config.version == version
+    if isinstance(config, BaseNodeData):
+        assert config.type == node_type
+        assert config.version == version
+        return
+    assert isinstance(config, dict)
+    assert config["type"] == node_type
+    assert config["version"] == version


 def _node_constructor(*, return_value):
@@ -546,6 +552,84 @@ class TestDifyNodeFactoryCreateNode:
         assert kwargs["unstructured_api_config"] is sentinel.unstructured_api_config
         assert kwargs["http_client"] is sentinel.http_client
+
+    def test_build_llm_compatible_node_init_kwargs_preserves_structured_output_switch(self, factory):
+        node_data = LLMNodeData.model_validate(
+            {
+                "type": BuiltinNodeTypes.LLM,
+                "title": "LLM",
+                "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}},
+                "prompt_template": [{"role": "system", "text": "x"}],
+                "context": {"enabled": False, "variable_selector": []},
+                "vision": {"enabled": False},
+                "structured_output_enabled": True,
+                "structured_output": {
+                    "schema": {
+                        "type": "object",
+                        "properties": {"type": {"type": "string"}},
+                        "required": ["type"],
+                    }
+                },
+            }
+        )
+        wrapped_model_instance = sentinel.wrapped_model_instance
+        memory = sentinel.memory
+        factory._build_model_instance_for_llm_node = MagicMock(return_value=sentinel.model_instance)
+        factory._build_memory_for_llm_node = MagicMock(return_value=memory)
+
+        with patch.object(node_factory, "DifyPreparedLLM", return_value=wrapped_model_instance) as prepared_llm:
+            kwargs = factory._build_llm_compatible_node_init_kwargs(
+                node_class=sentinel.node_class,
+                node_data=node_data,
+                wrap_model_instance=True,
+                include_http_client=True,
+                include_llm_file_saver=True,
+                include_prompt_message_serializer=True,
+                include_retriever_attachment_loader=True,
+                include_jinja2_template_renderer=True,
+            )
+
+        assert node_data.structured_output_switch_on is True
+        assert node_data.structured_output_enabled is True
+        factory._build_model_instance_for_llm_node.assert_called_once_with(node_data)
+        factory._build_memory_for_llm_node.assert_called_once_with(
+            node_data=node_data,
+            model_instance=sentinel.model_instance,
+        )
+        prepared_llm.assert_called_once_with(sentinel.model_instance)
+        assert kwargs["model_instance"] is wrapped_model_instance
+
+    def test_create_node_passes_alias_preserving_llm_config_to_constructor(self, monkeypatch, factory):
+        created_node = object()
+        constructor = _node_constructor(return_value=created_node)
+        monkeypatch.setattr(factory, "_resolve_node_class", MagicMock(return_value=constructor))
+        monkeypatch.setattr(factory, "_build_llm_compatible_node_init_kwargs", MagicMock(return_value={}))
+
+        node_config = {
+            "id": "llm-node-id",
+            "data": {
+                "type": BuiltinNodeTypes.LLM,
+                "title": "LLM",
+                "model": {"provider": "provider", "name": "model", "mode": "chat", "completion_params": {}},
+                "prompt_template": [{"role": "system", "text": "x"}],
+                "context": {"enabled": False, "variable_selector": []},
+                "vision": {"enabled": False},
+                "structured_output_enabled": True,
+                "structured_output": {
+                    "schema": {
+                        "type": "object",
+                        "properties": {"type": {"type": "string"}},
+                        "required": ["type"],
+                    }
+                },
+            },
+        }
+
+        factory.create_node(node_config)
+
+        config = constructor.call_args.kwargs["config"]
+        assert isinstance(config, dict)
+        assert config["structured_output_enabled"] is True
+        assert "structured_output_switch_on" not in config
+
     @pytest.mark.parametrize(
         ("node_type", "constructor_name", "expected_extra_kwargs"),
         [