refactor: move execution limits from engine core to layer

Remove max_execution_time and max_execution_steps from ExecutionContext and GraphEngine since these limits are now handled by ExecutionLimitsLayer. This follows the separation of concerns principle by keeping execution limits as a cross-cutting concern handled by layers rather than embedded in core engine components.

Changes:
- Remove max_execution_time and max_execution_steps from ExecutionContext
- Remove these parameters from GraphEngine.__init__()
- Remove max_execution_time from Dispatcher
- Update workflow_entry.py to no longer pass these parameters
- Update all tests to remove these parameters
This commit is contained in:
-LAN- 2025-09-10 01:32:45 +08:00
parent e0e82fbfaa
commit a23c8fcb1a
No known key found for this signature in database
GPG Key ID: 6BA0D108DED011FF
13 changed files with 2 additions and 44 deletions

View File

@@ -24,14 +24,8 @@ class ExecutionContext:
user_from: UserFrom
invoke_from: InvokeFrom
call_depth: int
max_execution_steps: int
max_execution_time: int
def __post_init__(self) -> None:
"""Validate execution context parameters."""
if self.call_depth < 0:
raise ValueError("Call depth must be non-negative")
if self.max_execution_steps <= 0:
raise ValueError("Max execution steps must be positive")
if self.max_execution_time <= 0:
raise ValueError("Max execution time must be positive")

View File

@@ -65,8 +65,6 @@ class GraphEngine:
graph: Graph,
graph_config: Mapping[str, object],
graph_runtime_state: GraphRuntimeState,
max_execution_steps: int,
max_execution_time: int,
command_channel: CommandChannel,
min_workers: int | None = None,
max_workers: int | None = None,
@@ -85,8 +83,6 @@ class GraphEngine:
user_from=user_from,
invoke_from=invoke_from,
call_depth=call_depth,
max_execution_steps=max_execution_steps,
max_execution_time=max_execution_time,
)
# Graph execution tracks the overall execution state
@@ -216,7 +212,6 @@ class GraphEngine:
event_handler=self._event_handler_registry,
event_collector=self._event_manager,
execution_coordinator=self._execution_coordinator,
max_execution_time=self._execution_context.max_execution_time,
event_emitter=self._event_manager,
)

View File

@@ -34,7 +34,6 @@ class Dispatcher:
event_handler: "EventHandler",
event_collector: EventManager,
execution_coordinator: ExecutionCoordinator,
max_execution_time: int,
event_emitter: EventManager | None = None,
) -> None:
"""
@@ -45,14 +44,12 @@
event_handler: Event handler registry for processing events
event_collector: Event manager for collecting unhandled events
execution_coordinator: Coordinator for execution flow
max_execution_time: Maximum execution time in seconds
event_emitter: Optional event manager to signal completion
"""
self._event_queue = event_queue
self._event_handler = event_handler
self._event_collector = event_collector
self._execution_coordinator = execution_coordinator
self._max_execution_time = max_execution_time
self._event_emitter = event_emitter
self._thread: threading.Thread | None = None

View File

@@ -50,6 +50,7 @@ logger = logging.getLogger(__name__)
EmptyArraySegment = NewType("EmptyArraySegment", ArraySegment)
class IterationNode(Node):
"""
Iteration Node.
@@ -95,7 +96,7 @@ class IterationNode(Node):
def version(cls) -> str:
return "1"
def _run(self) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]: # pyright: ignore[reportIncompatibleMethodOverride]
def _run(self) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]: # pyright: ignore[reportIncompatibleMethodOverride]
variable = self._get_iterator_variable()
if self._is_empty_iteration(variable):
@@ -466,8 +467,6 @@ class IterationNode(Node):
graph=iteration_graph,
graph_config=self.graph_config,
graph_runtime_state=graph_runtime_state_copy,
max_execution_steps=10000, # Use default or config value
max_execution_time=600, # Use default or config value
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
)

View File

@@ -4,7 +4,6 @@ from collections.abc import Callable, Generator, Mapping, Sequence
from datetime import datetime
from typing import TYPE_CHECKING, Any, Literal, Optional, cast
from configs import dify_config
from core.variables import Segment, SegmentType
from core.workflow.enums import (
ErrorStrategy,
@@ -454,8 +453,6 @@ class LoopNode(Node):
graph=loop_graph,
graph_config=self.graph_config,
graph_runtime_state=graph_runtime_state_copy,
max_execution_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS,
max_execution_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME,
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
)

View File

@@ -83,8 +83,6 @@ class WorkflowEntry:
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS,
max_execution_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME,
command_channel=command_channel,
)

View File

@@ -52,8 +52,6 @@ def test_abort_command():
graph=mock_graph,
graph_config={},
graph_runtime_state=shared_runtime_state, # Use shared instance
max_execution_steps=100,
max_execution_time=10,
command_channel=command_channel,
)

View File

@@ -55,8 +55,6 @@ def test_streaming_output_with_blocking_equals_one():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)
@@ -162,8 +160,6 @@ def test_streaming_output_with_blocking_not_equals_one():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)

View File

@@ -470,8 +470,6 @@ def test_layer_system_basic():
graph=graph,
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
graph_runtime_state=graph_runtime_state,
max_execution_steps=300,
max_execution_time=60,
command_channel=InMemoryChannel(),
)
@@ -535,8 +533,6 @@ def test_layer_chaining():
graph=graph,
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
graph_runtime_state=graph_runtime_state,
max_execution_steps=300,
max_execution_time=60,
command_channel=InMemoryChannel(),
)
@@ -591,8 +587,6 @@ def test_layer_error_handling():
graph=graph,
graph_config=fixture_data.get("workflow", {}).get("graph", {}),
graph_runtime_state=graph_runtime_state,
max_execution_steps=300,
max_execution_time=60,
command_channel=InMemoryChannel(),
)

View File

@@ -625,8 +625,6 @@ class MockIterationNode(MockNodeMixin, IterationNode):
graph=iteration_graph,
graph_config=self.graph_config,
graph_runtime_state=graph_runtime_state_copy,
max_execution_steps=10000, # Use default or config value
max_execution_time=600, # Use default or config value
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
)
@@ -695,8 +693,6 @@ class MockLoopNode(MockNodeMixin, LoopNode):
graph=loop_graph,
graph_config=self.graph_config,
graph_runtime_state=graph_runtime_state_copy,
max_execution_steps=10000, # Use default or config value
max_execution_time=600, # Use default or config value
command_channel=InMemoryChannel(), # Use InMemoryChannel for sub-graphs
)

View File

@@ -128,8 +128,6 @@ def test_parallel_streaming_workflow():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)

View File

@@ -388,8 +388,6 @@ class TableTestRunner:
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=int(test_case.timeout),
command_channel=InMemoryChannel(),
min_workers=self.graph_engine_min_workers,
max_workers=self.graph_engine_max_workers,

View File

@@ -38,8 +38,6 @@ def test_tool_in_chatflow():
graph=graph,
graph_config=graph_config,
graph_runtime_state=graph_runtime_state,
max_execution_steps=500,
max_execution_time=30,
command_channel=InMemoryChannel(),
)