From 3dee8064badd64f195902feb7c749384a947c7d7 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 29 Aug 2025 13:17:02 +0800 Subject: [PATCH 01/96] feat: enhance typing --- api/core/workflow/graph/graph.py | 45 +++++++++++++++---------- api/core/workflow/nodes/node_factory.py | 19 +++++++---- api/libs/typing.py | 9 +++++ 3 files changed, 50 insertions(+), 23 deletions(-) create mode 100644 api/libs/typing.py diff --git a/api/core/workflow/graph/graph.py b/api/core/workflow/graph/graph.py index 3c21b4659f..dc38d4d2a3 100644 --- a/api/core/workflow/graph/graph.py +++ b/api/core/workflow/graph/graph.py @@ -1,10 +1,11 @@ import logging from collections import defaultdict -from collections.abc import Mapping -from typing import Any, Protocol, cast +from collections.abc import Mapping, Sequence +from typing import Protocol, cast, final from core.workflow.enums import NodeExecutionType, NodeState, NodeType from core.workflow.nodes.base.node import Node +from libs.typing import is_str, is_str_dict from .edge import Edge @@ -19,7 +20,7 @@ class NodeFactory(Protocol): allowing for different node creation strategies while maintaining type safety. """ - def create_node(self, node_config: dict[str, Any]) -> Node: + def create_node(self, node_config: dict[str, object]) -> Node: """ Create a Node instance from node configuration data. @@ -30,6 +31,7 @@ class NodeFactory(Protocol): ... +@final class Graph: """Graph representation with nodes and edges for workflow execution.""" @@ -58,18 +60,18 @@ class Graph: self.root_node = root_node @classmethod - def _parse_node_configs(cls, node_configs: list[dict[str, Any]]) -> dict[str, dict[str, Any]]: + def _parse_node_configs(cls, node_configs: list[dict[str, object]]) -> dict[str, dict[str, object]]: """ Parse node configurations and build a mapping of node IDs to configs. 
:param node_configs: list of node configuration dictionaries :return: mapping of node ID to node config """ - node_configs_map: dict[str, dict[str, Any]] = {} + node_configs_map: dict[str, dict[str, object]] = {} for node_config in node_configs: node_id = node_config.get("id") - if not node_id: + if not node_id or not isinstance(node_id, str): continue node_configs_map[node_id] = node_config @@ -79,8 +81,8 @@ class Graph: @classmethod def _find_root_node_id( cls, - node_configs_map: dict[str, dict[str, Any]], - edge_configs: list[dict[str, Any]], + node_configs_map: Mapping[str, Mapping[str, object]], + edge_configs: Sequence[Mapping[str, object]], root_node_id: str | None = None, ) -> str: """ @@ -97,10 +99,10 @@ class Graph: return root_node_id # Find nodes with no incoming edges - nodes_with_incoming = set() + nodes_with_incoming: set[str] = set() for edge_config in edge_configs: target = edge_config.get("target") - if target: + if isinstance(target, str): nodes_with_incoming.add(target) root_candidates = [nid for nid in node_configs_map if nid not in nodes_with_incoming] @@ -108,8 +110,13 @@ class Graph: # Prefer START node if available start_node_id = None for nid in root_candidates: - node_data = node_configs_map[nid].get("data", {}) - if node_data.get("type") == NodeType.START.value: + node_data = node_configs_map[nid].get("data") + if not is_str_dict(node_data): + continue + node_type = node_data.get("type") + if not isinstance(node_type, str): + continue + if node_type == NodeType.START: start_node_id = nid break @@ -122,7 +129,7 @@ class Graph: @classmethod def _build_edges( - cls, edge_configs: list[dict[str, Any]] + cls, edge_configs: list[dict[str, object]] ) -> tuple[dict[str, Edge], dict[str, list[str]], dict[str, list[str]]]: """ Build edge objects and mappings from edge configurations. 
@@ -139,7 +146,7 @@ class Graph: source = edge_config.get("source") target = edge_config.get("target") - if not source or not target: + if not is_str(source) or not is_str(target): continue # Create edge @@ -147,6 +154,8 @@ class Graph: edge_counter += 1 source_handle = edge_config.get("sourceHandle", "source") + if not is_str(source_handle): + continue edge = Edge( id=edge_id, @@ -164,7 +173,7 @@ class Graph: @classmethod def _create_node_instances( cls, - node_configs_map: dict[str, dict[str, Any]], + node_configs_map: dict[str, dict[str, object]], node_factory: "NodeFactory", ) -> dict[str, Node]: """ @@ -256,7 +265,7 @@ class Graph: def init( cls, *, - graph_config: Mapping[str, Any], + graph_config: Mapping[str, object], node_factory: "NodeFactory", root_node_id: str | None = None, ) -> "Graph": @@ -272,10 +281,12 @@ class Graph: edge_configs = graph_config.get("edges", []) node_configs = graph_config.get("nodes", []) + edge_configs = cast(list[dict[str, object]], edge_configs) + node_configs = cast(list[dict[str, object]], node_configs) + if not node_configs: raise ValueError("Graph must have at least one node") - edge_configs = cast(list, edge_configs) node_configs = [node_config for node_config in node_configs if node_config.get("type", "") != "custom-note"] # Parse node configurations diff --git a/api/core/workflow/nodes/node_factory.py b/api/core/workflow/nodes/node_factory.py index bf6a1389fc..5ded1ad44c 100644 --- a/api/core/workflow/nodes/node_factory.py +++ b/api/core/workflow/nodes/node_factory.py @@ -1,8 +1,11 @@ -from typing import TYPE_CHECKING, Any +from typing import TYPE_CHECKING, final + +from typing_extensions import override from core.workflow.enums import ErrorStrategy, NodeExecutionType, NodeType from core.workflow.graph import NodeFactory from core.workflow.nodes.base.node import Node +from libs.typing import is_str, is_str_dict from .node_mapping import LATEST_VERSION, NODE_TYPE_CLASSES_MAPPING @@ -10,6 +13,7 @@ if TYPE_CHECKING: from core.workflow.entities import GraphInitParams, GraphRuntimeState +@final class DifyNodeFactory(NodeFactory): """ Default implementation of NodeFactory that uses the traditional node mapping. @@ -26,10 +30,8 @@ class DifyNodeFactory(NodeFactory): self.graph_init_params = graph_init_params self.graph_runtime_state = graph_runtime_state - def create_node( - self, - node_config: dict[str, Any], - ) -> Node: + @override + def create_node(self, node_config: dict[str, object]) -> Node: """ Create a Node instance from node configuration data using the traditional mapping. 
@@ -39,11 +41,14 @@ class DifyNodeFactory(NodeFactory): """ # Get node_id from config node_id = node_config.get("id") - if not node_id: + if not is_str(node_id): raise ValueError("Node config missing id") # Get node type from config node_data = node_config.get("data", {}) + if not is_str_dict(node_data): + raise ValueError(f"Node {node_id} missing data information") + node_type_str = node_data.get("type") if not node_type_str: raise ValueError(f"Node {node_id} missing type information") @@ -72,6 +77,8 @@ class DifyNodeFactory(NodeFactory): # Initialize node with provided data node_data = node_config.get("data", {}) + if not is_str_dict(node_data): + raise ValueError(f"Node {node_id} missing data information") node_instance.init_node_data(node_data) # If node has fail branch, change execution type to branch diff --git a/api/libs/typing.py b/api/libs/typing.py new file mode 100644 index 0000000000..f84e9911e0 --- /dev/null +++ b/api/libs/typing.py @@ -0,0 +1,9 @@ +from typing import TypeGuard + + +def is_str_dict(v: object) -> TypeGuard[dict[str, object]]: + return isinstance(v, dict) + + +def is_str(v: object) -> TypeGuard[str]: + return isinstance(v, str) From f2bc4f5d87e5e60bf3e8d2d74487c5c4572c9fe8 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 29 Aug 2025 16:16:58 +0800 Subject: [PATCH 02/96] fix: resolve type error in node_factory by using type guard for node_type_str --- api/core/workflow/nodes/node_factory.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/core/workflow/nodes/node_factory.py b/api/core/workflow/nodes/node_factory.py index 5ded1ad44c..df1d685909 100644 --- a/api/core/workflow/nodes/node_factory.py +++ b/api/core/workflow/nodes/node_factory.py @@ -50,8 +50,8 @@ class DifyNodeFactory(NodeFactory): raise ValueError(f"Node {node_id} missing data information") node_type_str = node_data.get("type") - if not node_type_str: - raise ValueError(f"Node {node_id} missing type information") + if not is_str(node_type_str): + raise ValueError(f"Node {node_id} missing or invalid type information") try: node_type = NodeType(node_type_str) From 8eb78c04b2597158d6144f0d30d0c1a6209d526e Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 29 Aug 2025 17:02:51 +0800 Subject: [PATCH 03/96] chore(token_buffer_memory): code format Signed-off-by: -LAN- --- api/core/memory/token_buffer_memory.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index ab12b1a846..6cbd949be9 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -2,7 +2,6 @@ from collections.abc import Sequence from typing import Optional from sqlalchemy import select -from sqlalchemy.orm import Session from core.app.app_config.features.file_upload.manager import FileUploadConfigManager from core.file import file_manager From 3aa48efd0a220c71b7f8955d8720ec4604f536d7 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 29 Aug 2025 22:06:10 +0800 Subject: [PATCH 04/96] test(test_workflow_service): Use new engine's method. 
Signed-off-by: -LAN- --- .../services/test_workflow_service.py | 80 ++++++++++--------- 1 file changed, 44 insertions(+), 36 deletions(-) diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index 018eb6d896..f7c8cd113f 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -1421,16 +1421,16 @@ class TestWorkflowService: # Mock successful node execution def mock_successful_invoke(): - from core.workflow.entities.node_entities import NodeRunResult - from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus - from core.workflow.nodes.base.node import BaseNode - from core.workflow.nodes.event import RunCompletedEvent + from core.workflow.enums import WorkflowNodeExecutionStatus + from core.workflow.graph_events import NodeRunSucceededEvent + from core.workflow.node_events import NodeRunResult + from core.workflow.nodes.base.node import Node # Create mock node - mock_node = MagicMock(spec=BaseNode) - mock_node.type_ = "start" # Use valid NodeType + mock_node = MagicMock(spec=Node) + mock_node.node_type = "start" # Use valid NodeType mock_node.title = "Test Node" - mock_node.continue_on_error = False + mock_node.error_strategy = None # Create mock result with valid metadata mock_result = NodeRunResult( @@ -1442,14 +1442,18 @@ class TestWorkflowService: ) # Create mock event - mock_event = RunCompletedEvent(run_result=mock_result) + mock_event = NodeRunSucceededEvent(node_run_result=mock_result) - return mock_node, [mock_event] + # Return node and generator + def event_generator(): + yield mock_event + + return mock_node, event_generator() workflow_service = WorkflowService() # Act - result = workflow_service._handle_node_run_result( + result = workflow_service._handle_single_step_result( invoke_node_fn=mock_successful_invoke, start_at=start_at, node_id=node_id ) @@ -1459,7 +1463,7 @@ class TestWorkflowService: assert result.node_type == "start" # Should match the mock node type assert result.title == "Test Node" # Import the enum for comparison - from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus + from core.workflow.enums import WorkflowNodeExecutionStatus assert result.status == WorkflowNodeExecutionStatus.SUCCEEDED assert result.inputs is not None @@ -1481,34 +1485,37 @@ class TestWorkflowService: # Mock failed node execution def mock_failed_invoke(): - from core.workflow.entities.node_entities import NodeRunResult - from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus - from core.workflow.nodes.base.node import BaseNode - from core.workflow.nodes.event import RunCompletedEvent + from core.workflow.enums import WorkflowNodeExecutionStatus + from core.workflow.graph_events import NodeRunFailedEvent + from core.workflow.node_events import NodeRunResult + from core.workflow.nodes.base.node import Node # Create mock node - mock_node = MagicMock(spec=BaseNode) - mock_node.type_ = "llm" # Use valid NodeType + mock_node = MagicMock(spec=Node) + mock_node.node_type = "llm" # Use valid NodeType mock_node.title = "Test Node" - mock_node.continue_on_error = False + mock_node.error_strategy = None # Create mock failed result mock_result = NodeRunResult( status=WorkflowNodeExecutionStatus.FAILED, inputs={"input1": "value1"}, error="Test error message", - 
error_type="TestError", ) # Create mock event - mock_event = RunCompletedEvent(run_result=mock_result) + mock_event = NodeRunFailedEvent(node_run_result=mock_result) - return mock_node, [mock_event] + # Return node and generator + def event_generator(): + yield mock_event + + return mock_node, event_generator() workflow_service = WorkflowService() # Act - result = workflow_service._handle_node_run_result( + result = workflow_service._handle_single_step_result( invoke_node_fn=mock_failed_invoke, start_at=start_at, node_id=node_id ) @@ -1516,7 +1523,7 @@ class TestWorkflowService: assert result is not None assert result.node_id == node_id # Import the enum for comparison - from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus + from core.workflow.enums import WorkflowNodeExecutionStatus assert result.status == WorkflowNodeExecutionStatus.FAILED assert result.error is not None @@ -1537,17 +1544,15 @@ class TestWorkflowService: # Mock node execution with continue_on_error def mock_continue_on_error_invoke(): - from core.workflow.entities.node_entities import NodeRunResult - from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus - from core.workflow.nodes.base.node import BaseNode - from core.workflow.nodes.enums import ErrorStrategy - from core.workflow.nodes.event import RunCompletedEvent + from core.workflow.enums import ErrorStrategy, WorkflowNodeExecutionStatus + from core.workflow.graph_events import NodeRunFailedEvent + from core.workflow.node_events import NodeRunResult + from core.workflow.nodes.base.node import Node # Create mock node with continue_on_error - mock_node = MagicMock(spec=BaseNode) - mock_node.type_ = "tool" # Use valid NodeType + mock_node = MagicMock(spec=Node) + mock_node.node_type = "tool" # Use valid NodeType mock_node.title = "Test Node" - mock_node.continue_on_error = True mock_node.error_strategy = ErrorStrategy.DEFAULT_VALUE mock_node.default_value_dict = {"default_output": "default_value"} @@ -1556,18 +1561,21 @@ class TestWorkflowService: status=WorkflowNodeExecutionStatus.FAILED, inputs={"input1": "value1"}, error="Test error message", - error_type="TestError", ) # Create mock event - mock_event = RunCompletedEvent(run_result=mock_result) + mock_event = NodeRunFailedEvent(node_run_result=mock_result) - return mock_node, [mock_event] + # Return node and generator + def event_generator(): + yield mock_event + + return mock_node, event_generator() workflow_service = WorkflowService() # Act - result = workflow_service._handle_node_run_result( + result = workflow_service._handle_single_step_result( invoke_node_fn=mock_continue_on_error_invoke, start_at=start_at, node_id=node_id ) @@ -1575,7 +1583,7 @@ class TestWorkflowService: assert result is not None assert result.node_id == node_id # Import the enum for comparison - from core.workflow.entities.workflow_node_execution import WorkflowNodeExecutionStatus + from core.workflow.enums import WorkflowNodeExecutionStatus assert result.status == WorkflowNodeExecutionStatus.EXCEPTION # Should be EXCEPTION, not FAILED assert result.outputs is not None From d8af8ae4e65d8f98a8366f4fdf8fb7fd779607af Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 29 Aug 2025 23:04:33 +0800 Subject: [PATCH 05/96] fix: update workflow service tests for new graph engine - Update method calls from _handle_node_run_result to _handle_single_step_result - Add required fields (id, node_id, node_type, start_at) to graph events - Use proper NodeType enum values instead of strings - Fix 
imports to use correct modules (Node instead of BaseNode) - Ensure event generators return proper generator objects These tests were failing because the internal implementation changed with the new graph engine architecture. --- .../services/test_workflow_service.py | 57 ++++++++++++++----- 1 file changed, 44 insertions(+), 13 deletions(-) diff --git a/api/tests/test_containers_integration_tests/services/test_workflow_service.py b/api/tests/test_containers_integration_tests/services/test_workflow_service.py index f7c8cd113f..eb7a5a23f6 100644 --- a/api/tests/test_containers_integration_tests/services/test_workflow_service.py +++ b/api/tests/test_containers_integration_tests/services/test_workflow_service.py @@ -1421,14 +1421,17 @@ class TestWorkflowService: # Mock successful node execution def mock_successful_invoke(): - from core.workflow.enums import WorkflowNodeExecutionStatus + import uuid + from datetime import datetime + + from core.workflow.enums import NodeType, WorkflowNodeExecutionStatus from core.workflow.graph_events import NodeRunSucceededEvent from core.workflow.node_events import NodeRunResult from core.workflow.nodes.base.node import Node # Create mock node mock_node = MagicMock(spec=Node) - mock_node.node_type = "start" # Use valid NodeType + mock_node.node_type = NodeType.START mock_node.title = "Test Node" mock_node.error_strategy = None @@ -1441,8 +1444,14 @@ class TestWorkflowService: metadata={"total_tokens": 100}, # Use valid metadata field ) - # Create mock event - mock_event = NodeRunSucceededEvent(node_run_result=mock_result) + # Create mock event with all required fields + mock_event = NodeRunSucceededEvent( + id=str(uuid.uuid4()), + node_id=node_id, + node_type=NodeType.START, + node_run_result=mock_result, + start_at=datetime.now(), + ) # Return node and generator def event_generator(): @@ -1460,7 +1469,9 @@ class TestWorkflowService: # Assert assert result is not None assert result.node_id == node_id - assert result.node_type == "start" # Should match the mock node type + from core.workflow.enums import NodeType + + assert result.node_type == NodeType.START # Should match the mock node type assert result.title == "Test Node" # Import the enum for comparison from core.workflow.enums import WorkflowNodeExecutionStatus @@ -1485,14 +1496,17 @@ class TestWorkflowService: # Mock failed node execution def mock_failed_invoke(): - from core.workflow.enums import WorkflowNodeExecutionStatus + import uuid + from datetime import datetime + + from core.workflow.enums import NodeType, WorkflowNodeExecutionStatus from core.workflow.graph_events import NodeRunFailedEvent from core.workflow.node_events import NodeRunResult from core.workflow.nodes.base.node import Node # Create mock node mock_node = MagicMock(spec=Node) - mock_node.node_type = "llm" # Use valid NodeType + mock_node.node_type = NodeType.LLM mock_node.title = "Test Node" mock_node.error_strategy = None @@ -1503,8 +1517,15 @@ class TestWorkflowService: error="Test error message", ) - # Create mock event - mock_event = NodeRunFailedEvent(node_run_result=mock_result) + # Create mock event with all required fields + mock_event = NodeRunFailedEvent( + id=str(uuid.uuid4()), + node_id=node_id, + node_type=NodeType.LLM, + node_run_result=mock_result, + error="Test error message", + start_at=datetime.now(), + ) # Return node and generator def event_generator(): @@ -1544,14 +1565,17 @@ class TestWorkflowService: # Mock node execution with continue_on_error def mock_continue_on_error_invoke(): - from core.workflow.enums 
import ErrorStrategy, WorkflowNodeExecutionStatus + import uuid + from datetime import datetime + + from core.workflow.enums import ErrorStrategy, NodeType, WorkflowNodeExecutionStatus from core.workflow.graph_events import NodeRunFailedEvent from core.workflow.node_events import NodeRunResult from core.workflow.nodes.base.node import Node # Create mock node with continue_on_error mock_node = MagicMock(spec=Node) - mock_node.node_type = "tool" # Use valid NodeType + mock_node.node_type = NodeType.TOOL mock_node.title = "Test Node" mock_node.error_strategy = ErrorStrategy.DEFAULT_VALUE mock_node.default_value_dict = {"default_output": "default_value"} @@ -1563,8 +1587,15 @@ class TestWorkflowService: error="Test error message", ) - # Create mock event - mock_event = NodeRunFailedEvent(node_run_result=mock_result) + # Create mock event with all required fields + mock_event = NodeRunFailedEvent( + id=str(uuid.uuid4()), + node_id=node_id, + node_type=NodeType.TOOL, + node_run_result=mock_result, + error="Test error message", + start_at=datetime.now(), + ) # Return node and generator def event_generator(): From 11d32ca87d36e99416f3e93da4d0a92acd0b8d7c Mon Sep 17 00:00:00 2001 From: -LAN- Date: Fri, 29 Aug 2025 23:20:28 +0800 Subject: [PATCH 06/96] test: fix web test Signed-off-by: -LAN- --- .../run/utils/format-log/agent/index.ts | 94 ++++++++++++++----- 1 file changed, 72 insertions(+), 22 deletions(-) diff --git a/web/app/components/workflow/run/utils/format-log/agent/index.ts b/web/app/components/workflow/run/utils/format-log/agent/index.ts index 8f922f548f..311e56a269 100644 --- a/web/app/components/workflow/run/utils/format-log/agent/index.ts +++ b/web/app/components/workflow/run/utils/format-log/agent/index.ts @@ -9,10 +9,16 @@ const remove = (node: AgentLogItemWithChildren, removeId: string) => { if (!children || children.length === 0) return - const hasCircle = !!children.find(c => c.message_id === removeId) + const hasCircle = !!children.find((c) => { + const childId = c.message_id || (c as any).id + return childId === removeId + }) if (hasCircle) { node.hasCircle = true - node.children = node.children.filter(c => c.message_id !== removeId) + node.children = node.children.filter((c) => { + const childId = c.message_id || (c as any).id + return childId !== removeId + }) children = node.children } @@ -28,9 +34,10 @@ const removeRepeatedSiblings = (list: AgentLogItemWithChildren[]) => { const result: AgentLogItemWithChildren[] = [] const addedItemIds: string[] = [] list.forEach((item) => { - if (!addedItemIds.includes(item.message_id)) { + const itemId = item.message_id || (item as any).id + if (itemId && !addedItemIds.includes(itemId)) { result.push(item) - addedItemIds.push(item.message_id) + addedItemIds.push(itemId) } }) return result @@ -38,16 +45,26 @@ const removeRepeatedSiblings = (list: AgentLogItemWithChildren[]) => { const removeCircleLogItem = (log: AgentLogItemWithChildren) => { const newLog = cloneDeep(log) + + // If no children, return as is + if (!newLog.children || newLog.children.length === 0) + return newLog + newLog.children = removeRepeatedSiblings(newLog.children) - let { message_id: id, children } = newLog - if (!children || children.length === 0) - return log + const id = newLog.message_id || (newLog as any).id + let { children } = newLog // check one step circle - const hasOneStepCircle = !!children.find(c => c.message_id === id) + const hasOneStepCircle = !!children.find((c) => { + const childId = c.message_id || (c as any).id + return childId === id + }) if 
(hasOneStepCircle) { newLog.hasCircle = true - newLog.children = newLog.children.filter(c => c.message_id !== id) + newLog.children = newLog.children.filter((c) => { + const childId = c.message_id || (c as any).id + return childId !== id + }) children = newLog.children } @@ -62,21 +79,54 @@ const listToTree = (logs: AgentLogItem[]) => { if (!logs || logs.length === 0) return [] - const tree: AgentLogItemWithChildren[] = [] - logs.forEach((log) => { - const hasParent = !!log.parent_id - if (hasParent) { - const parent = logs.find(item => item.message_id === log.parent_id) as AgentLogItemWithChildren - if (parent) { - if (!parent.children) - parent.children = [] - parent.children.push(log as AgentLogItemWithChildren) - } - } - else { - tree.push(log as AgentLogItemWithChildren) + // First pass: identify all unique items and track parent-child relationships + const itemsById = new Map() + const childrenById = new Map() + + logs.forEach((item) => { + const itemId = item.message_id || (item as any).id + + // Only add to itemsById if not already there (keep first occurrence) + if (itemId && !itemsById.has(itemId)) + itemsById.set(itemId, item) + + // Initialize children array for this ID if needed + if (itemId && !childrenById.has(itemId)) + childrenById.set(itemId, []) + + // If this item has a parent, add it to parent's children list + if (item.parent_id) { + if (!childrenById.has(item.parent_id)) + childrenById.set(item.parent_id, []) + + childrenById.get(item.parent_id)!.push(item) } }) + + // Second pass: build tree structure + const tree: AgentLogItemWithChildren[] = [] + + // Find root nodes (items without parents) + itemsById.forEach((item) => { + const hasParent = !!item.parent_id + if (!hasParent) { + const itemId = item.message_id || (item as any).id + const children = childrenById.get(itemId) + if (children && children.length > 0) + item.children = children + + tree.push(item as AgentLogItemWithChildren) + } + }) + + // Add children property to all items that have children + itemsById.forEach((item) => { + const itemId = item.message_id || (item as any).id + const children = childrenById.get(itemId) + if (children && children.length > 0) + item.children = children + }) + return tree } From 82193580decd6be9f35c249f655064fb93c387f5 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Sat, 30 Aug 2025 16:35:57 +0800 Subject: [PATCH 07/96] chore: improve typing Signed-off-by: -LAN- --- .../graph_engine/command_channels/redis_channel.py | 4 ++-- .../command_processing/command_handlers.py | 3 +++ .../command_processing/command_processor.py | 2 +- api/core/workflow/graph_engine/graph_engine.py | 4 +++- api/core/workflow/graph_engine/layers/debug_logging.py | 9 +++++++-- .../workflow/graph_engine/layers/execution_limits.py | 5 +++++ .../workflow/graph_engine/output_registry/registry.py | 10 ++++++---- .../graph_engine/response_coordinator/coordinator.py | 6 +++--- api/core/workflow/graph_engine/worker.py | 2 ++ .../graph_engine/worker_management/activity_tracker.py | 2 +- .../graph_engine/worker_management/worker_factory.py | 3 ++- .../graph_engine/worker_management/worker_pool.py | 3 ++- 12 files changed, 37 insertions(+), 16 deletions(-) diff --git a/api/core/workflow/graph_engine/command_channels/redis_channel.py b/api/core/workflow/graph_engine/command_channels/redis_channel.py index 7809e43e32..ad0aa9402c 100644 --- a/api/core/workflow/graph_engine/command_channels/redis_channel.py +++ b/api/core/workflow/graph_engine/command_channels/redis_channel.py @@ -7,7 +7,7 @@ Each instance uses a unique 
key for its command queue. """ import json -from typing import TYPE_CHECKING, final +from typing import TYPE_CHECKING, Any, final from ..entities.commands import AbortCommand, CommandType, GraphEngineCommand @@ -87,7 +87,7 @@ class RedisChannel: pipe.expire(self._key, self._command_ttl) pipe.execute() - def _deserialize_command(self, data: dict) -> GraphEngineCommand | None: + def _deserialize_command(self, data: dict[str, Any]) -> GraphEngineCommand | None: """ Deserialize a command from dictionary data. diff --git a/api/core/workflow/graph_engine/command_processing/command_handlers.py b/api/core/workflow/graph_engine/command_processing/command_handlers.py index 9f8d20b1b9..3c51de99f3 100644 --- a/api/core/workflow/graph_engine/command_processing/command_handlers.py +++ b/api/core/workflow/graph_engine/command_processing/command_handlers.py @@ -5,6 +5,8 @@ Command handler implementations. import logging from typing import final +from typing_extensions import override + from ..domain.graph_execution import GraphExecution from ..entities.commands import AbortCommand, GraphEngineCommand from .command_processor import CommandHandler @@ -16,6 +18,7 @@ logger = logging.getLogger(__name__) class AbortCommandHandler(CommandHandler): """Handles abort commands.""" + @override def handle(self, command: GraphEngineCommand, execution: GraphExecution) -> None: """ Handle an abort command. diff --git a/api/core/workflow/graph_engine/command_processing/command_processor.py b/api/core/workflow/graph_engine/command_processing/command_processor.py index 2521058ef2..7051ece735 100644 --- a/api/core/workflow/graph_engine/command_processing/command_processor.py +++ b/api/core/workflow/graph_engine/command_processing/command_processor.py @@ -73,7 +73,7 @@ class CommandProcessor: if handler: try: handler.handle(command, self.graph_execution) - except Exception as e: + except Exception: logger.exception("Error handling command %s", command.__class__.__name__) else: logger.warning("No handler registered for command: %s", command.__class__.__name__) diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index dd98536fba..828e9b329f 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -213,7 +213,9 @@ class GraphEngine: # Capture context for workers flask_app: Flask | None = None try: - flask_app = current_app._get_current_object() # type: ignore + app = current_app._get_current_object() # type: ignore + if isinstance(app, Flask): + flask_app = app except RuntimeError: pass diff --git a/api/core/workflow/graph_engine/layers/debug_logging.py b/api/core/workflow/graph_engine/layers/debug_logging.py index 3052600161..42bacfa474 100644 --- a/api/core/workflow/graph_engine/layers/debug_logging.py +++ b/api/core/workflow/graph_engine/layers/debug_logging.py @@ -9,6 +9,8 @@ import logging from collections.abc import Mapping from typing import Any, final +from typing_extensions import override + from core.workflow.graph_events import ( GraphEngineEvent, GraphRunAbortedEvent, @@ -93,13 +95,14 @@ class DebugLoggingLayer(Layer): if not data: return "{}" - formatted_items = [] + formatted_items: list[str] = [] for key, value in data.items(): formatted_value = self._truncate_value(value) formatted_items.append(f" {key}: {formatted_value}") return "{\n" + ",\n".join(formatted_items) + "\n}" + @override def on_graph_start(self) -> None: """Log graph execution start.""" self.logger.info("=" * 80) @@ -112,7 +115,7 @@ 
class DebugLoggingLayer(Layer): # Log inputs if available if self.graph_runtime_state.variable_pool: - initial_vars = {} + initial_vars: dict[str, Any] = {} # Access the variable dictionary directly for node_id, variables in self.graph_runtime_state.variable_pool.variable_dictionary.items(): for var_key, var in variables.items(): @@ -121,6 +124,7 @@ class DebugLoggingLayer(Layer): if initial_vars: self.logger.info(" Initial variables: %s", self._format_dict(initial_vars)) + @override def on_event(self, event: GraphEngineEvent) -> None: """Log individual events based on their type.""" event_class = event.__class__.__name__ @@ -222,6 +226,7 @@ class DebugLoggingLayer(Layer): # Log unknown events at debug level self.logger.debug("Event: %s", event_class) + @override def on_graph_end(self, error: Exception | None) -> None: """Log graph execution end with summary statistics.""" self.logger.info("=" * 80) diff --git a/api/core/workflow/graph_engine/layers/execution_limits.py b/api/core/workflow/graph_engine/layers/execution_limits.py index efda0bacbe..6cc0c1305a 100644 --- a/api/core/workflow/graph_engine/layers/execution_limits.py +++ b/api/core/workflow/graph_engine/layers/execution_limits.py @@ -13,6 +13,8 @@ import time from enum import Enum from typing import final +from typing_extensions import override + from core.workflow.graph_engine.entities.commands import AbortCommand, CommandType from core.workflow.graph_engine.layers import Layer from core.workflow.graph_events import ( @@ -63,6 +65,7 @@ class ExecutionLimitsLayer(Layer): self._execution_ended = False self._abort_sent = False # Track if abort command has been sent + @override def on_graph_start(self) -> None: """Called when graph execution starts.""" self.start_time = time.time() @@ -73,6 +76,7 @@ class ExecutionLimitsLayer(Layer): self.logger.debug("Execution limits monitoring started") + @override def on_event(self, event: GraphEngineEvent) -> None: """ Called for every event emitted by the engine. @@ -95,6 +99,7 @@ class ExecutionLimitsLayer(Layer): if self._reached_time_limitation(): self._send_abort_command(LimitType.TIME_LIMIT) + @override def on_graph_end(self, error: Exception | None) -> None: """Called when graph execution ends.""" if self._execution_started and not self._execution_ended: diff --git a/api/core/workflow/graph_engine/output_registry/registry.py b/api/core/workflow/graph_engine/output_registry/registry.py index 4df7da207c..29eefa5abe 100644 --- a/api/core/workflow/graph_engine/output_registry/registry.py +++ b/api/core/workflow/graph_engine/output_registry/registry.py @@ -7,7 +7,7 @@ thread-safe storage for node outputs. 
from collections.abc import Sequence from threading import RLock -from typing import TYPE_CHECKING, Union, final +from typing import TYPE_CHECKING, Any, Union, final from core.variables import Segment from core.workflow.entities.variable_pool import VariablePool @@ -31,13 +31,15 @@ class OutputRegistry: """Initialize empty registry with thread-safe storage.""" self._lock = RLock() self._scalars = variable_pool - self._streams: dict[tuple, Stream] = {} + self._streams: dict[tuple[str, ...], Stream] = {} - def _selector_to_key(self, selector: Sequence[str]) -> tuple: + def _selector_to_key(self, selector: Sequence[str]) -> tuple[str, ...]: """Convert selector list to tuple key for internal storage.""" return tuple(selector) - def set_scalar(self, selector: Sequence[str], value: Union[str, int, float, bool, dict, list]) -> None: + def set_scalar( + self, selector: Sequence[str], value: Union[str, int, float, bool, dict[str, Any], list[Any]] + ) -> None: """ Set a scalar value for the given selector. diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index 4c3cc167fa..1fb58852d2 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -161,7 +161,7 @@ class ResponseStreamCoordinator: # Step 2: For each complete path, filter edges based on node blocking behavior filtered_paths: list[Path] = [] for path in all_complete_paths: - blocking_edges = [] + blocking_edges: list[str] = [] for edge_id in path: edge = self.graph.edges[edge_id] source_node = self.graph.nodes[edge.tail] @@ -260,7 +260,7 @@ class ResponseStreamCoordinator: if event.is_final: self.registry.close_stream(event.selector) return self.try_flush() - elif isinstance(event, NodeRunSucceededEvent): + else: # Skip cause we share the same variable pool. # # for variable_name, variable_value in event.node_run_result.outputs.items(): @@ -426,7 +426,7 @@ class ResponseStreamCoordinator: # Wait for more data break - elif isinstance(segment, TextSegment): + else: segment_events = self._process_text_segment(segment) events.extend(segment_events) self.active_session.index += 1 diff --git a/api/core/workflow/graph_engine/worker.py b/api/core/workflow/graph_engine/worker.py index dacf6f0435..1fb0824e63 100644 --- a/api/core/workflow/graph_engine/worker.py +++ b/api/core/workflow/graph_engine/worker.py @@ -15,6 +15,7 @@ from typing import final from uuid import uuid4 from flask import Flask +from typing_extensions import override from core.workflow.enums import NodeType from core.workflow.graph import Graph @@ -73,6 +74,7 @@ class Worker(threading.Thread): """Signal the worker to stop processing.""" self._stop_event.set() + @override def run(self) -> None: """ Main worker loop. 
diff --git a/api/core/workflow/graph_engine/worker_management/activity_tracker.py b/api/core/workflow/graph_engine/worker_management/activity_tracker.py index b2125a0158..19c4ddaeb5 100644 --- a/api/core/workflow/graph_engine/worker_management/activity_tracker.py +++ b/api/core/workflow/graph_engine/worker_management/activity_tracker.py @@ -46,7 +46,7 @@ class ActivityTracker: List of idle worker IDs """ current_time = time.time() - idle_workers = [] + idle_workers: list[int] = [] with self._lock: for worker_id, (is_active, last_change) in self._worker_activity.items(): diff --git a/api/core/workflow/graph_engine/worker_management/worker_factory.py b/api/core/workflow/graph_engine/worker_management/worker_factory.py index 673ca11f26..cbb8e0b68e 100644 --- a/api/core/workflow/graph_engine/worker_management/worker_factory.py +++ b/api/core/workflow/graph_engine/worker_management/worker_factory.py @@ -10,6 +10,7 @@ from typing import final from flask import Flask from core.workflow.graph import Graph +from core.workflow.graph_events import GraphNodeEventBase from ..worker import Worker @@ -42,7 +43,7 @@ class WorkerFactory: def create_worker( self, ready_queue: queue.Queue[str], - event_queue: queue.Queue, + event_queue: queue.Queue[GraphNodeEventBase], graph: Graph, on_idle_callback: Callable[[int], None] | None = None, on_active_callback: Callable[[int], None] | None = None, diff --git a/api/core/workflow/graph_engine/worker_management/worker_pool.py b/api/core/workflow/graph_engine/worker_management/worker_pool.py index 55250809cd..bdec3e5323 100644 --- a/api/core/workflow/graph_engine/worker_management/worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/worker_pool.py @@ -7,6 +7,7 @@ import threading from typing import final from core.workflow.graph import Graph +from core.workflow.graph_events import GraphNodeEventBase from ..worker import Worker from .activity_tracker import ActivityTracker @@ -26,7 +27,7 @@ class WorkerPool: def __init__( self, ready_queue: queue.Queue[str], - event_queue: queue.Queue, + event_queue: queue.Queue[GraphNodeEventBase], graph: Graph, worker_factory: WorkerFactory, dynamic_scaler: DynamicScaler, From 72acd9b48381b10f652af58b7d8f3eb6bea2f6fe Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Sun, 31 Aug 2025 17:00:13 +0800 Subject: [PATCH 08/96] Remove redundant from_variable_selector null-check (#24842) --- api/core/workflow/nodes/answer/answer_stream_processor.py | 3 --- .../app/configuration/config/automatic/get-automatic-res.tsx | 4 ++-- web/service/debug.ts | 2 +- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/api/core/workflow/nodes/answer/answer_stream_processor.py b/api/core/workflow/nodes/answer/answer_stream_processor.py index 97666fad05..4a75c9edd4 100644 --- a/api/core/workflow/nodes/answer/answer_stream_processor.py +++ b/api/core/workflow/nodes/answer/answer_stream_processor.py @@ -149,9 +149,6 @@ class AnswerStreamProcessor(StreamProcessor): return [] stream_output_value_selector = event.from_variable_selector - if not stream_output_value_selector: - return [] - stream_out_answer_node_ids = [] for answer_node_id, route_position in self.route_position.items(): if answer_node_id not in self.rest_node_ids: diff --git a/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx b/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx index 31f81d274d..e6b6c83846 100644 --- a/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx +++ 
b/web/app/components/app/configuration/config/automatic/get-automatic-res.tsx @@ -18,7 +18,7 @@ import s from './style.module.css' import Modal from '@/app/components/base/modal' import Button from '@/app/components/base/button' import Toast from '@/app/components/base/toast' -import { generateBasicAppFistTimeRule, generateRule } from '@/service/debug' +import { generateBasicAppFirstTimeRule, generateRule } from '@/service/debug' import type { CompletionParams, Model } from '@/types/app' import type { AppType } from '@/types/app' import Loading from '@/app/components/base/loading' @@ -226,7 +226,7 @@ const GetAutomaticRes: FC = ({ let apiRes: GenRes let hasError = false if (isBasicMode || !currentPrompt) { - const { error, ...res } = await generateBasicAppFistTimeRule({ + const { error, ...res } = await generateBasicAppFirstTimeRule({ instruction, model_config: model, no_variable: false, diff --git a/web/service/debug.ts b/web/service/debug.ts index 20a4f0953f..fab2910c5e 100644 --- a/web/service/debug.ts +++ b/web/service/debug.ts @@ -80,7 +80,7 @@ export const fetchConversationMessages = (appId: string, conversation_id: string }) } -export const generateBasicAppFistTimeRule = (body: Record) => { +export const generateBasicAppFirstTimeRule = (body: Record) => { return post('/rule-generate', { body, }) From bdfbfa391fae692e74d07e07cd1671711ad5f409 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Sun, 31 Aug 2025 17:01:01 +0800 Subject: [PATCH 09/96] Feature add test containers mcp tools manage service (#24840) --- .../tools/test_mcp_tools_manage_service.py | 1277 +++++++++++++++++ 1 file changed, 1277 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py diff --git a/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py new file mode 100644 index 0000000000..0fcaf86711 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/tools/test_mcp_tools_manage_service.py @@ -0,0 +1,1277 @@ +from unittest.mock import patch + +import pytest +from faker import Faker + +from core.tools.entities.tool_entities import ToolProviderType +from models.account import Account, Tenant +from models.tools import MCPToolProvider +from services.tools.mcp_tools_manage_service import UNCHANGED_SERVER_URL_PLACEHOLDER, MCPToolManageService + + +class TestMCPToolManageService: + """Integration tests for MCPToolManageService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.tools.mcp_tools_manage_service.encrypter") as mock_encrypter, + patch("services.tools.mcp_tools_manage_service.ToolTransformService") as mock_tool_transform_service, + ): + # Setup default mock returns + mock_encrypter.encrypt_token.return_value = "encrypted_server_url" + mock_tool_transform_service.mcp_provider_to_user_provider.return_value = { + "id": "test_id", + "name": "test_name", + "type": ToolProviderType.MCP, + } + + yield { + "encrypter": mock_encrypter, + "tool_transform_service": mock_tool_transform_service, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. 
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + # Create account + account = Account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + status="active", + ) + + from extensions.ext_database import db + + db.session.add(account) + db.session.commit() + + # Create tenant for the account + tenant = Tenant( + name=fake.company(), + status="normal", + ) + db.session.add(tenant) + db.session.commit() + + # Create tenant-account join + from models.account import TenantAccountJoin, TenantAccountRole + + join = TenantAccountJoin( + tenant_id=tenant.id, + account_id=account.id, + role=TenantAccountRole.OWNER.value, + current=True, + ) + db.session.add(join) + db.session.commit() + + # Set current tenant for account + account.current_tenant = tenant + + return account, tenant + + def _create_test_mcp_provider( + self, db_session_with_containers, mock_external_service_dependencies, tenant_id, user_id + ): + """ + Helper method to create a test MCP tool provider for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + tenant_id: Tenant ID for the provider + user_id: User ID who created the provider + + Returns: + MCPToolProvider: Created MCP tool provider instance + """ + fake = Faker() + + # Create MCP tool provider + mcp_provider = MCPToolProvider( + tenant_id=tenant_id, + name=fake.company(), + server_identifier=fake.uuid4(), + server_url="encrypted_server_url", + server_url_hash=fake.sha256(), + user_id=user_id, + authed=False, + tools="[]", + icon='{"content": "🤖", "background": "#FF6B6B"}', + timeout=30.0, + sse_read_timeout=300.0, + ) + + from extensions.ext_database import db + + db.session.add(mcp_provider) + db.session.commit() + + return mcp_provider + + def test_get_mcp_provider_by_provider_id_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful retrieval of MCP provider by provider ID. + + This test verifies: + - Proper retrieval of MCP provider by ID + - Correct tenant isolation + - Proper error handling for non-existent providers + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + + # Act: Execute the method under test + result = MCPToolManageService.get_mcp_provider_by_provider_id(mcp_provider.id, tenant.id) + + # Assert: Verify the expected outcomes + assert result is not None + assert result.id == mcp_provider.id + assert result.name == mcp_provider.name + assert result.tenant_id == tenant.id + assert result.user_id == account.id + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(result) + assert result.id is not None + assert result.server_identifier == mcp_provider.server_identifier + + def test_get_mcp_provider_by_provider_id_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when MCP provider is not found by provider ID. 
+ + This test verifies: + - Proper error handling for non-existent provider IDs + - Correct exception type and message + - Tenant isolation enforcement + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + non_existent_id = fake.uuid4() + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="MCP tool not found"): + MCPToolManageService.get_mcp_provider_by_provider_id(non_existent_id, tenant.id) + + def test_get_mcp_provider_by_provider_id_tenant_isolation( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test tenant isolation when retrieving MCP provider by provider ID. + + This test verifies: + - Proper tenant isolation enforcement + - Providers from other tenants are not accessible + - Security boundaries are maintained + """ + # Arrange: Create test data for two tenants + fake = Faker() + account1, tenant1 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + account2, tenant2 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider in tenant1 + mcp_provider1 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant1.id, account1.id + ) + + # Act & Assert: Verify tenant isolation + with pytest.raises(ValueError, match="MCP tool not found"): + MCPToolManageService.get_mcp_provider_by_provider_id(mcp_provider1.id, tenant2.id) + + def test_get_mcp_provider_by_server_identifier_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful retrieval of MCP provider by server identifier. + + This test verifies: + - Proper retrieval of MCP provider by server identifier + - Correct tenant isolation + - Proper error handling for non-existent server identifiers + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + + # Act: Execute the method under test + result = MCPToolManageService.get_mcp_provider_by_server_identifier(mcp_provider.server_identifier, tenant.id) + + # Assert: Verify the expected outcomes + assert result is not None + assert result.id == mcp_provider.id + assert result.server_identifier == mcp_provider.server_identifier + assert result.tenant_id == tenant.id + assert result.user_id == account.id + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(result) + assert result.id is not None + assert result.name == mcp_provider.name + + def test_get_mcp_provider_by_server_identifier_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when MCP provider is not found by server identifier. 
+ + This test verifies: + - Proper error handling for non-existent server identifiers + - Correct exception type and message + - Tenant isolation enforcement + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + non_existent_identifier = fake.uuid4() + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="MCP tool not found"): + MCPToolManageService.get_mcp_provider_by_server_identifier(non_existent_identifier, tenant.id) + + def test_get_mcp_provider_by_server_identifier_tenant_isolation( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test tenant isolation when retrieving MCP provider by server identifier. + + This test verifies: + - Proper tenant isolation enforcement + - Providers from other tenants are not accessible by server identifier + - Security boundaries are maintained + """ + # Arrange: Create test data for two tenants + fake = Faker() + account1, tenant1 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + account2, tenant2 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider in tenant1 + mcp_provider1 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant1.id, account1.id + ) + + # Act & Assert: Verify tenant isolation + with pytest.raises(ValueError, match="MCP tool not found"): + MCPToolManageService.get_mcp_provider_by_server_identifier(mcp_provider1.server_identifier, tenant2.id) + + def test_create_mcp_provider_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful creation of MCP provider. 
+ + This test verifies: + - Proper MCP provider creation with all required fields + - Correct database state after creation + - Proper relationship establishment + - External service integration + - Return value correctness + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Setup mocks for provider creation + mock_external_service_dependencies["encrypter"].encrypt_token.return_value = "encrypted_server_url" + mock_external_service_dependencies["tool_transform_service"].mcp_provider_to_user_provider.return_value = { + "id": "new_provider_id", + "name": "Test MCP Provider", + "type": ToolProviderType.MCP, + } + + # Act: Execute the method under test + result = MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider", + server_url="https://example.com/mcp", + user_id=account.id, + icon="🤖", + icon_type="emoji", + icon_background="#FF6B6B", + server_identifier="test_identifier_123", + timeout=30.0, + sse_read_timeout=300.0, + ) + + # Assert: Verify the expected outcomes + assert result is not None + assert result["name"] == "Test MCP Provider" + assert result["type"] == ToolProviderType.MCP + + # Verify database state + from extensions.ext_database import db + + created_provider = ( + db.session.query(MCPToolProvider) + .filter(MCPToolProvider.tenant_id == tenant.id, MCPToolProvider.name == "Test MCP Provider") + .first() + ) + + assert created_provider is not None + assert created_provider.server_identifier == "test_identifier_123" + assert created_provider.timeout == 30.0 + assert created_provider.sse_read_timeout == 300.0 + assert created_provider.authed is False + assert created_provider.tools == "[]" + + # Verify mock interactions + mock_external_service_dependencies["encrypter"].encrypt_token.assert_called_once_with( + tenant.id, "https://example.com/mcp" + ) + mock_external_service_dependencies["tool_transform_service"].mcp_provider_to_user_provider.assert_called_once() + + def test_create_mcp_provider_duplicate_name(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test error handling when creating MCP provider with duplicate name. 
+ + This test verifies: + - Proper error handling for duplicate provider names + - Correct exception type and message + - Database integrity constraints + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create first provider + MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider", + server_url="https://example1.com/mcp", + user_id=account.id, + icon="🤖", + icon_type="emoji", + icon_background="#FF6B6B", + server_identifier="test_identifier_1", + timeout=30.0, + sse_read_timeout=300.0, + ) + + # Act & Assert: Verify proper error handling for duplicate name + with pytest.raises(ValueError, match="MCP tool Test MCP Provider already exists"): + MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider", # Duplicate name + server_url="https://example2.com/mcp", + user_id=account.id, + icon="🚀", + icon_type="emoji", + icon_background="#4ECDC4", + server_identifier="test_identifier_2", + timeout=45.0, + sse_read_timeout=400.0, + ) + + def test_create_mcp_provider_duplicate_server_url( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when creating MCP provider with duplicate server URL. + + This test verifies: + - Proper error handling for duplicate server URLs + - Correct exception type and message + - URL hash uniqueness enforcement + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create first provider + MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider 1", + server_url="https://example.com/mcp", + user_id=account.id, + icon="🤖", + icon_type="emoji", + icon_background="#FF6B6B", + server_identifier="test_identifier_1", + timeout=30.0, + sse_read_timeout=300.0, + ) + + # Act & Assert: Verify proper error handling for duplicate server URL + with pytest.raises(ValueError, match="MCP tool https://example.com/mcp already exists"): + MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider 2", + server_url="https://example.com/mcp", # Duplicate URL + user_id=account.id, + icon="🚀", + icon_type="emoji", + icon_background="#4ECDC4", + server_identifier="test_identifier_2", + timeout=45.0, + sse_read_timeout=400.0, + ) + + def test_create_mcp_provider_duplicate_server_identifier( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when creating MCP provider with duplicate server identifier. 
+ + This test verifies: + - Proper error handling for duplicate server identifiers + - Correct exception type and message + - Server identifier uniqueness enforcement + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create first provider + MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider 1", + server_url="https://example1.com/mcp", + user_id=account.id, + icon="🤖", + icon_type="emoji", + icon_background="#FF6B6B", + server_identifier="test_identifier_123", + timeout=30.0, + sse_read_timeout=300.0, + ) + + # Act & Assert: Verify proper error handling for duplicate server identifier + with pytest.raises(ValueError, match="MCP tool test_identifier_123 already exists"): + MCPToolManageService.create_mcp_provider( + tenant_id=tenant.id, + name="Test MCP Provider 2", + server_url="https://example2.com/mcp", + user_id=account.id, + icon="🚀", + icon_type="emoji", + icon_background="#4ECDC4", + server_identifier="test_identifier_123", # Duplicate identifier + timeout=45.0, + sse_read_timeout=400.0, + ) + + def test_retrieve_mcp_tools_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of MCP tools for a tenant. + + This test verifies: + - Proper retrieval of all MCP providers for a tenant + - Correct ordering by name + - Proper transformation of providers to user entities + - Empty list handling for tenants with no providers + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create multiple MCP providers + provider1 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + provider1.name = "Alpha Provider" + + provider2 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + provider2.name = "Beta Provider" + + provider3 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + provider3.name = "Gamma Provider" + + from extensions.ext_database import db + + db.session.commit() + + # Setup mock for transformation service + mock_external_service_dependencies["tool_transform_service"].mcp_provider_to_user_provider.side_effect = [ + {"id": provider1.id, "name": provider1.name, "type": ToolProviderType.MCP}, + {"id": provider2.id, "name": provider2.name, "type": ToolProviderType.MCP}, + {"id": provider3.id, "name": provider3.name, "type": ToolProviderType.MCP}, + ] + + # Act: Execute the method under test + result = MCPToolManageService.retrieve_mcp_tools(tenant.id, for_list=True) + + # Assert: Verify the expected outcomes + assert result is not None + assert len(result) == 3 + + # Verify correct ordering by name + assert result[0]["name"] == "Alpha Provider" + assert result[1]["name"] == "Beta Provider" + assert result[2]["name"] == "Gamma Provider" + + # Verify mock interactions + assert ( + mock_external_service_dependencies["tool_transform_service"].mcp_provider_to_user_provider.call_count == 3 + ) + + def test_retrieve_mcp_tools_empty_list(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test retrieval of MCP tools when tenant has no providers. 
+ + This test verifies: + - Proper handling of empty provider lists + - Correct return value for tenants with no MCP tools + - No transformation service calls for empty lists + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # No MCP providers created for this tenant + + # Act: Execute the method under test + result = MCPToolManageService.retrieve_mcp_tools(tenant.id, for_list=False) + + # Assert: Verify the expected outcomes + assert result is not None + assert len(result) == 0 + assert isinstance(result, list) + + # Verify no transformation service calls for empty list + mock_external_service_dependencies["tool_transform_service"].mcp_provider_to_user_provider.assert_not_called() + + def test_retrieve_mcp_tools_tenant_isolation(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test tenant isolation when retrieving MCP tools. + + This test verifies: + - Proper tenant isolation enforcement + - Providers from other tenants are not accessible + - Security boundaries are maintained + """ + # Arrange: Create test data for two tenants + fake = Faker() + account1, tenant1 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + account2, tenant2 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider in tenant1 + provider1 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant1.id, account1.id + ) + + # Create MCP provider in tenant2 + provider2 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant2.id, account2.id + ) + + # Setup mock for transformation service + mock_external_service_dependencies["tool_transform_service"].mcp_provider_to_user_provider.side_effect = [ + {"id": provider1.id, "name": provider1.name, "type": ToolProviderType.MCP}, + {"id": provider2.id, "name": provider2.name, "type": ToolProviderType.MCP}, + ] + + # Act: Execute the method under test for both tenants + result1 = MCPToolManageService.retrieve_mcp_tools(tenant1.id, for_list=True) + result2 = MCPToolManageService.retrieve_mcp_tools(tenant2.id, for_list=True) + + # Assert: Verify tenant isolation + assert len(result1) == 1 + assert len(result2) == 1 + assert result1[0]["id"] == provider1.id + assert result2[0]["id"] == provider2.id + + def test_list_mcp_tool_from_remote_server_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful listing of MCP tools from remote server. 
+ + This test verifies: + - Proper connection to remote MCP server + - Correct tool listing and database update + - Proper authentication state management + - Return value correctness + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + mcp_provider.server_url = "encrypted_server_url" + mcp_provider.authed = False + mcp_provider.tools = "[]" + + from extensions.ext_database import db + + db.session.commit() + + # Mock the decrypted_server_url property to avoid encryption issues + with patch("models.tools.encrypter") as mock_encrypter: + mock_encrypter.decrypt_token.return_value = "https://example.com/mcp" + + # Mock MCPClient and its context manager + mock_tools = [ + type( + "MockTool", (), {"model_dump": lambda self: {"name": "test_tool_1", "description": "Test tool 1"}} + )(), + type( + "MockTool", (), {"model_dump": lambda self: {"name": "test_tool_2", "description": "Test tool 2"}} + )(), + ] + + with patch("services.tools.mcp_tools_manage_service.MCPClient") as mock_mcp_client: + # Setup mock client + mock_client_instance = mock_mcp_client.return_value.__enter__.return_value + mock_client_instance.list_tools.return_value = mock_tools + + # Act: Execute the method under test + result = MCPToolManageService.list_mcp_tool_from_remote_server(tenant.id, mcp_provider.id) + + # Assert: Verify the expected outcomes + assert result is not None + assert result.id == mcp_provider.id + assert result.name == mcp_provider.name + assert result.type == ToolProviderType.MCP + # Note: server_url is mocked, so we skip that assertion to avoid encryption issues + + # Verify database state was updated + db.session.refresh(mcp_provider) + assert mcp_provider.authed is True + assert mcp_provider.tools != "[]" + assert mcp_provider.updated_at is not None + + # Verify mock interactions + mock_mcp_client.assert_called_once_with( + "https://example.com/mcp", mcp_provider.id, tenant.id, authed=False, for_list=True + ) + + def test_list_mcp_tool_from_remote_server_auth_error( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when MCP server requires authentication. 
+ + This test verifies: + - Proper error handling for authentication errors + - Correct exception type and message + - Database state remains unchanged + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + mcp_provider.server_url = "encrypted_server_url" + mcp_provider.authed = False + mcp_provider.tools = "[]" + + from extensions.ext_database import db + + db.session.commit() + + # Mock the decrypted_server_url property to avoid encryption issues + with patch("models.tools.encrypter") as mock_encrypter: + mock_encrypter.decrypt_token.return_value = "https://example.com/mcp" + + # Mock MCPClient to raise authentication error + with patch("services.tools.mcp_tools_manage_service.MCPClient") as mock_mcp_client: + from core.mcp.error import MCPAuthError + + mock_client_instance = mock_mcp_client.return_value.__enter__.return_value + mock_client_instance.list_tools.side_effect = MCPAuthError("Authentication required") + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Please auth the tool first"): + MCPToolManageService.list_mcp_tool_from_remote_server(tenant.id, mcp_provider.id) + + # Verify database state was not changed + db.session.refresh(mcp_provider) + assert mcp_provider.authed is False + assert mcp_provider.tools == "[]" + + def test_list_mcp_tool_from_remote_server_connection_error( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when MCP server connection fails. + + This test verifies: + - Proper error handling for connection errors + - Correct exception type and message + - Database state remains unchanged + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + mcp_provider.server_url = "encrypted_server_url" + mcp_provider.authed = False + mcp_provider.tools = "[]" + + from extensions.ext_database import db + + db.session.commit() + + # Mock the decrypted_server_url property to avoid encryption issues + with patch("models.tools.encrypter") as mock_encrypter: + mock_encrypter.decrypt_token.return_value = "https://example.com/mcp" + + # Mock MCPClient to raise connection error + with patch("services.tools.mcp_tools_manage_service.MCPClient") as mock_mcp_client: + from core.mcp.error import MCPError + + mock_client_instance = mock_mcp_client.return_value.__enter__.return_value + mock_client_instance.list_tools.side_effect = MCPError("Connection failed") + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Failed to connect to MCP server: Connection failed"): + MCPToolManageService.list_mcp_tool_from_remote_server(tenant.id, mcp_provider.id) + + # Verify database state was not changed + db.session.refresh(mcp_provider) + assert mcp_provider.authed is False + assert mcp_provider.tools == "[]" + + def test_delete_mcp_tool_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful deletion of MCP tool. 
+ + This test verifies: + - Proper deletion of MCP provider from database + - Correct tenant isolation enforcement + - Database state after deletion + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + + # Verify provider exists + from extensions.ext_database import db + + assert db.session.query(MCPToolProvider).filter_by(id=mcp_provider.id).first() is not None + + # Act: Execute the method under test + MCPToolManageService.delete_mcp_tool(tenant.id, mcp_provider.id) + + # Assert: Verify the expected outcomes + # Provider should be deleted from database + deleted_provider = db.session.query(MCPToolProvider).filter_by(id=mcp_provider.id).first() + assert deleted_provider is None + + def test_delete_mcp_tool_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test error handling when deleting non-existent MCP tool. + + This test verifies: + - Proper error handling for non-existent provider IDs + - Correct exception type and message + - Tenant isolation enforcement + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + non_existent_id = fake.uuid4() + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="MCP tool not found"): + MCPToolManageService.delete_mcp_tool(tenant.id, non_existent_id) + + def test_delete_mcp_tool_tenant_isolation(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test tenant isolation when deleting MCP tool. + + This test verifies: + - Proper tenant isolation enforcement + - Providers from other tenants cannot be deleted + - Security boundaries are maintained + """ + # Arrange: Create test data for two tenants + fake = Faker() + account1, tenant1 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + account2, tenant2 = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider in tenant1 + mcp_provider1 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant1.id, account1.id + ) + + # Act & Assert: Verify tenant isolation + with pytest.raises(ValueError, match="MCP tool not found"): + MCPToolManageService.delete_mcp_tool(tenant2.id, mcp_provider1.id) + + # Verify provider still exists in tenant1 + from extensions.ext_database import db + + assert db.session.query(MCPToolProvider).filter_by(id=mcp_provider1.id).first() is not None + + def test_update_mcp_provider_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful update of MCP provider. 
+ + This test verifies: + - Proper update of MCP provider fields + - Correct database state after update + - Proper handling of unchanged server URL + - External service integration + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + original_name = mcp_provider.name + original_icon = mcp_provider.icon + + from extensions.ext_database import db + + db.session.commit() + + # Act: Execute the method under test + MCPToolManageService.update_mcp_provider( + tenant_id=tenant.id, + provider_id=mcp_provider.id, + name="Updated MCP Provider", + server_url=UNCHANGED_SERVER_URL_PLACEHOLDER, # Use placeholder for unchanged URL + icon="🚀", + icon_type="emoji", + icon_background="#4ECDC4", + server_identifier="updated_identifier_123", + timeout=45.0, + sse_read_timeout=400.0, + ) + + # Assert: Verify the expected outcomes + db.session.refresh(mcp_provider) + assert mcp_provider.name == "Updated MCP Provider" + assert mcp_provider.server_identifier == "updated_identifier_123" + assert mcp_provider.timeout == 45.0 + assert mcp_provider.sse_read_timeout == 400.0 + assert mcp_provider.updated_at is not None + + # Verify icon was updated + import json + + icon_data = json.loads(mcp_provider.icon) + assert icon_data["content"] == "🚀" + assert icon_data["background"] == "#4ECDC4" + + def test_update_mcp_provider_with_server_url_change( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful update of MCP provider with server URL change. 
+ + This test verifies: + - Proper handling of server URL changes + - Correct reconnection logic + - Database state updates + - External service integration + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + + from extensions.ext_database import db + + db.session.commit() + + # Mock the reconnection method + with patch.object(MCPToolManageService, "_re_connect_mcp_provider") as mock_reconnect: + mock_reconnect.return_value = { + "authed": True, + "tools": '[{"name": "test_tool"}]', + "encrypted_credentials": "{}", + } + + # Act: Execute the method under test + MCPToolManageService.update_mcp_provider( + tenant_id=tenant.id, + provider_id=mcp_provider.id, + name="Updated MCP Provider", + server_url="https://new-example.com/mcp", + icon="🚀", + icon_type="emoji", + icon_background="#4ECDC4", + server_identifier="updated_identifier_123", + timeout=45.0, + sse_read_timeout=400.0, + ) + + # Assert: Verify the expected outcomes + db.session.refresh(mcp_provider) + assert mcp_provider.name == "Updated MCP Provider" + assert mcp_provider.server_identifier == "updated_identifier_123" + assert mcp_provider.timeout == 45.0 + assert mcp_provider.sse_read_timeout == 400.0 + assert mcp_provider.updated_at is not None + + # Verify reconnection was called + mock_reconnect.assert_called_once_with("https://new-example.com/mcp", mcp_provider.id, tenant.id) + + def test_update_mcp_provider_duplicate_name(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test error handling when updating MCP provider with duplicate name. + + This test verifies: + - Proper error handling for duplicate provider names + - Correct exception type and message + - Database integrity constraints + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create two MCP providers + provider1 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + provider1.name = "First Provider" + + provider2 = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + provider2.name = "Second Provider" + + from extensions.ext_database import db + + db.session.commit() + + # Act & Assert: Verify proper error handling for duplicate name + with pytest.raises(ValueError, match="MCP tool First Provider already exists"): + MCPToolManageService.update_mcp_provider( + tenant_id=tenant.id, + provider_id=provider2.id, + name="First Provider", # Duplicate name + server_url=UNCHANGED_SERVER_URL_PLACEHOLDER, + icon="🚀", + icon_type="emoji", + icon_background="#4ECDC4", + server_identifier="unique_identifier", + timeout=45.0, + sse_read_timeout=400.0, + ) + + def test_update_mcp_provider_credentials_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful update of MCP provider credentials. 
+ + This test verifies: + - Proper encryption of credentials + - Correct database state after update + - Authentication state management + - External service integration + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + mcp_provider.encrypted_credentials = '{"existing_key": "existing_value"}' + mcp_provider.authed = False + mcp_provider.tools = "[]" + + from extensions.ext_database import db + + db.session.commit() + + # Mock the provider controller and encryption + with ( + patch("services.tools.mcp_tools_manage_service.MCPToolProviderController") as mock_controller, + patch("services.tools.mcp_tools_manage_service.ProviderConfigEncrypter") as mock_encrypter, + ): + # Setup mocks + mock_controller_instance = mock_controller._from_db.return_value + mock_controller_instance.get_credentials_schema.return_value = [] + + mock_encrypter_instance = mock_encrypter.return_value + mock_encrypter_instance.encrypt.return_value = {"new_key": "encrypted_value"} + + # Act: Execute the method under test + MCPToolManageService.update_mcp_provider_credentials( + mcp_provider=mcp_provider, credentials={"new_key": "new_value"}, authed=True + ) + + # Assert: Verify the expected outcomes + db.session.refresh(mcp_provider) + assert mcp_provider.authed is True + assert mcp_provider.updated_at is not None + + # Verify credentials were encrypted and merged + import json + + credentials = json.loads(mcp_provider.encrypted_credentials) + assert "existing_key" in credentials + assert "new_key" in credentials + + def test_update_mcp_provider_credentials_not_authed( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test update of MCP provider credentials when not authenticated. 
+ + This test verifies: + - Proper handling of non-authenticated state + - Tools list is cleared when not authenticated + - Credentials are still updated + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Create MCP provider + mcp_provider = self._create_test_mcp_provider( + db_session_with_containers, mock_external_service_dependencies, tenant.id, account.id + ) + mcp_provider.encrypted_credentials = '{"existing_key": "existing_value"}' + mcp_provider.authed = True + mcp_provider.tools = '[{"name": "test_tool"}]' + + from extensions.ext_database import db + + db.session.commit() + + # Mock the provider controller and encryption + with ( + patch("services.tools.mcp_tools_manage_service.MCPToolProviderController") as mock_controller, + patch("services.tools.mcp_tools_manage_service.ProviderConfigEncrypter") as mock_encrypter, + ): + # Setup mocks + mock_controller_instance = mock_controller._from_db.return_value + mock_controller_instance.get_credentials_schema.return_value = [] + + mock_encrypter_instance = mock_encrypter.return_value + mock_encrypter_instance.encrypt.return_value = {"new_key": "encrypted_value"} + + # Act: Execute the method under test + MCPToolManageService.update_mcp_provider_credentials( + mcp_provider=mcp_provider, credentials={"new_key": "new_value"}, authed=False + ) + + # Assert: Verify the expected outcomes + db.session.refresh(mcp_provider) + assert mcp_provider.authed is False + assert mcp_provider.tools == "[]" + assert mcp_provider.updated_at is not None + + def test_re_connect_mcp_provider_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful reconnection to MCP provider. 
+ + This test verifies: + - Proper connection to remote MCP server + - Correct tool listing and return value + - Proper error handling for authentication errors + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Mock MCPClient and its context manager + mock_tools = [ + type("MockTool", (), {"model_dump": lambda self: {"name": "test_tool_1", "description": "Test tool 1"}})(), + type("MockTool", (), {"model_dump": lambda self: {"name": "test_tool_2", "description": "Test tool 2"}})(), + ] + + with patch("services.tools.mcp_tools_manage_service.MCPClient") as mock_mcp_client: + # Setup mock client + mock_client_instance = mock_mcp_client.return_value.__enter__.return_value + mock_client_instance.list_tools.return_value = mock_tools + + # Act: Execute the method under test + result = MCPToolManageService._re_connect_mcp_provider( + "https://example.com/mcp", "test_provider_id", tenant.id + ) + + # Assert: Verify the expected outcomes + assert result is not None + assert result["authed"] is True + assert result["tools"] is not None + assert result["encrypted_credentials"] == "{}" + + # Verify tools were properly serialized + import json + + tools_data = json.loads(result["tools"]) + assert len(tools_data) == 2 + assert tools_data[0]["name"] == "test_tool_1" + assert tools_data[1]["name"] == "test_tool_2" + + # Verify mock interactions + mock_mcp_client.assert_called_once_with( + "https://example.com/mcp", "test_provider_id", tenant.id, authed=False, for_list=True + ) + + def test_re_connect_mcp_provider_auth_error(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test reconnection to MCP provider when authentication fails. + + This test verifies: + - Proper handling of authentication errors + - Correct return value for failed authentication + - Tools list is cleared + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Mock MCPClient to raise authentication error + with patch("services.tools.mcp_tools_manage_service.MCPClient") as mock_mcp_client: + from core.mcp.error import MCPAuthError + + mock_client_instance = mock_mcp_client.return_value.__enter__.return_value + mock_client_instance.list_tools.side_effect = MCPAuthError("Authentication required") + + # Act: Execute the method under test + result = MCPToolManageService._re_connect_mcp_provider( + "https://example.com/mcp", "test_provider_id", tenant.id + ) + + # Assert: Verify the expected outcomes + assert result is not None + assert result["authed"] is False + assert result["tools"] == "[]" + assert result["encrypted_credentials"] == "{}" + + def test_re_connect_mcp_provider_connection_error( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test reconnection to MCP provider when connection fails. 
+ + This test verifies: + - Proper error handling for connection errors + - Correct exception type and message + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + # Mock MCPClient to raise connection error + with patch("services.tools.mcp_tools_manage_service.MCPClient") as mock_mcp_client: + from core.mcp.error import MCPError + + mock_client_instance = mock_mcp_client.return_value.__enter__.return_value + mock_client_instance.list_tools.side_effect = MCPError("Connection failed") + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError, match="Failed to re-connect MCP server: Connection failed"): + MCPToolManageService._re_connect_mcp_provider("https://example.com/mcp", "test_provider_id", tenant.id) From e5e42bc483547259ae802b72cf141946944b3d36 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sun, 31 Aug 2025 17:01:10 +0800 Subject: [PATCH 10/96] fix: XSS vulnerability in block-input and support-var-input components (#24835) --- web/__tests__/xss-fix-verification.test.tsx | 212 ------------------ web/__tests__/xss-prevention.test.tsx | 76 +++++++ .../base/var-highlight/index.tsx | 17 +- web/app/components/base/block-input/index.tsx | 35 ++- .../components/support-var-input/index.tsx | 31 ++- 5 files changed, 134 insertions(+), 237 deletions(-) delete mode 100644 web/__tests__/xss-fix-verification.test.tsx create mode 100644 web/__tests__/xss-prevention.test.tsx diff --git a/web/__tests__/xss-fix-verification.test.tsx b/web/__tests__/xss-fix-verification.test.tsx deleted file mode 100644 index 2fa5ab3c05..0000000000 --- a/web/__tests__/xss-fix-verification.test.tsx +++ /dev/null @@ -1,212 +0,0 @@ -/** - * XSS Fix Verification Test - * - * This test verifies that the XSS vulnerability in check-code pages has been - * properly fixed by replacing dangerouslySetInnerHTML with safe React rendering. - */ - -import React from 'react' -import { cleanup, render } from '@testing-library/react' -import '@testing-library/jest-dom' - -// Mock i18next with the new safe translation structure -jest.mock('react-i18next', () => ({ - useTranslation: () => ({ - t: (key: string) => { - if (key === 'login.checkCode.tipsPrefix') - return 'We send a verification code to ' - - return key - }, - }), -})) - -// Mock Next.js useSearchParams -jest.mock('next/navigation', () => ({ - useSearchParams: () => ({ - get: (key: string) => { - if (key === 'email') - return 'test@example.com' - return null - }, - }), -})) - -// Fixed CheckCode component implementation (current secure version) -const SecureCheckCodeComponent = ({ email }: { email: string }) => { - const { t } = require('react-i18next').useTranslation() - - return ( -
-

Check Code

-

- - {t('login.checkCode.tipsPrefix')} - {email} - -

-
- ) -} - -// Vulnerable implementation for comparison (what we fixed) -const VulnerableCheckCodeComponent = ({ email }: { email: string }) => { - const mockTranslation = (key: string, params?: any) => { - if (key === 'login.checkCode.tips' && params?.email) - return `We send a verification code to ${params.email}` - - return key - } - - return ( -
-

Check Code

-

- -

-
- ) -} - -describe('XSS Fix Verification - Check Code Pages Security', () => { - afterEach(() => { - cleanup() - }) - - const maliciousEmail = 'test@example.com' - - it('should securely render email with HTML characters as text (FIXED VERSION)', () => { - console.log('\n🔒 Security Fix Verification Report') - console.log('===================================') - - const { container } = render() - - const spanElement = container.querySelector('span') - const strongElement = container.querySelector('strong') - const scriptElements = container.querySelectorAll('script') - - console.log('\n✅ Fixed Implementation Results:') - console.log('- Email rendered in strong tag:', strongElement?.textContent) - console.log('- HTML tags visible as text:', strongElement?.textContent?.includes('', - 'normal@email.com', - ] - - testCases.forEach((testEmail, index) => { - const { container } = render() - - const strongElement = container.querySelector('strong') - const scriptElements = container.querySelectorAll('script') - const imgElements = container.querySelectorAll('img') - const divElements = container.querySelectorAll('div:not([data-testid])') - - console.log(`\n📧 Test Case ${index + 1}: ${testEmail.substring(0, 20)}...`) - console.log(` - Script elements: ${scriptElements.length}`) - console.log(` - Img elements: ${imgElements.length}`) - console.log(` - Malicious divs: ${divElements.length - 1}`) // -1 for container div - console.log(` - Text content: ${strongElement?.textContent === testEmail ? 'SAFE' : 'ISSUE'}`) - - // All should be safe - expect(scriptElements).toHaveLength(0) - expect(imgElements).toHaveLength(0) - expect(strongElement?.textContent).toBe(testEmail) - }) - - console.log('\n✅ All test cases passed - secure rendering confirmed') - }) - - it('should validate the translation structure is secure', () => { - console.log('\n🔍 Translation Security Analysis') - console.log('=================================') - - const { t } = require('react-i18next').useTranslation() - const prefix = t('login.checkCode.tipsPrefix') - - console.log('- Translation key used: login.checkCode.tipsPrefix') - console.log('- Translation value:', prefix) - console.log('- Contains HTML tags:', prefix.includes('<')) - console.log('- Pure text content:', !prefix.includes('<') && !prefix.includes('>')) - - // Verify translation is plain text - expect(prefix).toBe('We send a verification code to ') - expect(prefix).not.toContain('<') - expect(prefix).not.toContain('>') - expect(typeof prefix).toBe('string') - - console.log('\n✅ Translation structure is secure - no HTML content') - }) - - it('should confirm React automatic escaping works correctly', () => { - console.log('\n⚡ React Security Mechanism Test') - console.log('=================================') - - // Test React's automatic escaping with various inputs - const dangerousInputs = [ - '', - '', - '">', - '\'>alert(3)', - '
click
', - ] - - dangerousInputs.forEach((input, index) => { - const TestComponent = () => {input} - const { container } = render() - - const strongElement = container.querySelector('strong') - const scriptElements = container.querySelectorAll('script') - - console.log(`\n🧪 Input ${index + 1}: ${input.substring(0, 30)}...`) - console.log(` - Rendered as text: ${strongElement?.textContent === input}`) - console.log(` - No script execution: ${scriptElements.length === 0}`) - - expect(strongElement?.textContent).toBe(input) - expect(scriptElements).toHaveLength(0) - }) - - console.log('\n🛡️ React automatic escaping is working perfectly') - }) -}) - -export {} diff --git a/web/__tests__/xss-prevention.test.tsx b/web/__tests__/xss-prevention.test.tsx new file mode 100644 index 0000000000..064c6e08de --- /dev/null +++ b/web/__tests__/xss-prevention.test.tsx @@ -0,0 +1,76 @@ +/** + * XSS Prevention Test Suite + * + * This test verifies that the XSS vulnerabilities in block-input and support-var-input + * components have been properly fixed by replacing dangerouslySetInnerHTML with safe React rendering. + */ + +import React from 'react' +import { cleanup, render } from '@testing-library/react' +import '@testing-library/jest-dom' +import BlockInput from '../app/components/base/block-input' +import SupportVarInput from '../app/components/workflow/nodes/_base/components/support-var-input' + +// Mock styles +jest.mock('../app/components/app/configuration/base/var-highlight/style.module.css', () => ({ + item: 'mock-item-class', +})) + +describe('XSS Prevention - Block Input and Support Var Input Security', () => { + afterEach(() => { + cleanup() + }) + + describe('BlockInput Component Security', () => { + it('should safely render malicious variable names without executing scripts', () => { + const testInput = 'user@test.com{{}}' + const { container } = render() + + const scriptElements = container.querySelectorAll('script') + expect(scriptElements).toHaveLength(0) + + const textContent = container.textContent + expect(textContent).toContain(''} + const { container } = render() + + const spanElement = container.querySelector('span') + const scriptElements = container.querySelectorAll('script') + + expect(spanElement?.textContent).toBe('') + expect(scriptElements).toHaveLength(0) + }) + }) +}) + +export {} diff --git a/web/app/components/app/configuration/base/var-highlight/index.tsx b/web/app/components/app/configuration/base/var-highlight/index.tsx index 1900dd5be6..2d8fc2dcb4 100644 --- a/web/app/components/app/configuration/base/var-highlight/index.tsx +++ b/web/app/components/app/configuration/base/var-highlight/index.tsx @@ -16,19 +16,26 @@ const VarHighlight: FC = ({ return (
- {'{{'} - {name} - {'}}'} + {'{{'}{name}{'}}'}
   )
 }
 
+// DEPRECATED: This function is vulnerable to XSS attacks and should not be used
+// Use the VarHighlight React component instead
 export const varHighlightHTML = ({ name, className = '' }: IVarHighlightProps) => {
+  const escapedName = name
+    .replace(/&/g, '&amp;')
+    .replace(/</g, '&lt;')
+    .replace(/>/g, '&gt;')
+    .replace(/"/g, '&quot;')
+    .replace(/'/g, '&#x27;')
+
   const html = `
{{ - ${name} + ${escapedName} }}
` return html diff --git a/web/app/components/base/block-input/index.tsx b/web/app/components/base/block-input/index.tsx index 27d53a8eea..ae6f77fab3 100644 --- a/web/app/components/base/block-input/index.tsx +++ b/web/app/components/base/block-input/index.tsx @@ -3,7 +3,7 @@ import type { ChangeEvent, FC } from 'react' import React, { useCallback, useEffect, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' -import { varHighlightHTML } from '../../app/configuration/base/var-highlight' +import VarHighlight from '../../app/configuration/base/var-highlight' import Toast from '../toast' import classNames from '@/utils/classnames' import { checkKeys } from '@/utils/var' @@ -66,11 +66,24 @@ const BlockInput: FC = ({ 'block-input--editing': isEditing, }) - const coloredContent = (currentValue || '') - .replace(//g, '>') - .replace(regex, varHighlightHTML({ name: '$1' })) // `{{$1}}` - .replace(/\n/g, '
')
+  const renderSafeContent = (value: string) => {
+    const parts = value.split(/(\{\{[^}]+\}\}|\n)/g)
+    return parts.map((part, index) => {
+      const variableMatch = part.match(/^\{\{([^}]+)\}\}$/)
+      if (variableMatch) {
+        return (
+          <VarHighlight key={index} name={variableMatch[1]} />
+        )
+      }
+      if (part === '\n')
+        return <br key={index} />
+ + return {part} + }) + } // Not use useCallback. That will cause out callback get old data. const handleSubmit = (value: string) => { @@ -96,11 +109,11 @@ const BlockInput: FC = ({ // Prevent rerendering caused cursor to jump to the start of the contentEditable element const TextAreaContentView = () => { - return
+ return ( +
+ {renderSafeContent(currentValue || '')} +
+ ) } const placeholder = '' diff --git a/web/app/components/workflow/nodes/_base/components/support-var-input/index.tsx b/web/app/components/workflow/nodes/_base/components/support-var-input/index.tsx index 6999a973f1..3be1262e14 100644 --- a/web/app/components/workflow/nodes/_base/components/support-var-input/index.tsx +++ b/web/app/components/workflow/nodes/_base/components/support-var-input/index.tsx @@ -2,7 +2,7 @@ import type { FC } from 'react' import React from 'react' import cn from '@/utils/classnames' -import { varHighlightHTML } from '@/app/components/app/configuration/base/var-highlight' +import VarHighlight from '@/app/components/app/configuration/base/var-highlight' type Props = { isFocus?: boolean onFocus?: () => void @@ -22,11 +22,24 @@ const SupportVarInput: FC = ({ textClassName, readonly, }) => { - const withHightContent = (value || '') - .replace(//g, '>') - .replace(/\{\{([^}]+)\}\}/g, varHighlightHTML({ name: '$1', className: '!mb-0' })) // `{{$1}}` - .replace(/\n/g, '
')
+  const renderSafeContent = (inputValue: string) => {
+    const parts = inputValue.split(/(\{\{[^}]+\}\}|\n)/g)
+    return parts.map((part, index) => {
+      const variableMatch = part.match(/^\{\{([^}]+)\}\}$/)
+      if (variableMatch) {
+        return (
+          <VarHighlight key={index} name={variableMatch[1]} className='!mb-0' />
+        )
+      }
+      if (part === '\n')
+        return <br key={index} />
+ + return {part} + }) + } return (
= ({
+ > + {renderSafeContent(value || '')} +
)}
) From f3c5d77ad50c9b158e5f49afd9ed840135413830 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Sun, 31 Aug 2025 17:01:19 +0800 Subject: [PATCH 11/96] chore: remove duplicate Python style checks handled by autofix CI (#24833) --- .github/workflows/style.yml | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index 7cd43d2a97..b6c9131c08 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -44,21 +44,10 @@ jobs: if: steps.changed-files.outputs.any_changed == 'true' run: uv sync --project api --dev - - name: Ruff check - if: steps.changed-files.outputs.any_changed == 'true' - run: | - uv run --directory api ruff --version - uv run --directory api ruff check ./ - uv run --directory api ruff format --check ./ - - name: Dotenv check if: steps.changed-files.outputs.any_changed == 'true' run: uv run --project api dotenv-linter ./api/.env.example ./web/.env.example - - name: Lint hints - if: failure() - run: echo "Please run 'dev/reformat' to fix the fixable linting errors." - web-style: name: Web Style runs-on: ubuntu-latest From b66945b9b8b4d4bd3ca4c562d2c3e2562f70b65c Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Sun, 31 Aug 2025 17:02:08 +0800 Subject: [PATCH 12/96] feat: add test containers based tests for api tool manage service (#24821) --- .../services/tools/__init__.py | 0 .../tools/test_api_tools_manage_service.py | 550 ++++++++++++++++++ 2 files changed, 550 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/tools/__init__.py create mode 100644 api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py diff --git a/api/tests/test_containers_integration_tests/services/tools/__init__.py b/api/tests/test_containers_integration_tests/services/tools/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py b/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py new file mode 100644 index 0000000000..a412bdccf8 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/tools/test_api_tools_manage_service.py @@ -0,0 +1,550 @@ +from unittest.mock import patch + +import pytest +from faker import Faker + +from models.account import Account, Tenant +from models.tools import ApiToolProvider +from services.tools.api_tools_manage_service import ApiToolManageService + + +class TestApiToolManageService: + """Integration tests for ApiToolManageService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.tools.api_tools_manage_service.ToolLabelManager") as mock_tool_label_manager, + patch("services.tools.api_tools_manage_service.create_tool_provider_encrypter") as mock_encrypter, + patch("services.tools.api_tools_manage_service.ApiToolProviderController") as mock_provider_controller, + ): + # Setup default mock returns + mock_tool_label_manager.update_tool_labels.return_value = None + mock_encrypter.return_value = (mock_encrypter, None) + mock_encrypter.encrypt.return_value = {"encrypted": "credentials"} + mock_provider_controller.from_db.return_value = mock_provider_controller + mock_provider_controller.load_bundled_tools.return_value = None + + yield { + "tool_label_manager": mock_tool_label_manager, + "encrypter": 
mock_encrypter, + "provider_controller": mock_provider_controller, + } + + def _create_test_account_and_tenant(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account and tenant for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (account, tenant) - Created account and tenant instances + """ + fake = Faker() + + # Create account + account = Account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + status="active", + ) + + from extensions.ext_database import db + + db.session.add(account) + db.session.commit() + + # Create tenant for the account + tenant = Tenant( + name=fake.company(), + status="normal", + ) + db.session.add(tenant) + db.session.commit() + + # Create tenant-account join + from models.account import TenantAccountJoin, TenantAccountRole + + join = TenantAccountJoin( + tenant_id=tenant.id, + account_id=account.id, + role=TenantAccountRole.OWNER.value, + current=True, + ) + db.session.add(join) + db.session.commit() + + # Set current tenant for account + account.current_tenant = tenant + + return account, tenant + + def _create_test_openapi_schema(self): + """Helper method to create a test OpenAPI schema.""" + return """ + { + "openapi": "3.0.0", + "info": { + "title": "Test API", + "version": "1.0.0", + "description": "Test API for testing purposes" + }, + "servers": [ + { + "url": "https://api.example.com", + "description": "Production server" + } + ], + "paths": { + "/test": { + "get": { + "operationId": "testOperation", + "summary": "Test operation", + "responses": { + "200": { + "description": "Success" + } + } + } + } + } + } + """ + + def test_parser_api_schema_success( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful parsing of API schema. 
+ + This test verifies: + - Proper schema parsing with valid OpenAPI schema + - Correct credentials schema generation + - Proper warning handling + - Return value structure + """ + # Arrange: Create test schema + schema = self._create_test_openapi_schema() + + # Act: Parse the schema + result = ApiToolManageService.parser_api_schema(schema) + + # Assert: Verify the result structure + assert result is not None + assert "schema_type" in result + assert "parameters_schema" in result + assert "credentials_schema" in result + assert "warning" in result + + # Verify credentials schema structure + credentials_schema = result["credentials_schema"] + assert len(credentials_schema) == 3 + + # Check auth_type field + auth_type_field = next(field for field in credentials_schema if field["name"] == "auth_type") + assert auth_type_field["required"] is True + assert auth_type_field["default"] == "none" + assert len(auth_type_field["options"]) == 2 + + # Check api_key_header field + api_key_header_field = next(field for field in credentials_schema if field["name"] == "api_key_header") + assert api_key_header_field["required"] is False + assert api_key_header_field["default"] == "api_key" + + # Check api_key_value field + api_key_value_field = next(field for field in credentials_schema if field["name"] == "api_key_value") + assert api_key_value_field["required"] is False + assert api_key_value_field["default"] == "" + + def test_parser_api_schema_invalid_schema( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test parsing of invalid API schema. + + This test verifies: + - Proper error handling for invalid schemas + - Correct exception type and message + - Error propagation from underlying parser + """ + # Arrange: Create invalid schema + invalid_schema = "invalid json schema" + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError) as exc_info: + ApiToolManageService.parser_api_schema(invalid_schema) + + assert "invalid schema" in str(exc_info.value) + + def test_parser_api_schema_malformed_json( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test parsing of malformed JSON schema. + + This test verifies: + - Proper error handling for malformed JSON + - Correct exception type and message + - Error propagation from JSON parsing + """ + # Arrange: Create malformed JSON schema + malformed_schema = '{"openapi": "3.0.0", "info": {"title": "Test", "version": "1.0.0"}, "paths": {}}' + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError) as exc_info: + ApiToolManageService.parser_api_schema(malformed_schema) + + assert "invalid schema" in str(exc_info.value) + + def test_convert_schema_to_tool_bundles_success( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful conversion of schema to tool bundles. 
+ + This test verifies: + - Proper schema conversion with valid OpenAPI schema + - Correct tool bundles generation + - Proper schema type detection + - Return value structure + """ + # Arrange: Create test schema + schema = self._create_test_openapi_schema() + + # Act: Convert schema to tool bundles + tool_bundles, schema_type = ApiToolManageService.convert_schema_to_tool_bundles(schema) + + # Assert: Verify the result structure + assert tool_bundles is not None + assert isinstance(tool_bundles, list) + assert len(tool_bundles) > 0 + assert schema_type is not None + assert isinstance(schema_type, str) + + # Verify tool bundle structure + tool_bundle = tool_bundles[0] + assert hasattr(tool_bundle, "operation_id") + assert tool_bundle.operation_id == "testOperation" + + def test_convert_schema_to_tool_bundles_with_extra_info( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful conversion of schema to tool bundles with extra info. + + This test verifies: + - Proper schema conversion with extra info parameter + - Correct tool bundles generation + - Extra info handling + - Return value structure + """ + # Arrange: Create test schema and extra info + schema = self._create_test_openapi_schema() + extra_info = {"description": "Custom description", "version": "2.0.0"} + + # Act: Convert schema to tool bundles with extra info + tool_bundles, schema_type = ApiToolManageService.convert_schema_to_tool_bundles(schema, extra_info) + + # Assert: Verify the result structure + assert tool_bundles is not None + assert isinstance(tool_bundles, list) + assert len(tool_bundles) > 0 + assert schema_type is not None + assert isinstance(schema_type, str) + + def test_convert_schema_to_tool_bundles_invalid_schema( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test conversion of invalid schema to tool bundles. + + This test verifies: + - Proper error handling for invalid schemas + - Correct exception type and message + - Error propagation from underlying parser + """ + # Arrange: Create invalid schema + invalid_schema = "invalid schema content" + + # Act & Assert: Verify proper error handling + with pytest.raises(ValueError) as exc_info: + ApiToolManageService.convert_schema_to_tool_bundles(invalid_schema) + + assert "invalid schema" in str(exc_info.value) + + def test_create_api_tool_provider_success( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful creation of API tool provider. 
+ + This test verifies: + - Proper provider creation with valid parameters + - Correct database state after creation + - Proper relationship establishment + - External service integration + - Return value correctness + """ + # Arrange: Create test data + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_name = fake.company() + icon = {"type": "emoji", "value": "🔧"} + credentials = {"auth_type": "none", "api_key_header": "X-API-Key", "api_key_value": ""} + schema_type = "openapi" + schema = self._create_test_openapi_schema() + privacy_policy = "https://example.com/privacy" + custom_disclaimer = "Custom disclaimer text" + labels = ["test", "api"] + + # Act: Create API tool provider + result = ApiToolManageService.create_api_tool_provider( + user_id=account.id, + tenant_id=tenant.id, + provider_name=provider_name, + icon=icon, + credentials=credentials, + schema_type=schema_type, + schema=schema, + privacy_policy=privacy_policy, + custom_disclaimer=custom_disclaimer, + labels=labels, + ) + + # Assert: Verify the result + assert result == {"result": "success"} + + # Verify database state + from extensions.ext_database import db + + provider = ( + db.session.query(ApiToolProvider) + .filter(ApiToolProvider.tenant_id == tenant.id, ApiToolProvider.name == provider_name) + .first() + ) + + assert provider is not None + assert provider.name == provider_name + assert provider.tenant_id == tenant.id + assert provider.user_id == account.id + assert provider.schema_type_str == schema_type + assert provider.privacy_policy == privacy_policy + assert provider.custom_disclaimer == custom_disclaimer + + # Verify mock interactions + mock_external_service_dependencies["tool_label_manager"].update_tool_labels.assert_called_once() + mock_external_service_dependencies["encrypter"].assert_called_once() + mock_external_service_dependencies["provider_controller"].from_db.assert_called_once() + mock_external_service_dependencies["provider_controller"].load_bundled_tools.assert_called_once() + + def test_create_api_tool_provider_duplicate_name( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creation of API tool provider with duplicate name. 
+ + This test verifies: + - Proper error handling for duplicate provider names + - Correct exception type and message + - Database constraint enforcement + """ + # Arrange: Create test data and existing provider + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_name = fake.company() + icon = {"type": "emoji", "value": "🔧"} + credentials = {"auth_type": "none"} + schema_type = "openapi" + schema = self._create_test_openapi_schema() + privacy_policy = "https://example.com/privacy" + custom_disclaimer = "Custom disclaimer text" + labels = ["test"] + + # Create first provider + ApiToolManageService.create_api_tool_provider( + user_id=account.id, + tenant_id=tenant.id, + provider_name=provider_name, + icon=icon, + credentials=credentials, + schema_type=schema_type, + schema=schema, + privacy_policy=privacy_policy, + custom_disclaimer=custom_disclaimer, + labels=labels, + ) + + # Act & Assert: Try to create duplicate provider + with pytest.raises(ValueError) as exc_info: + ApiToolManageService.create_api_tool_provider( + user_id=account.id, + tenant_id=tenant.id, + provider_name=provider_name, + icon=icon, + credentials=credentials, + schema_type=schema_type, + schema=schema, + privacy_policy=privacy_policy, + custom_disclaimer=custom_disclaimer, + labels=labels, + ) + + assert f"provider {provider_name} already exists" in str(exc_info.value) + + def test_create_api_tool_provider_invalid_schema_type( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creation of API tool provider with invalid schema type. + + This test verifies: + - Proper error handling for invalid schema types + - Correct exception type and message + - Schema type validation + """ + # Arrange: Create test data with invalid schema type + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_name = fake.company() + icon = {"type": "emoji", "value": "🔧"} + credentials = {"auth_type": "none"} + schema_type = "invalid_type" + schema = self._create_test_openapi_schema() + privacy_policy = "https://example.com/privacy" + custom_disclaimer = "Custom disclaimer text" + labels = ["test"] + + # Act & Assert: Try to create provider with invalid schema type + with pytest.raises(ValueError) as exc_info: + ApiToolManageService.create_api_tool_provider( + user_id=account.id, + tenant_id=tenant.id, + provider_name=provider_name, + icon=icon, + credentials=credentials, + schema_type=schema_type, + schema=schema, + privacy_policy=privacy_policy, + custom_disclaimer=custom_disclaimer, + labels=labels, + ) + + assert "invalid schema type" in str(exc_info.value) + + def test_create_api_tool_provider_missing_auth_type( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test creation of API tool provider with missing auth type. 
+ + This test verifies: + - Proper error handling for missing auth type + - Correct exception type and message + - Credentials validation + """ + # Arrange: Create test data with missing auth type + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_name = fake.company() + icon = {"type": "emoji", "value": "🔧"} + credentials = {} # Missing auth_type + schema_type = "openapi" + schema = self._create_test_openapi_schema() + privacy_policy = "https://example.com/privacy" + custom_disclaimer = "Custom disclaimer text" + labels = ["test"] + + # Act & Assert: Try to create provider with missing auth type + with pytest.raises(ValueError) as exc_info: + ApiToolManageService.create_api_tool_provider( + user_id=account.id, + tenant_id=tenant.id, + provider_name=provider_name, + icon=icon, + credentials=credentials, + schema_type=schema_type, + schema=schema, + privacy_policy=privacy_policy, + custom_disclaimer=custom_disclaimer, + labels=labels, + ) + + assert "auth_type is required" in str(exc_info.value) + + def test_create_api_tool_provider_with_api_key_auth( + self, flask_req_ctx_with_containers, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful creation of API tool provider with API key authentication. + + This test verifies: + - Proper provider creation with API key auth + - Correct credentials handling + - Proper authentication type processing + """ + # Arrange: Create test data with API key auth + fake = Faker() + account, tenant = self._create_test_account_and_tenant( + db_session_with_containers, mock_external_service_dependencies + ) + + provider_name = fake.company() + icon = {"type": "emoji", "value": "🔑"} + credentials = {"auth_type": "api_key", "api_key_header": "X-API-Key", "api_key_value": fake.uuid4()} + schema_type = "openapi" + schema = self._create_test_openapi_schema() + privacy_policy = "https://example.com/privacy" + custom_disclaimer = "Custom disclaimer text" + labels = ["api_key", "secure"] + + # Act: Create API tool provider + result = ApiToolManageService.create_api_tool_provider( + user_id=account.id, + tenant_id=tenant.id, + provider_name=provider_name, + icon=icon, + credentials=credentials, + schema_type=schema_type, + schema=schema, + privacy_policy=privacy_policy, + custom_disclaimer=custom_disclaimer, + labels=labels, + ) + + # Assert: Verify the result + assert result == {"result": "success"} + + # Verify database state + from extensions.ext_database import db + + provider = ( + db.session.query(ApiToolProvider) + .filter(ApiToolProvider.tenant_id == tenant.id, ApiToolProvider.name == provider_name) + .first() + ) + + assert provider is not None + assert provider.name == provider_name + assert provider.tenant_id == tenant.id + assert provider.user_id == account.id + assert provider.schema_type_str == schema_type + + # Verify mock interactions + mock_external_service_dependencies["encrypter"].assert_called_once() + mock_external_service_dependencies["provider_controller"].from_db.assert_called_once() From 529791ce627af4df88cbfb9268a7152b1d2b058c Mon Sep 17 00:00:00 2001 From: 17hz <0x149527@gmail.com> Date: Sun, 31 Aug 2025 17:03:36 +0800 Subject: [PATCH 13/96] fix: Variable Aggregator cannot select conversation variables (#24793) --- .../nodes/_base/components/add-variable-popup-with-position.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx b/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx index d0f971f849..6d54e38556 100644 --- a/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx +++ b/web/app/components/workflow/nodes/_base/components/add-variable-popup-with-position.tsx @@ -64,7 +64,7 @@ const AddVariablePopupWithPosition = ({ } as any, ], hideEnv: true, - hideChatVar: true, + hideChatVar: !isChatMode, isChatMode, filterVar: filterVar(outputType as VarType), }) From 24e2b72b716326f34ecb234ff467d3e26a24759e Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Sun, 31 Aug 2025 18:03:51 +0900 Subject: [PATCH 14/96] Update ast-grep pattern for session.query (#24828) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- .github/workflows/autofix.yml | 1 + api/controllers/console/app/message.py | 2 +- api/schedule/check_upgradable_plugin_task.py | 2 +- .../clean_workflow_runlogs_precise.py | 2 +- api/services/annotation_service.py | 4 ++-- .../clear_free_plan_tenant_expired_logs.py | 12 +++++------ api/services/dataset_service.py | 2 +- .../plugin/plugin_auto_upgrade_service.py | 6 +++--- .../services/test_annotation_service.py | 2 +- .../services/test_app_dsl_service.py | 6 +++--- ...est_clear_free_plan_tenant_expired_logs.py | 20 +++++++++---------- 11 files changed, 30 insertions(+), 29 deletions(-) diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 65f413af85..82ba95444f 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -26,6 +26,7 @@ jobs: - name: ast-grep run: | uvx --from ast-grep-cli sg --pattern 'db.session.query($WHATEVER).filter($HERE)' --rewrite 'db.session.query($WHATEVER).where($HERE)' -l py --update-all + uvx --from ast-grep-cli sg --pattern 'session.query($WHATEVER).filter($HERE)' --rewrite 'session.query($WHATEVER).where($HERE)' -l py --update-all - name: mdformat run: | uvx mdformat . 
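
The new rule mirrors the existing db.session.query pattern but also catches bare session.query(...) chains, such as the Session(db.engine) blocks touched below. As a rough illustration of what the rewrite does (the concrete model and filter expression here are borrowed from the hunks that follow, not part of the rule itself), the legacy Query.filter() form becomes the equivalent Query.where() form, which SQLAlchemy 1.4+ provides as a synonym:

    # before the autofix: legacy Query.filter() call
    message = session.query(Message).filter(Message.id == message_id).first()

    # after the ast-grep rewrite: the same query expressed with Query.where()
    message = session.query(Message).where(Message.id == message_id).first()

The remaining hunks in this patch apply that same .filter() to .where() substitution across the api controllers, schedulers, services, and their tests.
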
diff --git a/api/controllers/console/app/message.py b/api/controllers/console/app/message.py index fd86191a07..f0605a37f9 100644 --- a/api/controllers/console/app/message.py +++ b/api/controllers/console/app/message.py @@ -130,7 +130,7 @@ class MessageFeedbackApi(Resource): message_id = str(args["message_id"]) - message = db.session.query(Message).filter(Message.id == message_id, Message.app_id == app_model.id).first() + message = db.session.query(Message).where(Message.id == message_id, Message.app_id == app_model.id).first() if not message: raise NotFound("Message Not Exists.") diff --git a/api/schedule/check_upgradable_plugin_task.py b/api/schedule/check_upgradable_plugin_task.py index e27391b558..08a5cfce79 100644 --- a/api/schedule/check_upgradable_plugin_task.py +++ b/api/schedule/check_upgradable_plugin_task.py @@ -20,7 +20,7 @@ def check_upgradable_plugin_task(): strategies = ( db.session.query(TenantPluginAutoUpgradeStrategy) - .filter( + .where( TenantPluginAutoUpgradeStrategy.upgrade_time_of_day >= now_seconds_of_day, TenantPluginAutoUpgradeStrategy.upgrade_time_of_day < now_seconds_of_day + AUTO_UPGRADE_MINIMAL_CHECKING_INTERVAL, diff --git a/api/schedule/clean_workflow_runlogs_precise.py b/api/schedule/clean_workflow_runlogs_precise.py index 75057983f6..1a0362ec38 100644 --- a/api/schedule/clean_workflow_runlogs_precise.py +++ b/api/schedule/clean_workflow_runlogs_precise.py @@ -93,7 +93,7 @@ def _delete_batch_with_retry(workflow_run_ids: list[str], attempt_count: int) -> with db.session.begin_nested(): message_data = ( db.session.query(Message.id, Message.conversation_id) - .filter(Message.workflow_run_id.in_(workflow_run_ids)) + .where(Message.workflow_run_id.in_(workflow_run_ids)) .all() ) message_id_list = [msg.id for msg in message_data] diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index 45b246af1e..6603063c22 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -282,7 +282,7 @@ class AppAnnotationService: annotations_to_delete = ( db.session.query(MessageAnnotation, AppAnnotationSetting) .outerjoin(AppAnnotationSetting, MessageAnnotation.app_id == AppAnnotationSetting.app_id) - .filter(MessageAnnotation.id.in_(annotation_ids)) + .where(MessageAnnotation.id.in_(annotation_ids)) .all() ) @@ -493,7 +493,7 @@ class AppAnnotationService: def clear_all_annotations(cls, app_id: str) -> dict: app = ( db.session.query(App) - .filter(App.id == app_id, App.tenant_id == current_user.current_tenant_id, App.status == "normal") + .where(App.id == app_id, App.tenant_id == current_user.current_tenant_id, App.status == "normal") .first() ) diff --git a/api/services/clear_free_plan_tenant_expired_logs.py b/api/services/clear_free_plan_tenant_expired_logs.py index b28afcaa41..de00e74637 100644 --- a/api/services/clear_free_plan_tenant_expired_logs.py +++ b/api/services/clear_free_plan_tenant_expired_logs.py @@ -62,7 +62,7 @@ class ClearFreePlanTenantExpiredLogs: # Query records related to expired messages records = ( session.query(model) - .filter( + .where( model.message_id.in_(batch_message_ids), # type: ignore ) .all() @@ -101,7 +101,7 @@ class ClearFreePlanTenantExpiredLogs: except Exception: logger.exception("Failed to save %s records", table_name) - session.query(model).filter( + session.query(model).where( model.id.in_(record_ids), # type: ignore ).delete(synchronize_session=False) @@ -295,7 +295,7 @@ class ClearFreePlanTenantExpiredLogs: with Session(db.engine).no_autoflush as session: workflow_app_logs = 
( session.query(WorkflowAppLog) - .filter( + .where( WorkflowAppLog.tenant_id == tenant_id, WorkflowAppLog.created_at < datetime.datetime.now() - datetime.timedelta(days=days), ) @@ -321,9 +321,9 @@ class ClearFreePlanTenantExpiredLogs: workflow_app_log_ids = [workflow_app_log.id for workflow_app_log in workflow_app_logs] # delete workflow app logs - session.query(WorkflowAppLog).filter( - WorkflowAppLog.id.in_(workflow_app_log_ids), - ).delete(synchronize_session=False) + session.query(WorkflowAppLog).where(WorkflowAppLog.id.in_(workflow_app_log_ids)).delete( + synchronize_session=False + ) session.commit() click.echo( diff --git a/api/services/dataset_service.py b/api/services/dataset_service.py index 84860fd170..bbebb7a923 100644 --- a/api/services/dataset_service.py +++ b/api/services/dataset_service.py @@ -2346,7 +2346,7 @@ class SegmentService: def delete_segments(cls, segment_ids: list, document: Document, dataset: Dataset): segments = ( db.session.query(DocumentSegment.index_node_id, DocumentSegment.word_count) - .filter( + .where( DocumentSegment.id.in_(segment_ids), DocumentSegment.dataset_id == dataset.id, DocumentSegment.document_id == document.id, diff --git a/api/services/plugin/plugin_auto_upgrade_service.py b/api/services/plugin/plugin_auto_upgrade_service.py index 3774050445..174bed488d 100644 --- a/api/services/plugin/plugin_auto_upgrade_service.py +++ b/api/services/plugin/plugin_auto_upgrade_service.py @@ -10,7 +10,7 @@ class PluginAutoUpgradeService: with Session(db.engine) as session: return ( session.query(TenantPluginAutoUpgradeStrategy) - .filter(TenantPluginAutoUpgradeStrategy.tenant_id == tenant_id) + .where(TenantPluginAutoUpgradeStrategy.tenant_id == tenant_id) .first() ) @@ -26,7 +26,7 @@ class PluginAutoUpgradeService: with Session(db.engine) as session: exist_strategy = ( session.query(TenantPluginAutoUpgradeStrategy) - .filter(TenantPluginAutoUpgradeStrategy.tenant_id == tenant_id) + .where(TenantPluginAutoUpgradeStrategy.tenant_id == tenant_id) .first() ) if not exist_strategy: @@ -54,7 +54,7 @@ class PluginAutoUpgradeService: with Session(db.engine) as session: exist_strategy = ( session.query(TenantPluginAutoUpgradeStrategy) - .filter(TenantPluginAutoUpgradeStrategy.tenant_id == tenant_id) + .where(TenantPluginAutoUpgradeStrategy.tenant_id == tenant_id) .first() ) if not exist_strategy: diff --git a/api/tests/test_containers_integration_tests/services/test_annotation_service.py b/api/tests/test_containers_integration_tests/services/test_annotation_service.py index 92d93d601e..4184420880 100644 --- a/api/tests/test_containers_integration_tests/services/test_annotation_service.py +++ b/api/tests/test_containers_integration_tests/services/test_annotation_service.py @@ -674,7 +674,7 @@ class TestAnnotationService: history = ( db.session.query(AppAnnotationHitHistory) - .filter( + .where( AppAnnotationHitHistory.annotation_id == annotation.id, AppAnnotationHitHistory.message_id == message_id ) .first() diff --git a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py index fc614b2296..d83983d0ff 100644 --- a/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py +++ b/api/tests/test_containers_integration_tests/services/test_app_dsl_service.py @@ -166,7 +166,7 @@ class TestAppDslService: assert result.imported_dsl_version == "" # Verify no app was created in database - apps_count = 
db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + apps_count = db_session_with_containers.query(App).where(App.tenant_id == account.current_tenant_id).count() assert apps_count == 1 # Only the original test app def test_import_app_missing_yaml_url(self, db_session_with_containers, mock_external_service_dependencies): @@ -191,7 +191,7 @@ class TestAppDslService: assert result.imported_dsl_version == "" # Verify no app was created in database - apps_count = db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + apps_count = db_session_with_containers.query(App).where(App.tenant_id == account.current_tenant_id).count() assert apps_count == 1 # Only the original test app def test_import_app_invalid_import_mode(self, db_session_with_containers, mock_external_service_dependencies): @@ -215,7 +215,7 @@ class TestAppDslService: ) # Verify no app was created in database - apps_count = db_session_with_containers.query(App).filter(App.tenant_id == account.current_tenant_id).count() + apps_count = db_session_with_containers.query(App).where(App.tenant_id == account.current_tenant_id).count() assert apps_count == 1 # Only the original test app def test_export_dsl_chat_app_success(self, db_session_with_containers, mock_external_service_dependencies): diff --git a/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py b/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py index dd2bc21814..5099362e00 100644 --- a/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py +++ b/api/tests/unit_tests/services/test_clear_free_plan_tenant_expired_logs.py @@ -57,7 +57,7 @@ class TestClearFreePlanTenantExpiredLogs: def test_clear_message_related_tables_no_records_found(self, mock_session, sample_message_ids): """Test when no related records are found.""" with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: - mock_session.query.return_value.filter.return_value.all.return_value = [] + mock_session.query.return_value.where.return_value.all.return_value = [] ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) @@ -70,7 +70,7 @@ class TestClearFreePlanTenantExpiredLogs: ): """Test when records are found and have to_dict method.""" with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: - mock_session.query.return_value.filter.return_value.all.return_value = sample_records + mock_session.query.return_value.where.return_value.all.return_value = sample_records ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) @@ -101,7 +101,7 @@ class TestClearFreePlanTenantExpiredLogs: records.append(record) # Mock records for first table only, empty for others - mock_session.query.return_value.filter.return_value.all.side_effect = [ + mock_session.query.return_value.where.return_value.all.side_effect = [ records, [], [], @@ -123,13 +123,13 @@ class TestClearFreePlanTenantExpiredLogs: with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: mock_storage.save.side_effect = Exception("Storage error") - mock_session.query.return_value.filter.return_value.all.return_value = sample_records + mock_session.query.return_value.where.return_value.all.return_value = sample_records # Should not raise exception ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", 
sample_message_ids) # Should still delete records even if backup fails - assert mock_session.query.return_value.filter.return_value.delete.called + assert mock_session.query.return_value.where.return_value.delete.called def test_clear_message_related_tables_serialization_error_continues(self, mock_session, sample_message_ids): """Test that method continues even when record serialization fails.""" @@ -138,30 +138,30 @@ class TestClearFreePlanTenantExpiredLogs: record.id = "record-1" record.to_dict.side_effect = Exception("Serialization error") - mock_session.query.return_value.filter.return_value.all.return_value = [record] + mock_session.query.return_value.where.return_value.all.return_value = [record] # Should not raise exception ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) # Should still delete records even if serialization fails - assert mock_session.query.return_value.filter.return_value.delete.called + assert mock_session.query.return_value.where.return_value.delete.called def test_clear_message_related_tables_deletion_called(self, mock_session, sample_message_ids, sample_records): """Test that deletion is called for found records.""" with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: - mock_session.query.return_value.filter.return_value.all.return_value = sample_records + mock_session.query.return_value.where.return_value.all.return_value = sample_records ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) # Should call delete for each table that has records - assert mock_session.query.return_value.filter.return_value.delete.called + assert mock_session.query.return_value.where.return_value.delete.called def test_clear_message_related_tables_logging_output( self, mock_session, sample_message_ids, sample_records, capsys ): """Test that logging output is generated.""" with patch("services.clear_free_plan_tenant_expired_logs.storage") as mock_storage: - mock_session.query.return_value.filter.return_value.all.return_value = sample_records + mock_session.query.return_value.where.return_value.all.return_value = sample_records ClearFreePlanTenantExpiredLogs._clear_message_related_tables(mock_session, "tenant-123", sample_message_ids) From b4b71ded472e6b09a20b229bae32ef353be0ba58 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Sun, 31 Aug 2025 17:07:15 +0800 Subject: [PATCH 15/96] chore: remove unused i18n keys (#24803) --- web/i18n/de-DE/app-debug.ts | 3 - web/i18n/de-DE/dataset-documents.ts | 1 - web/i18n/es-ES/app-debug.ts | 3 - web/i18n/es-ES/dataset-documents.ts | 1 - web/i18n/fa-IR/billing.ts | 22 ----- web/i18n/fa-IR/common.ts | 1 - web/i18n/fa-IR/dataset-creation.ts | 2 - web/i18n/fa-IR/dataset-documents.ts | 2 - web/i18n/fa-IR/dataset-hit-testing.ts | 1 - web/i18n/hi-IN/app-debug.ts | 39 --------- web/i18n/hi-IN/billing.ts | 16 ---- web/i18n/hi-IN/common.ts | 1 - web/i18n/hi-IN/dataset-creation.ts | 2 - web/i18n/hi-IN/dataset-documents.ts | 2 - web/i18n/hi-IN/dataset-hit-testing.ts | 1 - web/i18n/it-IT/app-debug.ts | 22 ----- web/i18n/it-IT/billing.ts | 16 ---- web/i18n/it-IT/common.ts | 2 - web/i18n/it-IT/dataset-creation.ts | 2 - web/i18n/it-IT/dataset-documents.ts | 2 - web/i18n/it-IT/dataset-hit-testing.ts | 1 - web/i18n/ja-JP/app-debug.ts | 3 - web/i18n/ja-JP/dataset-documents.ts | 1 - web/i18n/ko-KR/app-debug.ts | 3 - web/i18n/ko-KR/dataset-documents.ts | 1 - web/i18n/pl-PL/app-debug.ts 
| 23 ----- web/i18n/pl-PL/billing.ts | 16 ---- web/i18n/pl-PL/common.ts | 1 - web/i18n/pl-PL/dataset-creation.ts | 2 - web/i18n/pl-PL/dataset-documents.ts | 2 - web/i18n/pl-PL/dataset-hit-testing.ts | 1 - web/i18n/pt-BR/app-debug.ts | 18 ---- web/i18n/pt-BR/billing.ts | 16 ---- web/i18n/pt-BR/common.ts | 1 - web/i18n/pt-BR/dataset-creation.ts | 2 - web/i18n/pt-BR/dataset-documents.ts | 2 - web/i18n/pt-BR/dataset-hit-testing.ts | 1 - web/i18n/ro-RO/app-debug.ts | 18 ---- web/i18n/ro-RO/billing.ts | 16 ---- web/i18n/ro-RO/common.ts | 1 - web/i18n/ro-RO/dataset-creation.ts | 2 - web/i18n/ro-RO/dataset-documents.ts | 2 - web/i18n/ro-RO/dataset-hit-testing.ts | 1 - web/i18n/ru-RU/app-debug.ts | 3 - web/i18n/ru-RU/billing.ts | 16 ---- web/i18n/ru-RU/common.ts | 1 - web/i18n/ru-RU/dataset-creation.ts | 2 - web/i18n/ru-RU/dataset-documents.ts | 2 - web/i18n/ru-RU/dataset-hit-testing.ts | 1 - web/i18n/sl-SI/app-debug.ts | 29 ------- web/i18n/sl-SI/billing.ts | 16 ---- web/i18n/sl-SI/common.ts | 118 -------------------------- web/i18n/sl-SI/dataset-creation.ts | 2 - web/i18n/sl-SI/dataset-documents.ts | 2 - web/i18n/sl-SI/dataset-hit-testing.ts | 1 - web/i18n/th-TH/app-debug.ts | 3 - web/i18n/th-TH/billing.ts | 16 ---- web/i18n/th-TH/common.ts | 1 - web/i18n/th-TH/dataset-creation.ts | 2 - web/i18n/th-TH/dataset-documents.ts | 2 - web/i18n/th-TH/dataset-hit-testing.ts | 1 - web/i18n/tr-TR/app-debug.ts | 3 - web/i18n/tr-TR/billing.ts | 16 ---- web/i18n/tr-TR/common.ts | 1 - web/i18n/tr-TR/dataset-creation.ts | 2 - web/i18n/tr-TR/dataset-documents.ts | 2 - web/i18n/tr-TR/dataset-hit-testing.ts | 1 - web/i18n/uk-UA/app-debug.ts | 18 ---- web/i18n/uk-UA/billing.ts | 16 ---- web/i18n/uk-UA/common.ts | 1 - web/i18n/uk-UA/dataset-creation.ts | 2 - web/i18n/uk-UA/dataset-documents.ts | 3 - web/i18n/uk-UA/dataset-hit-testing.ts | 1 - web/i18n/vi-VN/app-debug.ts | 18 ---- web/i18n/vi-VN/billing.ts | 16 ---- web/i18n/vi-VN/common.ts | 1 - web/i18n/vi-VN/dataset-creation.ts | 2 - web/i18n/vi-VN/dataset-documents.ts | 2 - web/i18n/vi-VN/dataset-hit-testing.ts | 1 - web/i18n/zh-Hans/app-debug.ts | 2 - web/i18n/zh-Hans/dataset-documents.ts | 1 - web/i18n/zh-Hant/app-debug.ts | 3 - web/i18n/zh-Hant/billing.ts | 16 ---- web/i18n/zh-Hant/dataset-documents.ts | 1 - 84 files changed, 622 deletions(-) diff --git a/web/i18n/de-DE/app-debug.ts b/web/i18n/de-DE/app-debug.ts index efa9eb3f7e..fc65959622 100644 --- a/web/i18n/de-DE/app-debug.ts +++ b/web/i18n/de-DE/app-debug.ts @@ -529,9 +529,6 @@ const translation = { title: 'Eingabeaufforderungs-Generator', apply: 'Anwenden', overwriteTitle: 'Vorhandene Konfiguration überschreiben?', - instructionPlaceHolder: 'Schreiben Sie klare und spezifische Anweisungen.', - noDataLine1: 'Beschreiben Sie links Ihren Anwendungsfall,', - noDataLine2: 'Die Orchestrierungsvorschau wird hier angezeigt.', instruction: 'Anweisungen', tryIt: 'Versuch es', generate: 'Erzeugen', diff --git a/web/i18n/de-DE/dataset-documents.ts b/web/i18n/de-DE/dataset-documents.ts index b17230354b..438bcb708d 100644 --- a/web/i18n/de-DE/dataset-documents.ts +++ b/web/i18n/de-DE/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: 'Synchronisieren', resume: 'Fortsetzen', pause: 'Pause', - download: 'Datei herunterladen', }, index: { enable: 'Aktivieren', diff --git a/web/i18n/es-ES/app-debug.ts b/web/i18n/es-ES/app-debug.ts index 3b90013dd3..e70f91281b 100644 --- a/web/i18n/es-ES/app-debug.ts +++ b/web/i18n/es-ES/app-debug.ts @@ -521,17 +521,14 @@ const translation = { }, apply: 'Aplicar', 
instruction: 'Instrucciones', - noDataLine2: 'La vista previa de orquestación se mostrará aquí.', description: 'El generador de mensajes utiliza el modelo configurado para optimizar los mensajes para una mayor calidad y una mejor estructura. Escriba instrucciones claras y detalladas.', generate: 'Generar', title: 'Generador de avisos', tryIt: 'Pruébalo', overwriteMessage: 'La aplicación de este mensaje anulará la configuración existente.', resTitle: 'Mensaje generado', - noDataLine1: 'Describa su caso de uso a la izquierda,', overwriteTitle: '¿Anular la configuración existente?', loading: 'Orquestando la aplicación para usted...', - instructionPlaceHolder: 'Escriba instrucciones claras y específicas.', to: 'a', dismiss: 'Descartar', press: 'Prensa', diff --git a/web/i18n/es-ES/dataset-documents.ts b/web/i18n/es-ES/dataset-documents.ts index 408c4bd0e0..3775873b40 100644 --- a/web/i18n/es-ES/dataset-documents.ts +++ b/web/i18n/es-ES/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'Sincronizar', resume: 'Reanudar', pause: 'Pausa', - download: 'Descargar archivo', }, index: { enable: 'Habilitar', diff --git a/web/i18n/fa-IR/billing.ts b/web/i18n/fa-IR/billing.ts index 68eff70426..a68a47a628 100644 --- a/web/i18n/fa-IR/billing.ts +++ b/web/i18n/fa-IR/billing.ts @@ -114,28 +114,12 @@ const translation = { name: 'سازمانی', description: 'دریافت کامل‌ترین قابلیت‌ها و پشتیبانی برای سیستم‌های بزرگ و بحرانی.', includesTitle: 'همه چیز در طرح تیم، به علاوه:', - features: { - 4: 'Sso', - 1: 'مجوز جواز تجاری', - 2: 'ویژگی های انحصاری سازمانی', - 8: 'پشتیبانی فنی حرفه ای', - 5: 'SLA های مذاکره شده توسط Dify Partners', - 6: 'امنیت و کنترل پیشرفته', - 3: 'فضاهای کاری چندگانه و مدیریت سازمانی', - 7: 'به روز رسانی و نگهداری توسط Dify به طور رسمی', - 0: 'راه حل های استقرار مقیاس پذیر در سطح سازمانی', - }, price: 'سفارشی', btnText: 'تماس با فروش', for: 'برای تیم‌های بزرگ', priceTip: 'فقط صورتحساب سالیانه', }, community: { - features: { - 1: 'فضای کاری واحد', - 2: 'با مجوز منبع باز Dify مطابقت دارد', - 0: 'تمام ویژگی های اصلی در مخزن عمومی منتشر شده است', - }, btnText: 'شروع کنید با جامعه', price: 'رایگان', includesTitle: 'ویژگی‌های رایگان:', @@ -144,12 +128,6 @@ const translation = { for: 'برای کاربران فردی، تیم‌های کوچک یا پروژه‌های غیر تجاری', }, premium: { - features: { - 1: 'فضای کاری واحد', - 3: 'پشتیبانی از ایمیل و چت اولویت دار', - 2: 'لوگوی وب اپلیکیشن و سفارشی سازی برندینگ', - 0: 'قابلیت اطمینان خود مدیریت شده توسط ارائه دهندگان مختلف ابر', - }, btnText: 'گرفتن نسخه پریمیوم در', description: 'برای سازمان‌ها و تیم‌های میان‌رده', price: 'قابل گسترش', diff --git a/web/i18n/fa-IR/common.ts b/web/i18n/fa-IR/common.ts index 5ca5468ebf..3d240f4594 100644 --- a/web/i18n/fa-IR/common.ts +++ b/web/i18n/fa-IR/common.ts @@ -202,7 +202,6 @@ const translation = { showAppLength: 'نمایش {{length}} برنامه', delete: 'حذف حساب کاربری', deleteTip: 'حذف حساب کاربری شما تمام داده‌های شما را به طور دائمی پاک می‌کند و قابل بازیابی نیست.', - deleteConfirmTip: 'برای تأیید، لطفاً موارد زیر را از ایمیل ثبت‌نام شده خود به این آدرس ارسال کنید ', account: 'حساب', myAccount: 'حساب من', studio: 'استودیو Dify', diff --git a/web/i18n/fa-IR/dataset-creation.ts b/web/i18n/fa-IR/dataset-creation.ts index 105753a249..2fd2c210fa 100644 --- a/web/i18n/fa-IR/dataset-creation.ts +++ b/web/i18n/fa-IR/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'ایجاد دانش', - update: 'افزودن داده', fallbackRoute: 'دانش', }, one: 'انتخاب منبع داده', diff --git a/web/i18n/fa-IR/dataset-documents.ts 
b/web/i18n/fa-IR/dataset-documents.ts index b9d76e5828..5417f317a7 100644 --- a/web/i18n/fa-IR/dataset-documents.ts +++ b/web/i18n/fa-IR/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'همگام‌سازی', resume: 'ادامه', pause: 'مکث', - download: 'دانلود فایل', }, index: { enable: 'فعال کردن', @@ -342,7 +341,6 @@ const translation = { keywords: 'کلیدواژه‌ها', addKeyWord: 'اضافه کردن کلیدواژه', keywordError: 'حداکثر طول کلیدواژه ۲۰ کاراکتر است', - characters: 'کاراکترها', hitCount: 'تعداد بازیابی', vectorHash: 'هش برداری: ', questionPlaceholder: 'سؤال را اینجا اضافه کنید', diff --git a/web/i18n/fa-IR/dataset-hit-testing.ts b/web/i18n/fa-IR/dataset-hit-testing.ts index 99ce31b870..e17dfd042e 100644 --- a/web/i18n/fa-IR/dataset-hit-testing.ts +++ b/web/i18n/fa-IR/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'آزمون بازیابی', desc: 'آزمون اثرگذاری دانش بر اساس متن پرسش داده شده.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'اخیرها', table: { header: { source: 'منبع', diff --git a/web/i18n/hi-IN/app-debug.ts b/web/i18n/hi-IN/app-debug.ts index 192f614dc7..b860e70ac8 100644 --- a/web/i18n/hi-IN/app-debug.ts +++ b/web/i18n/hi-IN/app-debug.ts @@ -244,25 +244,6 @@ const translation = { }, }, automatic: { - title: 'स्वचालित अनुप्रयोग आयोजन', - description: - 'अपना परिदृश्य वर्णित करें, डिफाई आपके लिए एक अनुप्रयोग आयोजित करेगा।', - intendedAudience: 'लक्षित दर्शक कौन हैं?', - intendedAudiencePlaceHolder: 'उदा. छात्र', - solveProblem: 'वे कौन सी समस्याएं हैं जिन्हें एआई उनके लिए हल कर सकता है?', - solveProblemPlaceHolder: - 'उदा. लंबे रिपोर्ट और लेख से अंतर्दृष्टि निकालें और जानकारी को संक्षेप में प्रस्तुत करें', - generate: 'उत्पन्न करें', - audiencesRequired: 'दर्शकों की आवश्यकता है', - problemRequired: 'समस्या आवश्यक है', - resTitle: 'हमने आपके लिए निम्नलिखित अनुप्रयोग आयोजित किया है।', - apply: 'इस आयोजन को लागू करें', - noData: - 'बाईं ओर अपने उपयोग मामले का वर्णन करें, आयोजन पूर्वावलोकन यहाँ दिखाई देगा।', - loading: 'आपके लिए अनुप्रयोग आयोजित कर रहे हैं...', - overwriteTitle: 'मौजूदा कॉन्फ़िगरेशन को अधिलेखित करें?', - overwriteMessage: - 'इस आयोजन को लागू करने से मौजूदा कॉन्फ़िगरेशन अधिलेखित हो जाएगा।', }, resetConfig: { title: 'रीसेट की पुष्टि करें?', @@ -529,31 +510,14 @@ const translation = { enabled: 'सक्षम', }, fileUpload: { - title: 'फ़ाइल अपलोड', - description: 'चैट इनपुट बॉक्स छवियों, दस्तावेज़ों और अन्य फ़ाइलों को अपलोड करने की अनुमति देता है।', - supportedTypes: 'समर्थित फ़ाइल प्रकार', - numberLimit: 'अधिकतम अपलोड', - modalTitle: 'फ़ाइल अपलोड सेटिंग', }, imageUpload: { - title: 'छवि अपलोड', - description: 'छवियों को अपलोड करने की अनुमति दें।', - supportedTypes: 'समर्थित फ़ाइल प्रकार', - numberLimit: 'अधिकतम अपलोड', - modalTitle: 'छवि अपलोड सेटिंग', }, bar: { - empty: 'वेब ऐप उपयोगकर्ता अनुभव को बेहतर बनाने के लिए फीचर सक्षम करें', - enableText: 'फीचर सक्षम', - manage: 'प्रबंधित करें', }, documentUpload: { - title: 'दस्तावेज़', - description: 'दस्तावेज़ सक्षम करने से मॉडल दस्तावेज़ों को स्वीकार कर सकेगा और उनके बारे में प्रश्नों का उत्तर दे सकेगा।', }, audioUpload: { - title: 'ऑडियो', - description: 'ऑडियो सक्षम करने से मॉडल ट्रांसक्रिप्शन और विश्लेषण के लिए ऑडियो फ़ाइलों को प्रोसेस कर सकेगा।', }, }, codegen: { @@ -613,14 +577,11 @@ const translation = { }, tryIt: 'इसे आजमाओ', generate: 'जनरेट करें', - instructionPlaceHolder: 'स्पष्ट और विशेष निर्देश लिखें।', title: 'प्रॉम्प्ट जनरेटर', apply: 'अनुप्रयोग करें', - noDataLine1: 'बाईं ओर अपने उपयोग केस का वर्णन करें,', instruction: 'अनुदेश', loading: 'आपके लिए एप्लिकेशन का आयोजन कर रहे हैं...', 
overwriteTitle: 'मौजूदा कॉन्फ़िगरेशन को अधिलेखित करें?', - noDataLine2: 'यहाँ सम्प्रेषण पूर्वावलोकन दिखाया जाएगा।', resTitle: 'जनित प्रॉम्प्ट', overwriteMessage: 'इस प्रॉम्प्ट को लागू करने से मौजूदा कॉन्फ़िगरेशन को ओवरराइड कर दिया जाएगा।', description: 'प्रॉम्प्ट जेनरेटर उच्च गुणवत्ता और बेहतर संरचना के लिए प्रॉम्प्ट्स को ऑप्टिमाइज़ करने के लिए कॉन्फ़िगर किए गए मॉडल का उपयोग करता है। कृपया स्पष्ट और विस्तृत निर्देश लिखें।', diff --git a/web/i18n/hi-IN/billing.ts b/web/i18n/hi-IN/billing.ts index 25c4298628..3c1fadca36 100644 --- a/web/i18n/hi-IN/billing.ts +++ b/web/i18n/hi-IN/billing.ts @@ -126,15 +126,6 @@ const translation = { 'बड़े पैमाने पर मिशन-क्रिटिकल सिस्टम के लिए पूर्ण क्षमताएं और समर्थन प्राप्त करें।', includesTitle: 'टीम योजना में सब कुछ, साथ में:', features: { - 1: 'Commercial License Authorization', - 4: 'SSO', - 6: 'उन्नत सुरक्षा और नियंत्रण', - 2: 'विशेष उद्यम सुविधाएँ', - 3: 'अनेक कार्यक्षेत्र और उद्यम प्रबंधक', - 5: 'डिफाई पार्टनर्स द्वारा बातचीत किए गए एसएलए', - 8: 'प्रोफेशनल तकनीकी समर्थन', - 7: 'डीफाई द्वारा आधिकारिक रूप से अपडेट और रखरखाव', - 0: 'उद्योग स्तर के बड़े पैमाने पर वितरण समाधान', }, price: 'कस्टम', btnText: 'बिक्री से संपर्क करें', @@ -143,9 +134,6 @@ const translation = { }, community: { features: { - 1: 'एकल कार्यक्षेत्र', - 2: 'डिफी ओपन सोर्स लाइसेंस के अनुपालन में', - 0: 'सभी मुख्य सुविधाएं सार्वजनिक संग्रह के तहत जारी की गई हैं।', }, description: 'व्यक्तिगत उपयोगकर्ताओं, छोटे टीमों, या गैर-व्यावसायिक परियोजनाओं के लिए', for: 'व्यक्तिगत उपयोगकर्ताओं, छोटे टीमों, या गैर-व्यावसायिक परियोजनाओं के लिए', @@ -156,10 +144,6 @@ const translation = { }, premium: { features: { - 1: 'एकल कार्यक्षेत्र', - 2: 'वेब ऐप लोगो और ब्रांडिंग कस्टमाइजेशन', - 3: 'प्राथमिकता ईमेल और चैट समर्थन', - 0: 'विभिन्न क्लाउड प्रदाताओं द्वारा आत्म-प्रबंधित विश्वसनीयता', }, priceTip: 'क्लाउड मार्केटप्लेस के आधार पर', name: 'प्रीमियम', diff --git a/web/i18n/hi-IN/common.ts b/web/i18n/hi-IN/common.ts index eea8168f43..3115cda56a 100644 --- a/web/i18n/hi-IN/common.ts +++ b/web/i18n/hi-IN/common.ts @@ -206,7 +206,6 @@ const translation = { langGeniusAccountTip: 'आपका Dify खाता और संबंधित उपयोगकर्ता डेटा।', editName: 'नाम संपादित करें', showAppLength: '{{length}} ऐप्स दिखाएं', - deleteConfirmTip: 'पुष्टि करने के लिए, कृपया अपने पंजीकृत ईमेल से निम्नलिखित भेजें', delete: 'खाता हटाएं', deleteTip: 'अपना खाता हटाने से आपका सारा डेटा स्थायी रूप से मिट जाएगा और इसे पुनर्प्राप्त नहीं किया जा सकता है।', account: 'खाता', diff --git a/web/i18n/hi-IN/dataset-creation.ts b/web/i18n/hi-IN/dataset-creation.ts index c91946302c..7e49dd86bc 100644 --- a/web/i18n/hi-IN/dataset-creation.ts +++ b/web/i18n/hi-IN/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'ज्ञान बनाएं', - update: 'डेटा जोड़ें', fallbackRoute: 'ज्ञान', }, one: 'डेटा स्रोत चुनें', diff --git a/web/i18n/hi-IN/dataset-documents.ts b/web/i18n/hi-IN/dataset-documents.ts index 15a42b1b50..7cf58f12a9 100644 --- a/web/i18n/hi-IN/dataset-documents.ts +++ b/web/i18n/hi-IN/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'सिंक्रोनाइज़ करें', resume: 'रिज़्यूमे', pause: 'रोकें', - download: 'फ़ाइल डाउनलोड करें', }, index: { enable: 'सक्रिय करें', @@ -344,7 +343,6 @@ const translation = { keywords: 'कीवर्ड', addKeyWord: 'कीवर्ड जोड़ें', keywordError: 'कीवर्ड की अधिकतम लंबाई 20 अक्षर हो सकती है', - characters: 'अक्षर', hitCount: 'पुनर्प्राप्ति गणना', vectorHash: 'वेक्टर हैश: ', questionPlaceholder: 'यहाँ प्रश्न जोड़ें', diff --git a/web/i18n/hi-IN/dataset-hit-testing.ts 
b/web/i18n/hi-IN/dataset-hit-testing.ts index fd562062b3..9da71c3c8c 100644 --- a/web/i18n/hi-IN/dataset-hit-testing.ts +++ b/web/i18n/hi-IN/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'पुनर्प्राप्ति परीक्षण', desc: 'दिए गए प्रश्न पाठ के आधार पर ज्ञान की प्रभावशीलता का परीक्षण करें।', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'हाल के', table: { header: { source: 'स्रोत', diff --git a/web/i18n/it-IT/app-debug.ts b/web/i18n/it-IT/app-debug.ts index 39fd1886ab..89204cab57 100644 --- a/web/i18n/it-IT/app-debug.ts +++ b/web/i18n/it-IT/app-debug.ts @@ -246,25 +246,6 @@ const translation = { }, }, automatic: { - title: 'Orchestrazione automatizzata delle applicazioni', - description: - 'Descrivi il tuo scenario, Dify orchestrerà un\'applicazione per te.', - intendedAudience: 'Chi è il pubblico di destinazione?', - intendedAudiencePlaceHolder: 'es. Studente', - solveProblem: 'Quali problemi sperano che l\'IA possa risolvere per loro?', - solveProblemPlaceHolder: - 'es. Estrarre approfondimenti e riassumere informazioni da lunghi rapporti e articoli', - generate: 'Genera', - audiencesRequired: 'Pubblico richiesto', - problemRequired: 'Problema richiesto', - resTitle: 'Abbiamo orchestrato la seguente applicazione per te.', - apply: 'Applica questa orchestrazione', - noData: - 'Descrivi il tuo caso d\'uso a sinistra, l\'anteprima dell\'orchestrazione verrà mostrata qui.', - loading: 'Orchestrazione dell\'applicazione per te...', - overwriteTitle: 'Sovrascrivere la configurazione esistente?', - overwriteMessage: - 'Applicando questa orchestrazione sovrascriverai la configurazione esistente.', }, resetConfig: { title: 'Confermare il ripristino?', @@ -587,9 +568,7 @@ const translation = { }, }, instruction: 'Disposizioni', - noDataLine1: 'Descrivi il tuo caso d\'uso a sinistra,', title: 'Generatore di prompt', - instructionPlaceHolder: 'Scrivi istruzioni chiare e specifiche.', loading: 'Orchestrare l\'applicazione per te...', apply: 'Applicare', overwriteMessage: 'L\'applicazione di questo prompt sovrascriverà la configurazione esistente.', @@ -597,7 +576,6 @@ const translation = { overwriteTitle: 'Sovrascrivere la configurazione esistente?', resTitle: 'Prompt generato', generate: 'Generare', - noDataLine2: 'L\'anteprima dell\'orchestrazione verrà visualizzata qui.', tryIt: 'Provalo', to: 'a', dismiss: 'Ignora', diff --git a/web/i18n/it-IT/billing.ts b/web/i18n/it-IT/billing.ts index 43d285f652..8b37d83a2d 100644 --- a/web/i18n/it-IT/billing.ts +++ b/web/i18n/it-IT/billing.ts @@ -126,15 +126,6 @@ const translation = { 'Ottieni tutte le capacità e il supporto per sistemi mission-critical su larga scala.', includesTitle: 'Tutto nel piano Team, più:', features: { - 3: 'Spazi di lavoro multipli e gestione aziendale', - 2: 'Funzionalità esclusive per le aziende', - 1: 'Autorizzazione Licenza Commerciale', - 5: 'SLA negoziati dai partner Dify', - 4: 'SSO', - 6: 'Sicurezza e controlli avanzati', - 8: 'Supporto tecnico professionale', - 7: 'Aggiornamenti e manutenzione da parte di Dify ufficialmente', - 0: 'Soluzioni di distribuzione scalabili di livello aziendale', }, price: 'Personalizzato', for: 'Per team di grandi dimensioni', @@ -143,9 +134,6 @@ const translation = { }, community: { features: { - 1: 'Area di lavoro singola', - 2: 'Conforme alla licenza Open Source Dify', - 0: 'Tutte le funzionalità principali rilasciate nel repository pubblico', }, name: 'Comunità', btnText: 'Inizia con la comunità', @@ -156,10 +144,6 @@ const translation = { }, premium: { features: { - 3: 
'Supporto prioritario via e-mail e chat', - 1: 'Area di lavoro singola', - 2: 'Personalizzazione del logo e del marchio WebApp', - 0: 'Affidabilità autogestita da vari fornitori di servizi cloud', }, name: 'Premium', priceTip: 'Basato su Cloud Marketplace', diff --git a/web/i18n/it-IT/common.ts b/web/i18n/it-IT/common.ts index 5b8ece7559..4c2d7dc75e 100644 --- a/web/i18n/it-IT/common.ts +++ b/web/i18n/it-IT/common.ts @@ -209,8 +209,6 @@ const translation = { delete: 'Elimina Account', deleteTip: 'Eliminando il tuo account cancellerai permanentemente tutti i tuoi dati e non sarà possibile recuperarli.', - deleteConfirmTip: - 'Per confermare, invia il seguente messaggio dalla tua email registrata a ', myAccount: 'Il mio account', account: 'Conto', studio: 'Dify Studio', diff --git a/web/i18n/it-IT/dataset-creation.ts b/web/i18n/it-IT/dataset-creation.ts index 89b739a0ce..a0efa8d2c4 100644 --- a/web/i18n/it-IT/dataset-creation.ts +++ b/web/i18n/it-IT/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Crea Conoscenza', - update: 'Aggiungi dati', fallbackRoute: 'Conoscenza', }, one: 'Scegli fonte dati', diff --git a/web/i18n/it-IT/dataset-documents.ts b/web/i18n/it-IT/dataset-documents.ts index 404fb67bf7..23f0b0f3b7 100644 --- a/web/i18n/it-IT/dataset-documents.ts +++ b/web/i18n/it-IT/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'Sincronizza', resume: 'Riprendi', pause: 'Pausa', - download: 'Scarica file', }, index: { enable: 'Abilita', @@ -345,7 +344,6 @@ const translation = { keywords: 'Parole Chiave', addKeyWord: 'Aggiungi parola chiave', keywordError: 'La lunghezza massima della parola chiave è 20', - characters: 'caratteri', hitCount: 'Conteggio recuperi', vectorHash: 'Hash del vettore: ', questionPlaceholder: 'aggiungi domanda qui', diff --git a/web/i18n/it-IT/dataset-hit-testing.ts b/web/i18n/it-IT/dataset-hit-testing.ts index 95dd3d2aee..96f343b137 100644 --- a/web/i18n/it-IT/dataset-hit-testing.ts +++ b/web/i18n/it-IT/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Test di Recupero', desc: 'Testa l\'effetto di recupero della Conoscenza basato sul testo di query fornito.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'Recenti', table: { header: { source: 'Fonte', diff --git a/web/i18n/ja-JP/app-debug.ts b/web/i18n/ja-JP/app-debug.ts index 933f5f6b70..9cb3da5fda 100644 --- a/web/i18n/ja-JP/app-debug.ts +++ b/web/i18n/ja-JP/app-debug.ts @@ -248,11 +248,8 @@ const translation = { description: 'プロンプト生成器は、設定済みのモデルを使って、高品質で構造的に優れたプロンプトを作成するための最適化を行います。具体的で詳細な指示をお書きください。', tryIt: '試してみる', instruction: '指示', - instructionPlaceHolder: '具体的で明確な指示を入力してください。', generate: '生成', resTitle: '生成されたプロンプト', - noDataLine1: '左側に使用例を記入してください,', - noDataLine2: 'オーケストレーションのプレビューがこちらに表示されます。', apply: '適用', loading: 'アプリケーションを処理中です', overwriteTitle: '既存の設定を上書きしますか?', diff --git a/web/i18n/ja-JP/dataset-documents.ts b/web/i18n/ja-JP/dataset-documents.ts index d22e3018ed..b2638f1b56 100644 --- a/web/i18n/ja-JP/dataset-documents.ts +++ b/web/i18n/ja-JP/dataset-documents.ts @@ -32,7 +32,6 @@ const translation = { sync: '同期', pause: '一時停止', resume: '再開', - download: 'ファイルをダウンロード', }, index: { enable: '有効にする', diff --git a/web/i18n/ko-KR/app-debug.ts b/web/i18n/ko-KR/app-debug.ts index 54fa47b8ae..7b4dcf674f 100644 --- a/web/i18n/ko-KR/app-debug.ts +++ b/web/i18n/ko-KR/app-debug.ts @@ -527,10 +527,7 @@ const translation = { title: '프롬프트 생성기', overwriteTitle: '기존 구성을 재정의하시겠습니까?', loading: '응용 프로그램 오케스트레이션...', - 
instructionPlaceHolder: '명확하고 구체적인 지침을 작성하십시오.', - noDataLine2: '오케스트레이션 미리 보기가 여기에 표시됩니다.', overwriteMessage: '이 프롬프트를 적용하면 기존 구성이 재정의됩니다.', - noDataLine1: '왼쪽에 사용 사례를 설명하십시오.', description: '프롬프트 생성기는 구성된 모델을 사용하여 더 높은 품질과 더 나은 구조를 위해 프롬프트를 최적화합니다. 명확하고 상세한 지침을 작성하십시오.', to: '에게', press: '프레스', diff --git a/web/i18n/ko-KR/dataset-documents.ts b/web/i18n/ko-KR/dataset-documents.ts index 3aa3e9239f..aaa9ee688f 100644 --- a/web/i18n/ko-KR/dataset-documents.ts +++ b/web/i18n/ko-KR/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: '동기화', resume: '재개', pause: '일시 중지', - download: '파일 다운로드', }, index: { enable: '활성화', diff --git a/web/i18n/pl-PL/app-debug.ts b/web/i18n/pl-PL/app-debug.ts index b7ddcbb129..9e9bac1c57 100644 --- a/web/i18n/pl-PL/app-debug.ts +++ b/web/i18n/pl-PL/app-debug.ts @@ -244,26 +244,6 @@ const translation = { }, }, automatic: { - title: 'Zautomatyzowana orkiestracja aplikacji', - description: - 'Opisz swój scenariusz, Dify zorkiestruje aplikację dla Ciebie.', - intendedAudience: 'Dla kogo jest przeznaczona ta aplikacja?', - intendedAudiencePlaceHolder: 'np. Uczeń', - solveProblem: - 'Jakie problemy mają nadzieję, że AI może rozwiązać dla nich?', - solveProblemPlaceHolder: - 'np. Wyciąganie wniosków i podsumowanie informacji z długich raportów i artykułów', - generate: 'Generuj', - audiencesRequired: 'Wymagana publiczności', - problemRequired: 'Wymagany problem', - resTitle: 'Stworzyliśmy następującą aplikację dla Ciebie.', - apply: 'Zastosuj tę orkiestrację', - noData: - 'Opisz swój przypadek po lewej, podgląd orkiestracji pojawi się tutaj.', - loading: 'Orkiestracja aplikacji dla Ciebie...', - overwriteTitle: 'Zastąpić istniejącą konfigurację?', - overwriteMessage: - 'Zastosowanie tej orkiestracji zastąpi istniejącą konfigurację.', }, resetConfig: { title: 'Potwierdź reset?', @@ -582,19 +562,16 @@ const translation = { name: 'Polerka do pisania', }, }, - instructionPlaceHolder: 'Napisz jasne i konkretne instrukcje.', instruction: 'Instrukcje', generate: 'Stworzyć', tryIt: 'Spróbuj', overwriteMessage: 'Zastosowanie tego monitu spowoduje zastąpienie istniejącej konfiguracji.', resTitle: 'Wygenerowany monit', - noDataLine1: 'Opisz swój przypadek użycia po lewej stronie,', title: 'Generator podpowiedzi', apply: 'Zastosować', overwriteTitle: 'Nadpisać istniejącą konfigurację?', loading: 'Orkiestracja aplikacji dla Ciebie...', description: 'Generator podpowiedzi używa skonfigurowanego modelu do optymalizacji podpowiedzi w celu uzyskania wyższej jakości i lepszej struktury. 
Napisz jasne i szczegółowe instrukcje.', - noDataLine2: 'W tym miejscu zostanie wyświetlony podgląd orkiestracji.', idealOutput: 'Idealny wynik', to: 'do', version: 'Wersja', diff --git a/web/i18n/pl-PL/billing.ts b/web/i18n/pl-PL/billing.ts index 09e213df8d..49d082a921 100644 --- a/web/i18n/pl-PL/billing.ts +++ b/web/i18n/pl-PL/billing.ts @@ -125,15 +125,6 @@ const translation = { 'Uzyskaj pełne możliwości i wsparcie dla systemów o kluczowym znaczeniu dla misji.', includesTitle: 'Wszystko w planie Zespołowym, plus:', features: { - 2: 'Wyjątkowe funkcje dla przedsiębiorstw', - 7: 'Aktualizacje i konserwacja przez Dify oficjalnie', - 4: 'Usługi rejestracji jednokrotnej', - 1: 'Autoryzacja licencji komercyjnej', - 0: 'Skalowalne rozwiązania wdrożeniowe klasy korporacyjnej', - 5: 'Umowy SLA wynegocjowane przez Dify Partners', - 8: 'Profesjonalne wsparcie techniczne', - 3: 'Wiele przestrzeni roboczych i zarządzanie przedsiębiorstwem', - 6: 'Zaawansowane zabezpieczenia i kontrola', }, priceTip: 'Tylko roczne fakturowanie', btnText: 'Skontaktuj się z działem sprzedaży', @@ -142,9 +133,6 @@ const translation = { }, community: { features: { - 1: 'Pojedyncza przestrzeń robocza', - 2: 'Zgodny z licencją Dify Open Source', - 0: 'Wszystkie podstawowe funkcje udostępnione w repozytorium publicznym', }, includesTitle: 'Darmowe funkcje:', name: 'Społeczność', @@ -155,10 +143,6 @@ const translation = { }, premium: { features: { - 1: 'Pojedyncza przestrzeń robocza', - 2: 'Personalizacja logo i brandingu aplikacji internetowej', - 3: 'Priorytetowa pomoc techniczna przez e-mail i czat', - 0: 'Niezawodność samodzielnego zarządzania przez różnych dostawców usług w chmurze', }, description: 'Dla średnich organizacji i zespołów', for: 'Dla średnich organizacji i zespołów', diff --git a/web/i18n/pl-PL/common.ts b/web/i18n/pl-PL/common.ts index fa98146903..1e97c1218f 100644 --- a/web/i18n/pl-PL/common.ts +++ b/web/i18n/pl-PL/common.ts @@ -204,7 +204,6 @@ const translation = { showAppLength: 'Pokaż {{length}} aplikacje', delete: 'Usuń konto', deleteTip: 'Usunięcie konta spowoduje trwałe usunięcie wszystkich danych i nie będzie można ich odzyskać.', - deleteConfirmTip: 'Aby potwierdzić, wyślij następujące informacje z zarejestrowanego adresu e-mail na adres ', myAccount: 'Moje konto', studio: 'Dify Studio', account: 'Rachunek', diff --git a/web/i18n/pl-PL/dataset-creation.ts b/web/i18n/pl-PL/dataset-creation.ts index 28e400fd22..b0ac21c60f 100644 --- a/web/i18n/pl-PL/dataset-creation.ts +++ b/web/i18n/pl-PL/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Utwórz Wiedzę', - update: 'Dodaj dane', fallbackRoute: 'Wiedza', }, one: 'Wybierz źródło danych', diff --git a/web/i18n/pl-PL/dataset-documents.ts b/web/i18n/pl-PL/dataset-documents.ts index c0b801ccf5..db233d87f8 100644 --- a/web/i18n/pl-PL/dataset-documents.ts +++ b/web/i18n/pl-PL/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: 'Synchronizuj', resume: 'Wznów', pause: 'Pauza', - download: 'Pobierz plik', }, index: { enable: 'Włącz', @@ -344,7 +343,6 @@ const translation = { keywords: 'Słowa kluczowe', addKeyWord: 'Dodaj słowo kluczowe', keywordError: 'Maksymalna długość słowa kluczowego wynosi 20', - characters: 'znaków', hitCount: 'Liczba odwołań', vectorHash: 'Wektor hash: ', questionPlaceholder: 'dodaj pytanie tutaj', diff --git a/web/i18n/pl-PL/dataset-hit-testing.ts b/web/i18n/pl-PL/dataset-hit-testing.ts index f069e4de9e..5bc434a58a 100644 --- a/web/i18n/pl-PL/dataset-hit-testing.ts +++ 
b/web/i18n/pl-PL/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Testowanie odzyskiwania', desc: 'Przetestuj efekt uderzenia wiedzy na podstawie podanego tekstu zapytania.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'Ostatnie', table: { header: { source: 'Źródło', diff --git a/web/i18n/pt-BR/app-debug.ts b/web/i18n/pt-BR/app-debug.ts index c521abe700..3d58f956ca 100644 --- a/web/i18n/pt-BR/app-debug.ts +++ b/web/i18n/pt-BR/app-debug.ts @@ -228,21 +228,6 @@ const translation = { }, }, automatic: { - title: 'Orquestração Automatizada de Aplicativos', - description: 'Descreva o seu cenário, o Dify irá orquestrar um aplicativo para você.', - intendedAudience: 'Qual é o público-alvo?', - intendedAudiencePlaceHolder: 'ex: Estudante', - solveProblem: 'Quais problemas eles esperam que a IA possa resolver para eles?', - solveProblemPlaceHolder: 'ex: Avaliar o desempenho acadêmico', - generate: 'Gerar', - audiencesRequired: 'Públicos-alvo necessários', - problemRequired: 'Problema necessário', - resTitle: 'Orquestramos o seguinte aplicativo para você.', - apply: 'Aplicar esta orquestração', - noData: 'Descreva o seu caso de uso à esquerda, a visualização da orquestração será exibida aqui.', - loading: 'Orquestrando o aplicativo para você...', - overwriteTitle: 'Substituir configuração existente?', - overwriteMessage: 'Aplicar esta orquestração irá substituir a configuração existente.', }, resetConfig: { title: 'Confirmar redefinição?', @@ -544,13 +529,10 @@ const translation = { apply: 'Aplicar', title: 'Gerador de Prompt', description: 'O Gerador de Prompts usa o modelo configurado para otimizar prompts para maior qualidade e melhor estrutura. Por favor, escreva instruções claras e detalhadas.', - instructionPlaceHolder: 'Escreva instruções claras e específicas.', - noDataLine2: 'A visualização da orquestração será exibida aqui.', tryIt: 'Experimente', loading: 'Orquestrando o aplicativo para você...', instruction: 'Instruções', resTitle: 'Prompt gerado', - noDataLine1: 'Descreva seu caso de uso à esquerda,', overwriteTitle: 'Substituir a configuração existente?', to: 'para', press: 'Imprensa', diff --git a/web/i18n/pt-BR/billing.ts b/web/i18n/pt-BR/billing.ts index 3ef93d9f91..f6b442be06 100644 --- a/web/i18n/pt-BR/billing.ts +++ b/web/i18n/pt-BR/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Obtenha capacidades completas e suporte para sistemas críticos em larga escala.', includesTitle: 'Tudo no plano Equipe, além de:', features: { - 3: 'Vários espaços de trabalho e gerenciamento corporativo', - 2: 'Recursos exclusivos da empresa', - 6: 'Segurança e controles avançados', - 4: 'SSO', - 8: 'Suporte Técnico Profissional', - 0: 'Soluções de implantação escaláveis de nível empresarial', - 7: 'Atualizações e manutenção por Dify oficialmente', - 1: 'Autorização de Licença Comercial', - 5: 'SLAs negociados pela Dify Partners', }, btnText: 'Contate Vendas', priceTip: 'Faturamento Anual Apenas', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 0: 'Todos os principais recursos lançados no repositório público', - 2: 'Está em conformidade com a licença de código aberto Dify', - 1: 'Espaço de trabalho individual', }, name: 'Comunidade', description: 'Para Usuários Individuais, Pequenas Equipes ou Projetos Não Comerciais', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 2: 'Personalização do logotipo e da marca do WebApp', - 1: 'Espaço de trabalho individual', - 0: 'Confiabilidade autogerenciada por vários provedores 
de nuvem', - 3: 'Suporte prioritário por e-mail e bate-papo', }, includesTitle: 'Tudo da Comunidade, além de:', for: 'Para organizações e equipes de médio porte', diff --git a/web/i18n/pt-BR/common.ts b/web/i18n/pt-BR/common.ts index b555c2c2b0..6f900dbaf3 100644 --- a/web/i18n/pt-BR/common.ts +++ b/web/i18n/pt-BR/common.ts @@ -198,7 +198,6 @@ const translation = { showAppLength: 'Mostrar {{length}} apps', delete: 'Excluir conta', deleteTip: 'Excluir sua conta apagará permanentemente todos os seus dados e eles não poderão ser recuperados.', - deleteConfirmTip: 'Para confirmar, envie o seguinte do seu e-mail registrado para ', myAccount: 'Minha Conta', account: 'Conta', studio: 'Estúdio Dify', diff --git a/web/i18n/pt-BR/dataset-creation.ts b/web/i18n/pt-BR/dataset-creation.ts index e2668c818f..fcf4a13134 100644 --- a/web/i18n/pt-BR/dataset-creation.ts +++ b/web/i18n/pt-BR/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Criar Conhecimento', - update: 'Adicionar dados', fallbackRoute: 'Conhecimento', }, one: 'Escolher fonte de dados', diff --git a/web/i18n/pt-BR/dataset-documents.ts b/web/i18n/pt-BR/dataset-documents.ts index ca4ad21530..b795dd0d36 100644 --- a/web/i18n/pt-BR/dataset-documents.ts +++ b/web/i18n/pt-BR/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: 'Sincronizar', resume: 'Retomar', pause: 'Pausa', - download: 'Baixar arquivo', }, index: { enable: 'Habilitar', @@ -343,7 +342,6 @@ const translation = { keywords: 'Palavras-chave', addKeyWord: 'Adicionar palavra-chave', keywordError: 'O comprimento máximo da palavra-chave é 20', - characters: 'caracteres', hitCount: 'Contagem de recuperação', vectorHash: 'Hash do vetor: ', questionPlaceholder: 'adicionar pergunta aqui', diff --git a/web/i18n/pt-BR/dataset-hit-testing.ts b/web/i18n/pt-BR/dataset-hit-testing.ts index 61ab4f3d6e..7c075fff11 100644 --- a/web/i18n/pt-BR/dataset-hit-testing.ts +++ b/web/i18n/pt-BR/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Teste de Recuperação', desc: 'Teste o efeito de recuperação do conhecimento com base no texto de consulta fornecido.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'Recentes', table: { header: { source: 'Origem', diff --git a/web/i18n/ro-RO/app-debug.ts b/web/i18n/ro-RO/app-debug.ts index c75f3e5e49..c36285be8d 100644 --- a/web/i18n/ro-RO/app-debug.ts +++ b/web/i18n/ro-RO/app-debug.ts @@ -228,21 +228,6 @@ const translation = { }, }, automatic: { - title: 'Orchestrarea automată a aplicațiilor', - description: 'Descrieți scenariul dvs., Dify vă va orchestra o aplicație pentru dvs.', - intendedAudience: 'Care este publicul țintă?', - intendedAudiencePlaceHolder: 'de ex. Student', - solveProblem: 'Ce probleme speră ei că IA le poate rezolva?', - solveProblemPlaceHolder: 'de ex. 
Extrage informații și rezumă informații din rapoarte și articole lungi', - generate: 'Generează', - audiencesRequired: 'Publicul țintă este necesar', - problemRequired: 'Problema este necesară', - resTitle: 'Am orchestrat următoarea aplicație pentru dvs.', - apply: 'Aplicați această orchestrare', - noData: 'Descrieți cazul de utilizare din stânga, previzualizarea orchestrării se va afișa aici.', - loading: 'Orchestrarea aplicației pentru dvs...', - overwriteTitle: 'Suprascrieți configurația existentă?', - overwriteMessage: 'Aplicarea acestei orchestrări va suprascrie configurația existentă.', }, resetConfig: { title: 'Confirmați resetarea?', @@ -550,10 +535,7 @@ const translation = { description: 'Generatorul de solicitări utilizează modelul configurat pentru a optimiza solicitările pentru o calitate superioară și o structură mai bună. Vă rugăm să scrieți instrucțiuni clare și detaliate.', instruction: 'Instrucţiuni', loading: 'Orchestrarea aplicației pentru dvs....', - noDataLine1: 'Descrieți cazul de utilizare din stânga,', title: 'Generator de solicitări', - instructionPlaceHolder: 'Scrieți instrucțiuni clare și specifice.', - noDataLine2: 'Previzualizarea orchestrației va fi afișată aici.', overwriteMessage: 'Aplicarea acestei solicitări va înlocui configurația existentă.', press: 'Presa', versions: 'Versiuni', diff --git a/web/i18n/ro-RO/billing.ts b/web/i18n/ro-RO/billing.ts index df35ec26fb..fee5b2303f 100644 --- a/web/i18n/ro-RO/billing.ts +++ b/web/i18n/ro-RO/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Obțineți capacități și asistență complete pentru sisteme critice la scară largă.', includesTitle: 'Tot ce este în planul Echipă, plus:', features: { - 6: 'Securitate și controale avansate', - 1: 'Autorizare licență comercială', - 2: 'Funcții exclusive pentru întreprinderi', - 0: 'Soluții de implementare scalabile la nivel de întreprindere', - 5: 'SLA-uri negociate de partenerii Dify', - 3: 'Mai multe spații de lucru și managementul întreprinderii', - 7: 'Actualizări și întreținere de către Dify oficial', - 8: 'Asistență tehnică profesională', - 4: 'SSO', }, for: 'Pentru echipe de mari dimensiuni', price: 'Personalizat', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 0: 'Toate caracteristicile de bază lansate în depozitul public', - 2: 'Respectă licența Dify Open Source', - 1: 'Spațiu de lucru unic', }, description: 'Pentru utilizatori individuali, echipe mici sau proiecte necomerciale', btnText: 'Începe cu Comunitatea', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 3: 'Asistență prioritară prin e-mail și chat', - 1: 'Spațiu de lucru unic', - 0: 'Fiabilitate autogestionată de diverși furnizori de cloud', - 2: 'Personalizarea logo-ului și brandingului WebApp', }, btnText: 'Obține Premium în', description: 'Pentru organizații și echipe de dimensiuni medii', diff --git a/web/i18n/ro-RO/common.ts b/web/i18n/ro-RO/common.ts index 473a349784..f4e59de2e2 100644 --- a/web/i18n/ro-RO/common.ts +++ b/web/i18n/ro-RO/common.ts @@ -198,7 +198,6 @@ const translation = { showAppLength: 'Afișează {{length}} aplicații', delete: 'Șterge contul', deleteTip: 'Ștergerea contului vă va șterge definitiv toate datele și nu pot fi recuperate.', - deleteConfirmTip: 'Pentru a confirma, trimiteți următoarele din e-mailul înregistrat la ', account: 'Cont', studio: 'Dify Studio', myAccount: 'Contul meu', diff --git a/web/i18n/ro-RO/dataset-creation.ts b/web/i18n/ro-RO/dataset-creation.ts index 0849d4dc87..bd51a6a7e8 100644 --- 
a/web/i18n/ro-RO/dataset-creation.ts +++ b/web/i18n/ro-RO/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Creați Cunoștințe', - update: 'Adăugați date', fallbackRoute: 'Cunoaștere', }, one: 'Alegeți sursa de date', diff --git a/web/i18n/ro-RO/dataset-documents.ts b/web/i18n/ro-RO/dataset-documents.ts index a6d7ffdfab..a5c499857a 100644 --- a/web/i18n/ro-RO/dataset-documents.ts +++ b/web/i18n/ro-RO/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: 'Sincronizează', pause: 'Pauză', resume: 'Reia', - download: 'Descărcați fișierul', }, index: { enable: 'Activează', @@ -343,7 +342,6 @@ const translation = { keywords: 'Cuvinte cheie', addKeyWord: 'Adăugați un cuvânt cheie', keywordError: 'Lungimea maximă a cuvântului cheie este de 20 de caractere', - characters: 'caractere', hitCount: 'Număr de rezultate', vectorHash: 'Vector hash: ', questionPlaceholder: 'adăugați întrebarea aici', diff --git a/web/i18n/ro-RO/dataset-hit-testing.ts b/web/i18n/ro-RO/dataset-hit-testing.ts index 323cd68746..60ea837df5 100644 --- a/web/i18n/ro-RO/dataset-hit-testing.ts +++ b/web/i18n/ro-RO/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Testarea Recuperării', desc: 'Testați efectul de atingere al Cunoștințelor pe baza textului interogat dat.', dateTimeFormat: 'DD/MM/YYYY hh:mm A', - recents: 'Recente', table: { header: { source: 'Sursă', diff --git a/web/i18n/ru-RU/app-debug.ts b/web/i18n/ru-RU/app-debug.ts index 5beaa68b8f..450da405e2 100644 --- a/web/i18n/ru-RU/app-debug.ts +++ b/web/i18n/ru-RU/app-debug.ts @@ -232,11 +232,8 @@ const translation = { description: 'Генератор промпта использует настроенную модель для оптимизации промпта для повышения качества и улучшения структуры. Пожалуйста, напишите четкие и подробные инструкции.', tryIt: 'Попробуйте', instruction: 'Инструкции', - instructionPlaceHolder: 'Напишите четкие и конкретные инструкции.', generate: 'Сгенерировать', resTitle: 'Сгенерированный промпт', - noDataLine1: 'Опишите свой случай использования слева,', - noDataLine2: 'предварительный просмотр оркестрации будет показан здесь.', apply: 'Применить', loading: 'Оркестрация приложения для вас...', overwriteTitle: 'Перезаписать существующую конфигурацию?', diff --git a/web/i18n/ru-RU/billing.ts b/web/i18n/ru-RU/billing.ts index 7af47ee00b..b0a48f7c3d 100644 --- a/web/i18n/ru-RU/billing.ts +++ b/web/i18n/ru-RU/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Получите полный набор возможностей и поддержку для крупномасштабных критически важных систем.', includesTitle: 'Все в командном плане, плюс:', features: { - 4: 'ССО', - 5: 'Согласованные SLA от Dify Partners', - 8: 'Профессиональная техническая поддержка', - 2: 'Эксклюзивные корпоративные функции', - 6: 'Расширенная безопасность и контроль', - 7: 'Обновления и обслуживание от Dify официально', - 3: 'Несколько рабочих пространств и управление предприятием', - 0: 'Масштабируемые решения для развертывания корпоративного уровня', - 1: 'Разрешение на коммерческую лицензию', }, price: 'Пользовательский', priceTip: 'Только годовая подписка', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 1: 'Единое рабочее пространство', - 2: 'Соответствует лицензии Dify с открытым исходным кодом', - 0: 'Все основные функции выпущены в общедоступном репозитории', }, name: 'Сообщество', btnText: 'Начните с сообщества', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 2: 'Настройка логотипа и брендинга WebApp', - 1: 'Единое рабочее 
пространство', - 3: 'Приоритетная поддержка по электронной почте и в чате', - 0: 'Самостоятельное управление надежностью от различных поставщиков облачных услуг', }, description: 'Для средних организаций и команд', includesTitle: 'Всё из Сообщества, плюс:', diff --git a/web/i18n/ru-RU/common.ts b/web/i18n/ru-RU/common.ts index 02bd415dc5..0dfa0c5257 100644 --- a/web/i18n/ru-RU/common.ts +++ b/web/i18n/ru-RU/common.ts @@ -202,7 +202,6 @@ const translation = { showAppLength: 'Показать {{length}} приложений', delete: 'Удалить учетную запись', deleteTip: 'Удаление вашей учетной записи приведет к безвозвратному удалению всех ваших данных, и их невозможно будет восстановить.', - deleteConfirmTip: 'Для подтверждения, пожалуйста, отправьте следующее с вашего зарегистрированного адреса электронной почты на ', account: 'Счет', studio: 'Студия Dify', myAccount: 'Моя учетная запись', diff --git a/web/i18n/ru-RU/dataset-creation.ts b/web/i18n/ru-RU/dataset-creation.ts index bf2532836c..7585c2f12c 100644 --- a/web/i18n/ru-RU/dataset-creation.ts +++ b/web/i18n/ru-RU/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Создать базу знаний', - update: 'Добавить данные', fallbackRoute: 'Знание', }, one: 'Выберите источник данных', diff --git a/web/i18n/ru-RU/dataset-documents.ts b/web/i18n/ru-RU/dataset-documents.ts index 400ada270d..0471decf3c 100644 --- a/web/i18n/ru-RU/dataset-documents.ts +++ b/web/i18n/ru-RU/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'Синхронизировать', resume: 'Возобновить', pause: 'Пауза', - download: 'Скачать файл', }, index: { enable: 'Включить', @@ -343,7 +342,6 @@ const translation = { keywords: 'Ключевые слова', addKeyWord: 'Добавить ключевое слово', keywordError: 'Максимальная длина ключевого слова - 20', - characters: 'символов', hitCount: 'Количество обращений', vectorHash: 'Векторный хэш: ', questionPlaceholder: 'добавьте вопрос здесь', diff --git a/web/i18n/ru-RU/dataset-hit-testing.ts b/web/i18n/ru-RU/dataset-hit-testing.ts index 5ac504efbf..bd2cfc232c 100644 --- a/web/i18n/ru-RU/dataset-hit-testing.ts +++ b/web/i18n/ru-RU/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Тестирование поиска', desc: 'Проверьте эффективность поиска в базе знаний на основе заданного текста запроса.', dateTimeFormat: 'DD.MM.YYYY HH:mm', - recents: 'Недавние', table: { header: { source: 'Источник', diff --git a/web/i18n/sl-SI/app-debug.ts b/web/i18n/sl-SI/app-debug.ts index 9b2649c280..60c0578d54 100644 --- a/web/i18n/sl-SI/app-debug.ts +++ b/web/i18n/sl-SI/app-debug.ts @@ -200,51 +200,25 @@ const translation = { contentEnableLabel: 'Moderiranje vsebine omogočeno', }, debug: { - title: 'Odpravljanje napak', - description: 'Debugiranje omogoča pregled podrobnih informacij, kot so podatki API-jev, vklop dnevnikov, opozorila in še več.', }, agent: { - title: 'Pomočnik', - description: 'Osnovne informacije in odgovorne naloge pomočnika.', - prompts: 'Temeljni PROMPT', message: { - title: 'Vrstice sporočila', - user: 'Uporabnik', - assistant: 'Pomočnik', }, }, history: { - title: 'Zgodovina', - notFound: 'Zgodovina ni bila najdena', - notOpen: 'Zgodovina ni odprta', }, prompt: { - title: 'Vsebina PROMPT-a', }, message: { - title: 'Sporočilo', - description: 'Način nastavitve formatiranega pogovora.', - tryChat: 'Preizkusi klepet', }, theme: { - title: 'Tema', themes: { - default: 'Osnovna tema', - light: 'Svetla tema', - dark: 'Temna tema', - custom: 'Prilagodi temo', }, modal: { - title: 'Nastavitve teme', 
primaryColor: { - title: 'Primarna barva', - placeholder: 'Izberi primarno barvo', }, textColor: { - title: 'Barva besedila', - placeholder: 'Izberi barvo besedila', }, - ok: 'V redu', }, }, fileUpload: { @@ -332,14 +306,11 @@ const translation = { }, apply: 'Uporabiti', generate: 'Ustvariti', - instructionPlaceHolder: 'Napišite jasna in specifična navodila.', resTitle: 'Ustvarjen poziv', - noDataLine2: 'Predogled orkestracije bo prikazan tukaj.', overwriteMessage: 'Če uporabite ta poziv, boste preglasili obstoječo konfiguracijo.', overwriteTitle: 'Preglasiti obstoječo konfiguracijo?', instruction: 'Navodila', loading: 'Orkestriranje aplikacije za vas ...', - noDataLine1: 'Na levi opišite primer uporabe,', title: 'Generator pozivov', tryIt: 'Poskusite', description: 'Generator pozivov uporablja konfiguriran model za optimizacijo pozivov za višjo kakovost in boljšo strukturo. Prosimo, napišite jasna in podrobna navodila.', diff --git a/web/i18n/sl-SI/billing.ts b/web/i18n/sl-SI/billing.ts index ffaa1b56e2..63fbb90dda 100644 --- a/web/i18n/sl-SI/billing.ts +++ b/web/i18n/sl-SI/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Pridobite vse zmogljivosti in podporo za velike sisteme kritične za misijo.', includesTitle: 'Vse v načrtu Ekipa, plus:', features: { - 0: 'Prilagodljive rešitve za uvajanje na ravni podjetij', - 2: 'Ekskluzivne funkcije za podjetja', - 7: 'Posodobitve in vzdrževanje s strani Dify Official', - 8: 'Strokovna tehnična podpora', - 1: 'Dovoljenje za komercialno licenco', - 3: 'Več delovnih prostorov in upravljanje podjetja', - 5: 'Dogovorjene pogodbe o ravni storitev s strani Dify Partners', - 6: 'Napredna varnost in nadzor', - 4: 'SSO', }, priceTip: 'Letno zaračunavanje samo', price: 'Po meri', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 1: 'En delovni prostor', - 0: 'Vse osnovne funkcije, izdane v javnem repozitoriju', - 2: 'Skladen z odprtokodno licenco Dify', }, includesTitle: 'Brezplačne funkcije:', price: 'Brezplačno', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 1: 'En delovni prostor', - 3: 'Prednostna podpora po e-pošti in klepetu', - 2: 'Prilagajanje logotipa in blagovne znamke WebApp', - 0: 'Samostojna zanesljivost različnih ponudnikov storitev v oblaku', }, name: 'Premium', priceTip: 'Na podlagi oblaka Marketplace', diff --git a/web/i18n/sl-SI/common.ts b/web/i18n/sl-SI/common.ts index d3acc5f47f..6d81e54078 100644 --- a/web/i18n/sl-SI/common.ts +++ b/web/i18n/sl-SI/common.ts @@ -205,7 +205,6 @@ const translation = { showAppLength: 'Prikaz {{length}} aplikacij', delete: 'Izbriši račun', deleteTip: 'Brisanje vašega računa bo trajno izbrisalo vse vaše podatke in jih ne bo mogoče obnoviti.', - deleteConfirmTip: 'Za potrditev pošljite naslednje s svojega registriranega e-poštnega naslova na ', permanentlyDeleteButton: 'Trajno izbriši račun', deletePrivacyLinkTip: 'Za več informacij o tem, kako ravnamo z vašimi podatki, si oglejte naše', feedbackPlaceholder: 'Neobvezno', @@ -469,105 +468,40 @@ const translation = { loadBalancingInfo: 'Privzeto uravnoteženje obremenitev uporablja strategijo Round-robin. 
Če se sproži omejitev hitrosti, se uporabi 1-minutno obdobje ohlajanja.', upgradeForLoadBalancing: 'Nadgradite svoj načrt, da omogočite uravnoteženje obremenitev.', dataSource: { - add: 'Dodaj vir podatkov', - connect: 'Poveži', - configure: 'Konfiguriraj', notion: { - title: 'Notion', - description: 'Uporaba Notiona kot vira podatkov za Znanost.', - connectedWorkspace: 'Povezano delovno okolje', - addWorkspace: 'Dodaj delovno okolje', - connected: 'Povezan', - disconnected: 'Prekinjen', - changeAuthorizedPages: 'Spremeni pooblaščene strani', - pagesAuthorized: 'Pooblaščene strani', - sync: 'Sinhroniziraj', - remove: 'Odstrani', selector: { - pageSelected: 'Izbrane strani', - searchPages: 'Iskanje strani...', - noSearchResult: 'Ni rezultatov iskanja', - addPages: 'Dodaj strani', - preview: 'PREDOGLED', }, }, website: { - title: 'Spletna stran', - description: 'Uvoz vsebine s spletnih strani z uporabo spletnega pajka.', - with: 'S', - configuredCrawlers: 'Konfigurirani pajki', - active: 'Aktiven', - inactive: 'Neaktiven', }, }, plugin: { serpapi: { - apiKey: 'API ključ', - apiKeyPlaceholder: 'Vnesite svoj API ključ', - keyFrom: 'Pridobite svoj SerpAPI ključ na strani računa SerpAPI', }, }, apiBasedExtension: { - title: 'Razširitve API omogočajo centralizirano upravljanje API, kar poenostavi konfiguracijo za enostavno uporabo v aplikacijah Dify.', - link: 'Naučite se, kako razviti svojo API razširitev.', - add: 'Dodaj API razširitev', selector: { - title: 'API razširitev', - placeholder: 'Prosimo, izberite API razširitev', - manage: 'Upravljaj API razširitev', }, modal: { - title: 'Dodaj API razširitev', - editTitle: 'Uredi API razširitev', name: { - title: 'Ime', - placeholder: 'Vnesite ime', }, apiEndpoint: { - title: 'API konec', - placeholder: 'Vnesite API konec', }, apiKey: { - title: 'API ključ', - placeholder: 'Vnesite API ključ', - lengthError: 'Dolžina API ključa ne sme biti manjša od 5 znakov', }, }, - type: 'Tip', }, about: { - changeLog: 'Dnevnik sprememb', - updateNow: 'Posodobi zdaj', - nowAvailable: 'Dify {{version}} je zdaj na voljo.', - latestAvailable: 'Dify {{version}} je najnovejša različica na voljo.', }, appMenus: { - overview: 'Nadzor', - promptEng: 'Orkestriraj', - apiAccess: 'Dostop API', - logAndAnn: 'Dnevniki in objave', - logs: 'Dnevniki', }, environment: { - testing: 'TESTIRANJE', - development: 'RAZVOJ', }, appModes: { - completionApp: 'Generator besedila', - chatApp: 'Klepetalna aplikacija', }, datasetMenus: { - documents: 'Dokumenti', - hitTesting: 'Preizkušanje pridobivanja', - settings: 'Nastavitve', - emptyTip: 'Znanost še ni povezana, pojdite v aplikacijo ali vtičnik, da dokončate povezavo.', - viewDoc: 'Ogled dokumentacije', - relatedApp: 'povezane aplikacije', }, voiceInput: { - speaking: 'Govorite zdaj...', - converting: 'Pretvarjanje v besedilo...', - notAllow: 'mikrofon ni pooblaščen', }, modelName: { 'gpt-3.5-turbo': 'GPT-3.5-Turbo', @@ -581,90 +515,38 @@ const translation = { 'claude-2': 'Claude-2', }, chat: { - renameConversation: 'Preimenuj pogovor', - conversationName: 'Ime pogovora', - conversationNamePlaceholder: 'Vnesite ime pogovora', - conversationNameCanNotEmpty: 'Ime pogovora je obvezno', citation: { - title: 'CITATI', - linkToDataset: 'Povezava do znanja', - characters: 'Znakov:', - hitCount: 'Število zadetkov:', - vectorHash: 'Vektorski hash:', - hitScore: 'Ocena zadetka:', }, }, promptEditor: { - placeholder: 'Tukaj napišite svoje pozivno besedilo, vnesite \'{\' za vstavljanje spremenljivke, vnesite \'/\' za vstavljanje vsebinskega 
bloka poziva', context: { item: { - title: 'Kontekst', - desc: 'Vstavi predlogo konteksta', }, modal: { - title: '{{num}} Znanost v kontekstu', - add: 'Dodaj kontekst ', - footer: 'Kontekste lahko upravljate v spodnjem razdelku Kontekst.', }, }, history: { item: { - title: 'Zgodovina pogovora', - desc: 'Vstavi predlogo zgodovinskega sporočila', }, modal: { - title: 'PRIMER', - user: 'Pozdravljeni', - assistant: 'Pozdravljeni! Kako vam lahko pomagam danes?', - edit: 'Uredi imena vlog pogovora', }, }, variable: { item: { - title: 'Spremenljivke in zunanji orodja', - desc: 'Vstavi spremenljivke in zunanja orodja', }, outputToolDisabledItem: { - title: 'Spremenljivke', - desc: 'Vstavi spremenljivke', }, modal: { - add: 'Nova spremenljivka', - addTool: 'Novo orodje', }, }, query: { item: { - title: 'Poizvedba', - desc: 'Vstavi predlogo uporabniške poizvedbe', }, }, - existed: 'Že obstaja v pozivu', }, imageUploader: { - uploadFromComputer: 'Naloži iz računalnika', - uploadFromComputerReadError: 'Branje slike ni uspelo, poskusite znova.', - uploadFromComputerUploadError: 'Nalaganje slike ni uspelo, poskusite znova.', - uploadFromComputerLimit: 'Nalaganje slik ne sme presegati {{size}} MB', - pasteImageLink: 'Prilepi povezavo do slike', - pasteImageLinkInputPlaceholder: 'Tukaj prilepite povezavo do slike', - pasteImageLinkInvalid: 'Neveljavna povezava slike', - imageUpload: 'Nalaganje slike', }, tag: { - placeholder: 'Vse oznake', - addNew: 'Dodaj novo oznako', - noTag: 'Ni oznak', - noTagYet: 'Še ni oznak', - addTag: 'Dodaj oznake', - editTag: 'Uredi oznake', - manageTags: 'Upravljaj oznake', - selectorPlaceholder: 'Vnesite za iskanje ali ustvarjanje', - create: 'Ustvari', - delete: 'Izbriši oznako', - deleteTip: 'Oznaka se uporablja, jo želite izbrisati?', - created: 'Oznaka uspešno ustvarjena', - failed: 'Ustvarjanje oznake ni uspelo', }, discoverMore: 'Odkrijte več v', installProvider: 'Namestitev ponudnikov modelov', diff --git a/web/i18n/sl-SI/dataset-creation.ts b/web/i18n/sl-SI/dataset-creation.ts index 08e65c2437..5dd9ac1e35 100644 --- a/web/i18n/sl-SI/dataset-creation.ts +++ b/web/i18n/sl-SI/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Ustvari Znanje', - update: 'Dodaj podatke', fallbackRoute: 'Znanje', }, one: 'Izberi vir podatkov', diff --git a/web/i18n/sl-SI/dataset-documents.ts b/web/i18n/sl-SI/dataset-documents.ts index a163197e86..436dce6fdf 100644 --- a/web/i18n/sl-SI/dataset-documents.ts +++ b/web/i18n/sl-SI/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'Sinhroniziraj', pause: 'Zaustavi', resume: 'Nadaljuj', - download: 'Prenesi datoteko', }, index: { enable: 'Omogoči', @@ -343,7 +342,6 @@ const translation = { keywords: 'Ključne besede', addKeyWord: 'Dodaj ključno besedo', keywordError: 'Največja dolžina ključne besede je 20', - characters: 'znakov', hitCount: 'Število pridobitev', vectorHash: 'Vektorski hash: ', questionPlaceholder: 'dodajte vprašanje tukaj', diff --git a/web/i18n/sl-SI/dataset-hit-testing.ts b/web/i18n/sl-SI/dataset-hit-testing.ts index 645fd654d2..b01f4538ae 100644 --- a/web/i18n/sl-SI/dataset-hit-testing.ts +++ b/web/i18n/sl-SI/dataset-hit-testing.ts @@ -3,7 +3,6 @@ const translation = { settingTitle: 'Nastavitve pridobivanja', desc: 'Preizkusite učinkovitost zadetkov znanja na podlagi podanega poizvedbenega besedila', dateTimeFormat: 'DD/MM/YYYY hh:mm A', - recents: 'Nedavno', table: { header: { source: 'Vir', diff --git a/web/i18n/th-TH/app-debug.ts b/web/i18n/th-TH/app-debug.ts index 
5476e7bc68..0e8cc1d9cd 100644 --- a/web/i18n/th-TH/app-debug.ts +++ b/web/i18n/th-TH/app-debug.ts @@ -283,11 +283,8 @@ const translation = { apply: 'ใช้', resTitle: 'พรอมต์ที่สร้างขึ้น', title: 'เครื่องกําเนิดพร้อมท์', - noDataLine2: 'ตัวอย่างการประสานเสียงจะแสดงที่นี่', tryIt: 'ลองดู', overwriteTitle: 'แทนที่การกําหนดค่าที่มีอยู่ใช่ไหม', - noDataLine1: 'อธิบายกรณีการใช้งานของคุณทางด้านซ้าย', - instructionPlaceHolder: 'เขียนคําแนะนําที่ชัดเจนและเฉพาะเจาะจง', overwriteMessage: 'การใช้พรอมต์นี้จะแทนที่การกําหนดค่าที่มีอยู่', description: 'ตัวสร้างพรอมต์ใช้โมเดลที่กําหนดค่าเพื่อปรับพรอมต์ให้เหมาะสมเพื่อคุณภาพที่สูงขึ้นและโครงสร้างที่ดีขึ้น โปรดเขียนคําแนะนําที่ชัดเจนและละเอียด', loading: 'กําลังประสานงานแอปพลิเคชันสําหรับคุณ...', diff --git a/web/i18n/th-TH/billing.ts b/web/i18n/th-TH/billing.ts index afbe9318c4..59afefe162 100644 --- a/web/i18n/th-TH/billing.ts +++ b/web/i18n/th-TH/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'รับความสามารถและการสนับสนุนเต็มรูปแบบสําหรับระบบที่สําคัญต่อภารกิจขนาดใหญ่', includesTitle: 'ทุกอย่างในแผนทีม รวมถึง:', features: { - 4: 'SSO', - 2: 'คุณสมบัติพิเศษสําหรับองค์กร', - 5: 'SLA ที่เจรจาโดย Dify Partners', - 1: 'การอนุญาตใบอนุญาตเชิงพาณิชย์', - 8: 'การสนับสนุนด้านเทคนิคอย่างมืออาชีพ', - 0: 'โซลูชันการปรับใช้ที่ปรับขนาดได้ระดับองค์กร', - 7: 'การอัปเดตและบํารุงรักษาโดย Dify อย่างเป็นทางการ', - 3: 'พื้นที่ทํางานหลายแห่งและการจัดการองค์กร', - 6: 'การรักษาความปลอดภัยและการควบคุมขั้นสูง', }, btnText: 'ติดต่อฝ่ายขาย', price: 'ที่กำหนดเอง', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 1: 'พื้นที่ทํางานเดียว', - 2: 'สอดคล้องกับใบอนุญาตโอเพ่นซอร์ส Dify', - 0: 'คุณสมบัติหลักทั้งหมดที่เผยแพร่ภายใต้ที่เก็บสาธารณะ', }, name: 'ชุมชน', price: 'ฟรี', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 2: 'โลโก้ WebApp และการปรับแต่งแบรนด์', - 3: 'การสนับสนุนทางอีเมลและแชทลําดับความสําคัญ', - 1: 'พื้นที่ทํางานเดียว', - 0: 'ความน่าเชื่อถือที่จัดการด้วยตนเองโดยผู้ให้บริการคลาวด์ต่างๆ', }, priceTip: 'อิงตามตลาดคลาวด์', for: 'สำหรับองค์กรและทีมขนาดกลาง', diff --git a/web/i18n/th-TH/common.ts b/web/i18n/th-TH/common.ts index b8d01880ff..4869a5a0b8 100644 --- a/web/i18n/th-TH/common.ts +++ b/web/i18n/th-TH/common.ts @@ -200,7 +200,6 @@ const translation = { showAppLength: 'แสดง {{length}} แอป', delete: 'ลบบัญชี', deleteTip: 'การลบบัญชีของคุณจะเป็นการลบข้อมูลทั้งหมดของคุณอย่างถาวรและไม่สามารถกู้คืนได้', - deleteConfirmTip: 'เพื่อยืนยัน โปรดส่งข้อมูลต่อไปนี้จากอีเมลที่ลงทะเบียนไว้ที่', deletePrivacyLinkTip: 'สําหรับข้อมูลเพิ่มเติมเกี่ยวกับวิธีที่เราจัดการกับข้อมูลของคุณ โปรดดูที่', deletePrivacyLink: 'นโยบายความเป็นส่วนตัว', deleteLabel: 'เพื่อยืนยัน โปรดพิมพ์อีเมลของคุณด้านล่าง', diff --git a/web/i18n/th-TH/dataset-creation.ts b/web/i18n/th-TH/dataset-creation.ts index 795444cfab..6509e78f49 100644 --- a/web/i18n/th-TH/dataset-creation.ts +++ b/web/i18n/th-TH/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'สร้างความรู้', - update: 'เพิ่มข้อมูล', fallbackRoute: 'ความรู้', }, one: 'เลือกแหล่งข้อมูล', diff --git a/web/i18n/th-TH/dataset-documents.ts b/web/i18n/th-TH/dataset-documents.ts index 539dadfd18..80d934aa3a 100644 --- a/web/i18n/th-TH/dataset-documents.ts +++ b/web/i18n/th-TH/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'ซิงค์', pause: 'หยุด', resume: 'ดำเนิน', - download: 'ดาวน์โหลดไฟล์', }, index: { enable: 'เปิด', @@ -342,7 +341,6 @@ const translation = { keywords: 'คําสําคัญ', addKeyWord: 'เพิ่มคําสําคัญ', keywordError: 
'ความยาวสูงสุดของคําหลักคือ 20', - characters: 'อักขระ', hitCount: 'จํานวนการดึงข้อมูล', vectorHash: 'แฮชเวกเตอร์:', questionPlaceholder: 'เพิ่มคําถามที่นี่', diff --git a/web/i18n/th-TH/dataset-hit-testing.ts b/web/i18n/th-TH/dataset-hit-testing.ts index d04f2be2fc..03490899f2 100644 --- a/web/i18n/th-TH/dataset-hit-testing.ts +++ b/web/i18n/th-TH/dataset-hit-testing.ts @@ -3,7 +3,6 @@ const translation = { settingTitle: 'การตั้งค่าการดึงข้อมูล', desc: 'ทดสอบเอฟเฟกต์การตีของความรู้ตามข้อความแบบสอบถามที่กําหนด', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'ล่าสุด', table: { header: { source: 'ที่มา', diff --git a/web/i18n/tr-TR/app-debug.ts b/web/i18n/tr-TR/app-debug.ts index 782f65f19c..0f32eaefa4 100644 --- a/web/i18n/tr-TR/app-debug.ts +++ b/web/i18n/tr-TR/app-debug.ts @@ -232,11 +232,8 @@ const translation = { description: 'Prompt Oluşturucu, yapılandırılan modeli kullanarak promptları daha iyi kalite ve yapı için optimize eder. Lütfen açık ve ayrıntılı talimatlar yazın.', tryIt: 'Deneyin', instruction: 'Talimatlar', - instructionPlaceHolder: 'Açık ve belirli talimatlar yazın.', generate: 'Oluştur', resTitle: 'Oluşturulmuş Prompt', - noDataLine1: 'Kullanım durumunuzu solda açıklayın,', - noDataLine2: 'orkestrasyon önizlemesi burada görünecek.', apply: 'Uygula', loading: 'Uygulama orkestrasyonu yapılıyor...', overwriteTitle: 'Mevcut yapılandırmanın üzerine yazılsın mı?', diff --git a/web/i18n/tr-TR/billing.ts b/web/i18n/tr-TR/billing.ts index d85de6b5a2..ba80c49f78 100644 --- a/web/i18n/tr-TR/billing.ts +++ b/web/i18n/tr-TR/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Büyük ölçekli kritik sistemler için tam yetenekler ve destek.', includesTitle: 'Takım plandaki her şey, artı:', features: { - 8: 'Profesyonel Teknik Destek', - 1: 'Ticari Lisans Yetkilendirmesi', - 6: 'Gelişmiş Güvenlik ve Kontroller', - 5: 'Dify Partners tarafından müzakere edilen SLA\'lar', - 4: 'SSO', - 2: 'Özel Kurumsal Özellikler', - 0: 'Kurumsal Düzeyde Ölçeklenebilir Dağıtım Çözümleri', - 7: 'Resmi olarak Dify tarafından Güncellemeler ve Bakım', - 3: 'Çoklu Çalışma Alanları ve Kurumsal Yönetim', }, priceTip: 'Yıllık Faturalama Sadece', for: 'Büyük boyutlu Takımlar için', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 1: 'Tek Çalışma Alanı', - 0: 'Genel depo altında yayınlanan tüm temel özellikler', - 2: 'Dify Açık Kaynak Lisansı ile uyumludur', }, price: 'Ücretsiz', includesTitle: 'Ücretsiz Özellikler:', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 1: 'Tek Çalışma Alanı', - 0: 'Çeşitli Bulut Sağlayıcıları Tarafından Kendi Kendini Yöneten Güvenilirlik', - 2: 'WebApp Logosu ve Marka Özelleştirmesi', - 3: 'Öncelikli E-posta ve Sohbet Desteği', }, name: 'Premium', includesTitle: 'Topluluktan her şey, artı:', diff --git a/web/i18n/tr-TR/common.ts b/web/i18n/tr-TR/common.ts index 7dcebecff2..a5ea56f10e 100644 --- a/web/i18n/tr-TR/common.ts +++ b/web/i18n/tr-TR/common.ts @@ -202,7 +202,6 @@ const translation = { showAppLength: '{{length}} uygulamayı göster', delete: 'Hesabı Sil', deleteTip: 'Hesabınızı silmek tüm verilerinizi kalıcı olarak siler ve geri alınamaz.', - deleteConfirmTip: 'Onaylamak için, kayıtlı e-postanızdan şu adrese e-posta gönderin: ', account: 'Hesap', myAccount: 'Hesabım', studio: 'Dify Stüdyo', diff --git a/web/i18n/tr-TR/dataset-creation.ts b/web/i18n/tr-TR/dataset-creation.ts index 32fb8165eb..33c82b69f7 100644 --- a/web/i18n/tr-TR/dataset-creation.ts +++ b/web/i18n/tr-TR/dataset-creation.ts @@ -1,8 +1,6 @@ const 
translation = { steps: { header: { - creation: 'Bilgi Oluştur', - update: 'Veri ekle', fallbackRoute: 'Bilgi', }, one: 'Veri kaynağı seçin', diff --git a/web/i18n/tr-TR/dataset-documents.ts b/web/i18n/tr-TR/dataset-documents.ts index 984aad5a0a..0f5e4329a5 100644 --- a/web/i18n/tr-TR/dataset-documents.ts +++ b/web/i18n/tr-TR/dataset-documents.ts @@ -31,7 +31,6 @@ const translation = { sync: 'Senkronize et', pause: 'Duraklat', resume: 'Devam Et', - download: 'Dosyayı İndir', }, index: { enable: 'Etkinleştir', @@ -342,7 +341,6 @@ const translation = { keywords: 'Anahtar Kelimeler', addKeyWord: 'Anahtar kelime ekle', keywordError: 'Anahtar kelimenin maksimum uzunluğu 20', - characters: 'karakter', hitCount: 'Geri alım sayısı', vectorHash: 'Vektör hash: ', questionPlaceholder: 'soruyu buraya ekleyin', diff --git a/web/i18n/tr-TR/dataset-hit-testing.ts b/web/i18n/tr-TR/dataset-hit-testing.ts index d22df0d93e..9b1ea2dbc1 100644 --- a/web/i18n/tr-TR/dataset-hit-testing.ts +++ b/web/i18n/tr-TR/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Geri Alım Testi', desc: 'Verilen sorgu metnine göre Bilginin isabet etkisini test edin.', dateTimeFormat: 'GG/AA/YYYY ss:dd ÖÖ/ÖS', - recents: 'Sonuçlar', table: { header: { source: 'Kaynak', diff --git a/web/i18n/uk-UA/app-debug.ts b/web/i18n/uk-UA/app-debug.ts index 5bf7642c91..337da83e74 100644 --- a/web/i18n/uk-UA/app-debug.ts +++ b/web/i18n/uk-UA/app-debug.ts @@ -233,21 +233,6 @@ const translation = { }, }, automatic: { - title: 'Автоматизована оркестрація застосунків', - description: 'Опишіть свій сценарій, Dify збере для вас застосунок.', - intendedAudience: 'Хто є цільовою аудиторією?', - intendedAudiencePlaceHolder: 'напр. Студент', - solveProblem: 'Які проблеми вони сподіваються вирішити за допомогою AI?', - solveProblemPlaceHolder: 'напр. Оцінка успішності', - generate: 'Генерувати', - audiencesRequired: 'Необхідна аудиторія', - problemRequired: 'Необхідна проблема', - resTitle: 'Ми створили для вас такий застосунок.', - apply: 'Застосувати цю оркестрацію', - noData: 'Опишіть свій випадок використання зліва, тут буде показано попередній перегляд оркестрації.', - loading: 'Оркестрація програми для вас...', - overwriteTitle: 'Перезаписати існуючу конфігурацію?', - overwriteMessage: 'Застосування цієї оркестрації призведе до перезапису існуючої конфігурації.', }, resetConfig: { title: 'Підтвердіть скидання?', @@ -570,12 +555,9 @@ const translation = { apply: 'Застосовувати', tryIt: 'Спробуйте', overwriteTitle: 'Змінити існуючу конфігурацію?', - instructionPlaceHolder: 'Пишіть чіткі та конкретні інструкції.', loading: 'Оркестрування програми для вас...', - noDataLine1: 'Опишіть свій випадок використання зліва,', resTitle: 'Згенерований запит', title: 'Генератор підказок', - noDataLine2: 'Тут буде показано попередній перегляд оркестровки.', overwriteMessage: 'Застосування цього рядка замінить існуючу конфігурацію.', description: 'Генератор підказок використовує налаштовану модель для оптимізації запитів для кращої якості та кращої структури. 
Напишіть, будь ласка, зрозумілу та детальну інструкцію.', versions: 'Версії', diff --git a/web/i18n/uk-UA/billing.ts b/web/i18n/uk-UA/billing.ts index a048fe67cd..72fd9f6633 100644 --- a/web/i18n/uk-UA/billing.ts +++ b/web/i18n/uk-UA/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Отримайте повні можливості та підтримку для масштабних критично важливих систем.', includesTitle: 'Все, що входить до плану Team, плюс:', features: { - 4: 'Єдиний вхід', - 7: 'Оновлення та обслуговування від Dify Official', - 1: 'Авторизація комерційної ліцензії', - 8: 'Професійна технічна підтримка', - 2: 'Ексклюзивні функції підприємства', - 6: 'Розширені функції безпеки та керування', - 3: 'Кілька робочих областей і управління підприємством', - 5: 'Угода про рівень обслуговування за домовленістю від Dify Partners', - 0: 'Масштабовані рішення для розгортання корпоративного рівня', }, btnText: 'Зв\'язатися з відділом продажу', priceTip: 'Тільки річна оплата', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 1: 'Єдине робоче місце', - 2: 'Відповідає ліцензії Dify з відкритим вихідним кодом', - 0: 'Усі основні функції випущено в загальнодоступному репозиторії', }, btnText: 'Розпочніть з громади', includesTitle: 'Безкоштовні можливості:', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 1: 'Єдине робоче місце', - 2: 'Налаштування логотипу WebApp та брендингу', - 3: 'Пріоритетна підтримка електронною поштою та в чаті', - 0: 'Самокерована надійність різними хмарними провайдерами', }, description: 'Для середніх підприємств та команд', btnText: 'Отримайте Преміум у', diff --git a/web/i18n/uk-UA/common.ts b/web/i18n/uk-UA/common.ts index 550148ad32..c40b330eb4 100644 --- a/web/i18n/uk-UA/common.ts +++ b/web/i18n/uk-UA/common.ts @@ -198,7 +198,6 @@ const translation = { showAppLength: 'Показати {{length}} програм', delete: 'Видалити обліковий запис', deleteTip: 'Видалення вашого облікового запису призведе до остаточного видалення всіх ваших даних, і їх неможливо буде відновити.', - deleteConfirmTip: 'Щоб підтвердити, будь ласка, надішліть наступне з вашої зареєстрованої електронної пошти на ', account: 'Рахунок', studio: 'Студія Dify', myAccount: 'Особистий кабінет', diff --git a/web/i18n/uk-UA/dataset-creation.ts b/web/i18n/uk-UA/dataset-creation.ts index 8ea32c0d81..2685db70b4 100644 --- a/web/i18n/uk-UA/dataset-creation.ts +++ b/web/i18n/uk-UA/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Створити Знання', - update: 'Додати дані', fallbackRoute: 'Знання', }, one: 'Виберіть джерело даних', diff --git a/web/i18n/uk-UA/dataset-documents.ts b/web/i18n/uk-UA/dataset-documents.ts index f4a40081c5..fecc8fef47 100644 --- a/web/i18n/uk-UA/dataset-documents.ts +++ b/web/i18n/uk-UA/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: 'Синхронізувати', pause: 'Пауза', resume: 'Продовжити', - download: 'Завантажити файл', }, index: { enable: 'Активувати', @@ -254,7 +253,6 @@ const translation = { cs: 'Чеська', th: 'Тайська', id: 'Індонезійська', - uk: 'Українська', }, categoryMap: { book: { @@ -343,7 +341,6 @@ const translation = { keywords: 'Ключові слова', addKeyWord: 'Додати ключове слово', keywordError: 'Максимальна довжина ключового слова – 20 символів', - characters: 'символів', hitCount: 'Кількість пошуку', vectorHash: 'Векторний хеш: ', questionPlaceholder: 'додайте запитання тут', diff --git a/web/i18n/uk-UA/dataset-hit-testing.ts b/web/i18n/uk-UA/dataset-hit-testing.ts index 3567c098f2..65f4f1d6c0 
100644 --- a/web/i18n/uk-UA/dataset-hit-testing.ts +++ b/web/i18n/uk-UA/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Тестування вибірки', desc: 'Тестування ефективності пошуку знань на основі наданого текстового запиту.', dateTimeFormat: 'DD/MM/YYYY HH:mm A', - recents: 'Останні', table: { header: { source: 'Джерело', diff --git a/web/i18n/vi-VN/app-debug.ts b/web/i18n/vi-VN/app-debug.ts index bf34f04db5..9f6071da8e 100644 --- a/web/i18n/vi-VN/app-debug.ts +++ b/web/i18n/vi-VN/app-debug.ts @@ -228,21 +228,6 @@ const translation = { }, }, automatic: { - title: 'Tự động hóa triển khai ứng dụng', - description: 'Mô tả tình huống của bạn, Dify sẽ tự động hóa một ứng dụng cho bạn.', - intendedAudience: 'Đối tượng mục tiêu là ai?', - intendedAudiencePlaceHolder: 'Ví dụ: Sinh viên', - solveProblem: 'Họ hy vọng AI có thể giải quyết vấn đề gì?', - solveProblemPlaceHolder: 'Ví dụ: Đánh giá thành tích học tập', - generate: 'Tạo', - audiencesRequired: 'Yêu cầu nhập đối tượng mục tiêu', - problemRequired: 'Yêu cầu nhập vấn đề cần giải quyết', - resTitle: 'Chúng tôi đã tự động hóa ứng dụng sau đây cho bạn.', - apply: 'Áp dụng tự động hóa này', - noData: 'Mô tả tình huống sử dụng của bạn ở bên trái, xem trước tự động hóa sẽ hiển thị ở đây.', - loading: 'Đang tự động hóa ứng dụng cho bạn...', - overwriteTitle: 'Ghi đè cấu hình hiện tại?', - overwriteMessage: 'Áp dụng tự động hóa này sẽ ghi đè lên cấu hình hiện tại.', }, resetConfig: { title: 'Xác nhận đặt lại?', @@ -536,17 +521,14 @@ const translation = { }, generate: 'Đẻ ra', tryIt: 'Dùng thử', - noDataLine2: 'Bản xem trước Orchestration sẽ hiển thị ở đây.', apply: 'Áp dụng', instruction: 'Chỉ thị', title: 'Trình tạo nhắc nhở', resTitle: 'Lời nhắc được tạo', loading: 'Sắp xếp ứng dụng cho bạn...', - noDataLine1: 'Mô tả trường hợp sử dụng của bạn ở bên trái,', description: 'Trình tạo lời nhắc sử dụng mô hình được định cấu hình để tối ưu hóa lời nhắc cho chất lượng cao hơn và cấu trúc tốt hơn. 
Vui lòng viết hướng dẫn rõ ràng và chi tiết.', overwriteMessage: 'Áp dụng lời nhắc này sẽ ghi đè cấu hình hiện có.', overwriteTitle: 'Ghi đè cấu hình hiện có?', - instructionPlaceHolder: 'Viết hướng dẫn rõ ràng và cụ thể.', versions: 'Phiên bản', optimizationNote: 'Chú thích tối ưu hóa', to: 'đến', diff --git a/web/i18n/vi-VN/billing.ts b/web/i18n/vi-VN/billing.ts index 69035dc595..45c6529f74 100644 --- a/web/i18n/vi-VN/billing.ts +++ b/web/i18n/vi-VN/billing.ts @@ -115,15 +115,6 @@ const translation = { description: 'Nhận toàn bộ khả năng và hỗ trợ cho các hệ thống quan trọng cho nhiệm vụ quy mô lớn.', includesTitle: 'Tất cả trong kế hoạch Nhóm, cộng thêm:', features: { - 2: 'Các tính năng dành riêng cho doanh nghiệp', - 3: 'Nhiều không gian làm việc & quản lý doanh nghiệp', - 7: 'Cập nhật và bảo trì bởi Dify chính thức', - 4: 'SSO', - 8: 'Hỗ trợ kỹ thuật chuyên nghiệp', - 5: 'SLA được đàm phán bởi Dify Partners', - 1: 'Ủy quyền giấy phép thương mại', - 6: 'Bảo mật & Kiểm soát nâng cao', - 0: 'Giải pháp triển khai có thể mở rộng cấp doanh nghiệp', }, price: 'Tùy chỉnh', for: 'Dành cho các đội lớn', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 1: 'Không gian làm việc đơn', - 0: 'Tất cả các tính năng cốt lõi được phát hành trong kho lưu trữ công cộng', - 2: 'Tuân thủ Giấy phép nguồn mở Dify', }, description: 'Dành cho người dùng cá nhân, nhóm nhỏ hoặc các dự án phi thương mại', name: 'Cộng đồng', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 1: 'Không gian làm việc đơn', - 2: 'Logo WebApp & Tùy chỉnh thương hiệu', - 3: 'Hỗ trợ email & trò chuyện ưu tiên', - 0: 'Độ tin cậy tự quản lý của các nhà cung cấp đám mây khác nhau', }, comingSoon: 'Hỗ trợ Microsoft Azure & Google Cloud Sẽ Đến Sớm', priceTip: 'Dựa trên Thị trường Đám mây', diff --git a/web/i18n/vi-VN/common.ts b/web/i18n/vi-VN/common.ts index 384c4dbf61..60cf113ab2 100644 --- a/web/i18n/vi-VN/common.ts +++ b/web/i18n/vi-VN/common.ts @@ -198,7 +198,6 @@ const translation = { showAppLength: 'Hiển thị {{length}} ứng dụng', delete: 'Xóa tài khoản', deleteTip: 'Xóa tài khoản của bạn sẽ xóa vĩnh viễn tất cả dữ liệu của bạn và không thể khôi phục được.', - deleteConfirmTip: 'Để xác nhận, vui lòng gửi thông tin sau từ email đã đăng ký của bạn tới ', studio: 'Dify Studio', myAccount: 'Tài khoản của tôi', account: 'Tài khoản', diff --git a/web/i18n/vi-VN/dataset-creation.ts b/web/i18n/vi-VN/dataset-creation.ts index 39215fde68..63d44a93ea 100644 --- a/web/i18n/vi-VN/dataset-creation.ts +++ b/web/i18n/vi-VN/dataset-creation.ts @@ -1,8 +1,6 @@ const translation = { steps: { header: { - creation: 'Tạo Kiến thức', - update: 'Thêm dữ liệu', fallbackRoute: 'Kiến thức', }, one: 'Chọn nguồn dữ liệu', diff --git a/web/i18n/vi-VN/dataset-documents.ts b/web/i18n/vi-VN/dataset-documents.ts index 1f514a1d6f..1833b00588 100644 --- a/web/i18n/vi-VN/dataset-documents.ts +++ b/web/i18n/vi-VN/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: 'Đồng bộ', pause: 'Tạm dừng', resume: 'Tiếp tục', - download: 'Tải xuống tập tin', }, index: { enable: 'Kích hoạt', @@ -342,7 +341,6 @@ const translation = { keywords: 'Từ khóa', addKeyWord: 'Thêm từ khóa', keywordError: 'Độ dài tối đa của từ khóa là 20', - characters: 'ký tự', hitCount: 'Số lần truy vấn', vectorHash: 'Mã băm vector: ', questionPlaceholder: 'thêm câu hỏi ở đây', diff --git a/web/i18n/vi-VN/dataset-hit-testing.ts b/web/i18n/vi-VN/dataset-hit-testing.ts index 02a2547938..a08532ae17 100644 --- a/web/i18n/vi-VN/dataset-hit-testing.ts +++ 
b/web/i18n/vi-VN/dataset-hit-testing.ts @@ -2,7 +2,6 @@ const translation = { title: 'Kiểm tra truy vấn', desc: 'Kiểm tra hiệu quả truy xuất của Kiến thức dựa trên văn bản truy vấn đã cho.', dateTimeFormat: 'MM/DD/YYYY hh:mm A', - recents: 'Gần đây', table: { header: { source: 'Nguồn', diff --git a/web/i18n/zh-Hans/app-debug.ts b/web/i18n/zh-Hans/app-debug.ts index bb64f41bf1..1610a766f6 100644 --- a/web/i18n/zh-Hans/app-debug.ts +++ b/web/i18n/zh-Hans/app-debug.ts @@ -240,8 +240,6 @@ const translation = { apply: '应用', applyChanges: '应用更改', resTitle: '生成的代码', - newNoDataLine1: '在左侧描述您的用例,点击生成查看响应。', - newNoDataLine2: '了解提示词设计', overwriteConfirmTitle: '是否覆盖现有代码?', overwriteConfirmMessage: '此操作将覆盖现有代码。您确定要继续吗?', }, diff --git a/web/i18n/zh-Hans/dataset-documents.ts b/web/i18n/zh-Hans/dataset-documents.ts index 15e3071e51..581bc851f7 100644 --- a/web/i18n/zh-Hans/dataset-documents.ts +++ b/web/i18n/zh-Hans/dataset-documents.ts @@ -32,7 +32,6 @@ const translation = { sync: '同步', pause: '暂停', resume: '恢复', - download: '下载文件', }, index: { enable: '启用中', diff --git a/web/i18n/zh-Hant/app-debug.ts b/web/i18n/zh-Hant/app-debug.ts index d92a3bfd4e..7668e61663 100644 --- a/web/i18n/zh-Hant/app-debug.ts +++ b/web/i18n/zh-Hant/app-debug.ts @@ -523,16 +523,13 @@ const translation = { }, overwriteMessage: '應用此提示將覆蓋現有配置。', tryIt: '試試看', - noDataLine1: '在左側描述您的用例,', instruction: '指示', description: '提示生成器使用配置的模型來優化提示,以獲得更高的品質和更好的結構。請寫出清晰詳細的說明。', generate: '生成', apply: '應用', - instructionPlaceHolder: '寫出清晰具體的說明。', overwriteTitle: '覆蓋現有配置?', title: '提示生成器', loading: '為您編排應用程式...', - noDataLine2: '業務流程預覽將在此處顯示。', resTitle: '生成的提示', latest: '最新', to: '到', diff --git a/web/i18n/zh-Hant/billing.ts b/web/i18n/zh-Hant/billing.ts index bedf4550f8..f957bc4eab 100644 --- a/web/i18n/zh-Hant/billing.ts +++ b/web/i18n/zh-Hant/billing.ts @@ -115,15 +115,6 @@ const translation = { description: '獲得大規模關鍵任務系統的完整功能和支援。', includesTitle: 'Team 計劃中的一切,加上:', features: { - 8: '專業技術支持', - 3: '多個工作區和企業管理', - 0: '企業級可擴展部署解決方案', - 1: '商業許可證授權', - 7: 'Dify 官方更新和維護', - 6: '進階安全與控制', - 4: '單一登入', - 5: 'Dify 合作夥伴協商的 SLA', - 2: '獨家企業功能', }, price: '自訂', btnText: '聯繫銷售', @@ -132,9 +123,6 @@ const translation = { }, community: { features: { - 0: '所有核心功能在公共存儲庫下發布', - 1: '單一工作區', - 2: '符合 Dify 開源許可證', }, includesTitle: '免費功能:', btnText: '開始使用社區', @@ -145,10 +133,6 @@ const translation = { }, premium: { features: { - 3: '優先電子郵件和聊天支持', - 2: 'WebApp 標誌和品牌定制', - 0: '各種雲端供應商的自我管理可靠性', - 1: '單一工作區', }, for: '適用於中型組織和團隊', comingSoon: '微軟 Azure 與 Google Cloud 支持即將推出', diff --git a/web/i18n/zh-Hant/dataset-documents.ts b/web/i18n/zh-Hant/dataset-documents.ts index 7344db2df7..1b482f181f 100644 --- a/web/i18n/zh-Hant/dataset-documents.ts +++ b/web/i18n/zh-Hant/dataset-documents.ts @@ -30,7 +30,6 @@ const translation = { sync: '同步', resume: '恢復', pause: '暫停', - download: '下載檔案', }, index: { enable: '啟用中', From f70272f6389006d46f18531c0cae7ea53bd1d4db Mon Sep 17 00:00:00 2001 From: 17hz <0x149527@gmail.com> Date: Sun, 31 Aug 2025 17:08:29 +0800 Subject: [PATCH 16/96] refactor: replace clsx with classnames (#24776) --- .../components/base/pagination/pagination.tsx | 10 ++-- web/package.json | 1 - web/pnpm-lock.yaml | 47 +++++++++++++++++-- 3 files changed, 49 insertions(+), 9 deletions(-) diff --git a/web/app/components/base/pagination/pagination.tsx b/web/app/components/base/pagination/pagination.tsx index ec8b0355f4..6b99dcf9c0 100644 --- a/web/app/components/base/pagination/pagination.tsx +++ b/web/app/components/base/pagination/pagination.tsx @@ -1,5 
+1,5 @@ import React from 'react' -import clsx from 'clsx' +import cn from 'classnames' import usePagination from './hook' import type { ButtonProps, @@ -45,7 +45,7 @@ export const PrevButton = ({ previous()} tabIndex={disabled ? '-1' : 0} disabled={disabled} @@ -80,7 +80,7 @@ export const NextButton = ({ next()} tabIndex={disabled ? '-1' : 0} disabled={disabled} @@ -132,7 +132,7 @@ export const PageButton = ({
  • pagination.setCurrentPage(page - 1)} - className={clsx( + className={cn( className, pagination.currentPage + 1 === page ? activeClassName diff --git a/web/package.json b/web/package.json index e579d688a3..a422c7fd6c 100644 --- a/web/package.json +++ b/web/package.json @@ -73,7 +73,6 @@ "ahooks": "^3.8.4", "class-variance-authority": "^0.7.0", "classnames": "^2.5.1", - "clsx": "^2.1.1", "cmdk": "^1.1.1", "copy-to-clipboard": "^3.3.3", "crypto-js": "^4.2.0", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index b3695a0b89..3dbbf4f070 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -141,9 +141,6 @@ importers: classnames: specifier: ^2.5.1 version: 2.5.1 - clsx: - specifier: ^2.1.1 - version: 2.1.1 cmdk: specifier: ^1.1.1 version: 1.1.1(@types/react-dom@19.1.7(@types/react@19.1.11))(@types/react@19.1.11)(react-dom@19.1.1(react@19.1.1))(react@19.1.1) @@ -1724,144 +1721,170 @@ packages: resolution: {integrity: sha512-9B+taZ8DlyyqzZQnoeIvDVR/2F4EbMepXMc/NdVbkzsJbzkUjhXv/70GQJ7tdLA4YJgNP25zukcxpX2/SueNrA==} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-arm64@1.2.0': resolution: {integrity: sha512-RXwd0CgG+uPRX5YYrkzKyalt2OJYRiJQ8ED/fi1tq9WQW2jsQIn0tqrlR5l5dr/rjqq6AHAxURhj2DVjyQWSOA==} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-arm@1.0.5': resolution: {integrity: sha512-gvcC4ACAOPRNATg/ov8/MnbxFDJqf/pDePbBnuBDcjsI8PssmjoKMAz4LtLaVi+OnSb5FK/yIOamqDwGmXW32g==} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-arm@1.2.0': resolution: {integrity: sha512-mWd2uWvDtL/nvIzThLq3fr2nnGfyr/XMXlq8ZJ9WMR6PXijHlC3ksp0IpuhK6bougvQrchUAfzRLnbsen0Cqvw==} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-ppc64@1.2.0': resolution: {integrity: sha512-Xod/7KaDDHkYu2phxxfeEPXfVXFKx70EAFZ0qyUdOjCcxbjqyJOEUpDe6RIyaunGxT34Anf9ue/wuWOqBW2WcQ==} cpu: [ppc64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-s390x@1.0.4': resolution: {integrity: sha512-u7Wz6ntiSSgGSGcjZ55im6uvTrOxSIS8/dgoVMoiGE9I6JAfU50yH5BoDlYA1tcuGS7g/QNtetJnxA6QEsCVTA==} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-s390x@1.2.0': resolution: {integrity: sha512-eMKfzDxLGT8mnmPJTNMcjfO33fLiTDsrMlUVcp6b96ETbnJmd4uvZxVJSKPQfS+odwfVaGifhsB07J1LynFehw==} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-x64@1.0.4': resolution: {integrity: sha512-MmWmQ3iPFZr0Iev+BAgVMb3ZyC4KeFc3jFxnNbEPas60e1cIfevbtuyf9nDGIzOaW9PdnDciJm+wFFaTlj5xYw==} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linux-x64@1.2.0': resolution: {integrity: sha512-ZW3FPWIc7K1sH9E3nxIGB3y3dZkpJlMnkk7z5tu1nSkBoCgw2nSRTFHI5pB/3CQaJM0pdzMF3paf9ckKMSE9Tg==} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-libvips-linuxmusl-arm64@1.0.4': resolution: {integrity: sha512-9Ti+BbTYDcsbp4wfYib8Ctm1ilkugkA/uscUn6UXK1ldpC1JjiXbLfFZtRlBhjPZ5o1NCLiDbg8fhUPKStHoTA==} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-libvips-linuxmusl-arm64@1.2.0': resolution: {integrity: sha512-UG+LqQJbf5VJ8NWJ5Z3tdIe/HXjuIdo4JeVNADXBFuG7z9zjoegpzzGIyV5zQKi4zaJjnAd2+g2nna8TZvuW9Q==} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-libvips-linuxmusl-x64@1.0.4': resolution: {integrity: sha512-viYN1KX9m+/hGkJtvYYp+CCLgnJXwiQB39damAO7WMdKWlIhmYTfHjwSbQeUK/20vY154mwezd9HflVFM1wVSw==} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-libvips-linuxmusl-x64@1.2.0': resolution: {integrity: sha512-SRYOLR7CXPgNze8akZwjoGBoN1ThNZoqpOgfnOxmWsklTGVfJiGJoC/Lod7aNMGA1jSsKWM1+HRX43OP6p9+6Q==} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-linux-arm64@0.33.5': resolution: {integrity: 
sha512-JMVv+AMRyGOHtO1RFBiJy/MBsgz0x4AWrT6QoEVVTyh1E39TrCUpTRI7mx9VksGX4awWASxqCYLCV4wBZHAYxA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-linux-arm64@0.34.3': resolution: {integrity: sha512-QdrKe3EvQrqwkDrtuTIjI0bu6YEJHTgEeqdzI3uWJOH6G1O8Nl1iEeVYRGdj1h5I21CqxSvQp1Yv7xeU3ZewbA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [glibc] '@img/sharp-linux-arm@0.33.5': resolution: {integrity: sha512-JTS1eldqZbJxjvKaAkxhZmBqPRGmxgu+qFKSInv8moZ2AmT5Yib3EQ1c6gp493HvrvV8QgdOXdyaIBrhvFhBMQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-linux-arm@0.34.3': resolution: {integrity: sha512-oBK9l+h6KBN0i3dC8rYntLiVfW8D8wH+NPNT3O/WBHeW0OQWCjfWksLUaPidsrDKpJgXp3G3/hkmhptAW0I3+A==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm] os: [linux] + libc: [glibc] '@img/sharp-linux-ppc64@0.34.3': resolution: {integrity: sha512-GLtbLQMCNC5nxuImPR2+RgrviwKwVql28FWZIW1zWruy6zLgA5/x2ZXk3mxj58X/tszVF69KK0Is83V8YgWhLA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [ppc64] os: [linux] + libc: [glibc] '@img/sharp-linux-s390x@0.33.5': resolution: {integrity: sha512-y/5PCd+mP4CA/sPDKl2961b+C9d+vPAveS33s6Z3zfASk2j5upL6fXVPZi7ztePZ5CuH+1kW8JtvxgbuXHRa4Q==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-linux-s390x@0.34.3': resolution: {integrity: sha512-3gahT+A6c4cdc2edhsLHmIOXMb17ltffJlxR0aC2VPZfwKoTGZec6u5GrFgdR7ciJSsHT27BD3TIuGcuRT0KmQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [s390x] os: [linux] + libc: [glibc] '@img/sharp-linux-x64@0.33.5': resolution: {integrity: sha512-opC+Ok5pRNAzuvq1AG0ar+1owsu842/Ab+4qvU879ippJBHvyY5n2mxF1izXqkPYlGuP/M556uh53jRLJmzTWA==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-linux-x64@0.34.3': resolution: {integrity: sha512-8kYso8d806ypnSq3/Ly0QEw90V5ZoHh10yH0HnrzOCr6DKAPI6QVHvwleqMkVQ0m+fc7EH8ah0BB0QPuWY6zJQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [glibc] '@img/sharp-linuxmusl-arm64@0.33.5': resolution: {integrity: sha512-XrHMZwGQGvJg2V/oRSUfSAfjfPxO+4DkiRh6p2AFjLQztWUuY/o8Mq0eMQVIY7HJ1CDQUJlxGGZRw1a5bqmd1g==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-linuxmusl-arm64@0.34.3': resolution: {integrity: sha512-vAjbHDlr4izEiXM1OTggpCcPg9tn4YriK5vAjowJsHwdBIdx0fYRsURkxLG2RLm9gyBq66gwtWI8Gx0/ov+JKQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [arm64] os: [linux] + libc: [musl] '@img/sharp-linuxmusl-x64@0.33.5': resolution: {integrity: sha512-WT+d/cgqKkkKySYmqoZ8y3pxx7lx9vVejxW/W4DOFMYVSkErR+w7mf2u8m/y4+xHe7yY9DAXQMWQhpnMuFfScw==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-linuxmusl-x64@0.34.3': resolution: {integrity: sha512-gCWUn9547K5bwvOn9l5XGAEjVTTRji4aPTqLzGXHvIr6bIDZKNTA34seMPgM0WmSf+RYBH411VavCejp3PkOeQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} cpu: [x64] os: [linux] + libc: [musl] '@img/sharp-wasm32@0.33.5': resolution: {integrity: sha512-ykUW4LVGaMcU9lu9thv85CbRMAwfeadCJHRsg2GmeRa/cJxsVY9Rbd57JcMxBkKHag5U/x7TSBpScF4U8ElVzg==} @@ -2145,24 +2168,28 @@ packages: engines: {node: '>= 10'} cpu: [arm64] os: [linux] + libc: [glibc] '@next/swc-linux-arm64-musl@15.5.0': resolution: {integrity: sha512-biWqIOE17OW/6S34t1X8K/3vb1+svp5ji5QQT/IKR+VfM3B7GvlCwmz5XtlEan2ukOUf9tj2vJJBffaGH4fGRw==} engines: {node: '>= 10'} cpu: [arm64] os: 
[linux] + libc: [musl] '@next/swc-linux-x64-gnu@15.5.0': resolution: {integrity: sha512-zPisT+obYypM/l6EZ0yRkK3LEuoZqHaSoYKj+5jiD9ESHwdr6QhnabnNxYkdy34uCigNlWIaCbjFmQ8FY5AlxA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [glibc] '@next/swc-linux-x64-musl@15.5.0': resolution: {integrity: sha512-+t3+7GoU9IYmk+N+FHKBNFdahaReoAktdOpXHFIPOU1ixxtdge26NgQEEkJkCw2dHT9UwwK5zw4mAsURw4E8jA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] + libc: [musl] '@next/swc-win32-arm64-msvc@15.5.0': resolution: {integrity: sha512-d8MrXKh0A+c9DLiy1BUFwtg3Hu90Lucj3k6iKTUdPOv42Ve2UiIG8HYi3UAb8kFVluXxEfdpCoPPCSODk5fDcw==} @@ -2384,36 +2411,42 @@ packages: engines: {node: '>= 10.0.0'} cpu: [arm] os: [linux] + libc: [glibc] '@parcel/watcher-linux-arm-musl@2.5.1': resolution: {integrity: sha512-6E+m/Mm1t1yhB8X412stiKFG3XykmgdIOqhjWj+VL8oHkKABfu/gjFj8DvLrYVHSBNC+/u5PeNrujiSQ1zwd1Q==} engines: {node: '>= 10.0.0'} cpu: [arm] os: [linux] + libc: [musl] '@parcel/watcher-linux-arm64-glibc@2.5.1': resolution: {integrity: sha512-LrGp+f02yU3BN9A+DGuY3v3bmnFUggAITBGriZHUREfNEzZh/GO06FF5u2kx8x+GBEUYfyTGamol4j3m9ANe8w==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [linux] + libc: [glibc] '@parcel/watcher-linux-arm64-musl@2.5.1': resolution: {integrity: sha512-cFOjABi92pMYRXS7AcQv9/M1YuKRw8SZniCDw0ssQb/noPkRzA+HBDkwmyOJYp5wXcsTrhxO0zq1U11cK9jsFg==} engines: {node: '>= 10.0.0'} cpu: [arm64] os: [linux] + libc: [musl] '@parcel/watcher-linux-x64-glibc@2.5.1': resolution: {integrity: sha512-GcESn8NZySmfwlTsIur+49yDqSny2IhPeZfXunQi48DMugKeZ7uy1FX83pO0X22sHntJ4Ub+9k34XQCX+oHt2A==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [linux] + libc: [glibc] '@parcel/watcher-linux-x64-musl@2.5.1': resolution: {integrity: sha512-n0E2EQbatQ3bXhcH2D1XIAANAcTZkQICBPVaxMeaCVBtOpBZpWJuf7LwyWPSBDITb7In8mqQgJ7gH8CILCURXg==} engines: {node: '>= 10.0.0'} cpu: [x64] os: [linux] + libc: [musl] '@parcel/watcher-win32-arm64@2.5.1': resolution: {integrity: sha512-RFzklRvmc3PkjKjry3hLF9wD7ppR4AKcWNzH7kXR7GUe0Igb3Nz8fyPwtZCSquGrhU5HhUNDr/mKBqj7tqA2Vw==} @@ -3528,41 +3561,49 @@ packages: resolution: {integrity: sha512-34gw7PjDGB9JgePJEmhEqBhWvCiiWCuXsL9hYphDF7crW7UgI05gyBAi6MF58uGcMOiOqSJ2ybEeCvHcq0BCmQ==} cpu: [arm64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-arm64-musl@1.11.1': resolution: {integrity: sha512-RyMIx6Uf53hhOtJDIamSbTskA99sPHS96wxVE/bJtePJJtpdKGXO1wY90oRdXuYOGOTuqjT8ACccMc4K6QmT3w==} cpu: [arm64] os: [linux] + libc: [musl] '@unrs/resolver-binding-linux-ppc64-gnu@1.11.1': resolution: {integrity: sha512-D8Vae74A4/a+mZH0FbOkFJL9DSK2R6TFPC9M+jCWYia/q2einCubX10pecpDiTmkJVUH+y8K3BZClycD8nCShA==} cpu: [ppc64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-riscv64-gnu@1.11.1': resolution: {integrity: sha512-frxL4OrzOWVVsOc96+V3aqTIQl1O2TjgExV4EKgRY09AJ9leZpEg8Ak9phadbuX0BA4k8U5qtvMSQQGGmaJqcQ==} cpu: [riscv64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-riscv64-musl@1.11.1': resolution: {integrity: sha512-mJ5vuDaIZ+l/acv01sHoXfpnyrNKOk/3aDoEdLO/Xtn9HuZlDD6jKxHlkN8ZhWyLJsRBxfv9GYM2utQ1SChKew==} cpu: [riscv64] os: [linux] + libc: [musl] '@unrs/resolver-binding-linux-s390x-gnu@1.11.1': resolution: {integrity: sha512-kELo8ebBVtb9sA7rMe1Cph4QHreByhaZ2QEADd9NzIQsYNQpt9UkM9iqr2lhGr5afh885d/cB5QeTXSbZHTYPg==} cpu: [s390x] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-x64-gnu@1.11.1': resolution: {integrity: sha512-C3ZAHugKgovV5YvAMsxhq0gtXuwESUKc5MhEtjBpLoHPLYM+iuwSj3lflFwK3DPm68660rZ7G8BMcwSro7hD5w==} cpu: [x64] os: [linux] + libc: [glibc] '@unrs/resolver-binding-linux-x64-musl@1.11.1': 
resolution: {integrity: sha512-rV0YSoyhK2nZ4vEswT/QwqzqQXw5I6CjoaYMOX0TqBlWhojUf8P94mvI7nuJTeaCkkds3QE4+zS8Ko+GdXuZtA==} cpu: [x64] os: [linux] + libc: [musl] '@unrs/resolver-binding-wasm32-wasi@1.11.1': resolution: {integrity: sha512-5u4RkfxJm+Ng7IWgkzi3qrFOvLvQYnPBmjmZQ8+szTK/b31fQCnleNl1GgEt7nIsZRIf5PLhPwT0WM+q45x/UQ==} From cb04c211418e70835946a1a5aff2f51d6326a9a0 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Mon, 1 Sep 2025 00:21:41 +0900 Subject: [PATCH 17/96] model_config = ConfigDict(extra='allow') (#24859) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/core/app/app_config/features/more_like_this/manager.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/api/core/app/app_config/features/more_like_this/manager.py b/api/core/app/app_config/features/more_like_this/manager.py index f0ec6b0f6f..5d5c5ffd7f 100644 --- a/api/core/app/app_config/features/more_like_this/manager.py +++ b/api/core/app/app_config/features/more_like_this/manager.py @@ -1,12 +1,14 @@ -from pydantic import BaseModel, Field, ValidationError +from pydantic import BaseModel, ConfigDict, Field, ValidationError class MoreLikeThisConfig(BaseModel): enabled: bool = False + model_config = ConfigDict(extra="allow") class AppConfigModel(BaseModel): more_like_this: MoreLikeThisConfig = Field(default_factory=MoreLikeThisConfig) + model_config = ConfigDict(extra="allow") class MoreLikeThisConfigManager: @@ -23,7 +25,7 @@ class MoreLikeThisConfigManager: @classmethod def validate_and_set_defaults(cls, config: dict) -> tuple[dict, list[str]]: try: - return AppConfigModel.model_validate(config).dict(), ["more_like_this"] + return AppConfigModel.model_validate(config).model_dump(), ["more_like_this"] except ValidationError as e: raise ValueError( "more_like_this must be of dict type and enabled in more_like_this must be of boolean type" From e2f4c9ba8d0ff7ba31f415101c607820efcefa07 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 02:08:08 +0800 Subject: [PATCH 18/96] refactor(graph_engine): Merge state managers into unified_state_manager Signed-off-by: -LAN- --- api/.importlinter | 18 +- .../event_management/event_handlers.py | 6 +- .../workflow/graph_engine/graph_engine.py | 30 +- .../graph_traversal/branch_handler.py | 4 +- .../graph_traversal/edge_processor.py | 6 +- .../graph_traversal/skip_propagator.py | 6 +- .../orchestration/execution_coordinator.py | 6 +- .../graph_engine/state_management/__init__.py | 8 +- .../state_management/edge_state_manager.py | 114 ------ .../state_management/execution_tracker.py | 89 ----- .../state_management/node_state_manager.py | 97 ----- .../state_management/unified_state_manager.py | 343 +++++++++++++++++ .../worker_management/enhanced_worker_pool.py | 360 ++++++++++++++++++ 13 files changed, 737 insertions(+), 350 deletions(-) delete mode 100644 api/core/workflow/graph_engine/state_management/edge_state_manager.py delete mode 100644 api/core/workflow/graph_engine/state_management/execution_tracker.py delete mode 100644 api/core/workflow/graph_engine/state_management/node_state_manager.py create mode 100644 api/core/workflow/graph_engine/state_management/unified_state_manager.py create mode 100644 api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py diff --git a/api/.importlinter b/api/.importlinter index 9aa1073c38..9205b7c94d 100644 --- a/api/.importlinter +++ b/api/.importlinter @@ -34,7 +34,7 @@ ignore_imports = [importlinter:contract:rsc] name = RSC type = layers -layers = +layers = 
graph_engine response_coordinator output_registry @@ -44,7 +44,7 @@ containers = [importlinter:contract:worker] name = Worker type = layers -layers = +layers = graph_engine worker containers = @@ -77,18 +77,8 @@ forbidden_modules = core.workflow.graph_engine.layers core.workflow.graph_engine.protocols -[importlinter:contract:state-management-layers] -name = State Management Layers -type = layers -layers = - execution_tracker - node_state_manager - edge_state_manager -containers = - core.workflow.graph_engine.state_management - [importlinter:contract:worker-management-layers] -name = Worker Management Layers +name = Worker Management Layers type = layers layers = worker_pool @@ -119,4 +109,4 @@ name = Command Channels Independence type = independence modules = core.workflow.graph_engine.command_channels.in_memory_channel - core.workflow.graph_engine.command_channels.redis_channel \ No newline at end of file + core.workflow.graph_engine.command_channels.redis_channel diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py index 842bd2635f..bdd1c4d245 100644 --- a/api/core/workflow/graph_engine/event_management/event_handlers.py +++ b/api/core/workflow/graph_engine/event_management/event_handlers.py @@ -32,7 +32,7 @@ from ..response_coordinator import ResponseStreamCoordinator if TYPE_CHECKING: from ..error_handling import ErrorHandler from ..graph_traversal import BranchHandler, EdgeProcessor - from ..state_management import ExecutionTracker, NodeStateManager + from ..state_management import UnifiedStateManager from .event_collector import EventCollector logger = logging.getLogger(__name__) @@ -56,8 +56,8 @@ class EventHandlerRegistry: event_collector: "EventCollector", branch_handler: "BranchHandler", edge_processor: "EdgeProcessor", - node_state_manager: "NodeStateManager", - execution_tracker: "ExecutionTracker", + node_state_manager: "UnifiedStateManager", + execution_tracker: "UnifiedStateManager", error_handler: "ErrorHandler", ) -> None: """ diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 828e9b329f..7398b846d8 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -39,7 +39,7 @@ from .orchestration import Dispatcher, ExecutionCoordinator from .output_registry import OutputRegistry from .protocols.command_channel import CommandChannel from .response_coordinator import ResponseStreamCoordinator -from .state_management import EdgeStateManager, ExecutionTracker, NodeStateManager +from .state_management import UnifiedStateManager from .worker_management import ActivityTracker, DynamicScaler, WorkerFactory, WorkerPool logger = logging.getLogger(__name__) @@ -119,10 +119,8 @@ class GraphEngine: def _initialize_subsystems(self) -> None: """Initialize all subsystems with proper dependency injection.""" - # State management - self.node_state_manager = NodeStateManager(self.graph, self.ready_queue) - self.edge_state_manager = EdgeStateManager(self.graph) - self.execution_tracker = ExecutionTracker() + # Unified state management - single instance handles all state operations + self.state_manager = UnifiedStateManager(self.graph, self.ready_queue) # Response coordination self.output_registry = OutputRegistry(self.graph_runtime_state.variable_pool) @@ -139,20 +137,20 @@ class GraphEngine: self.node_readiness_checker = NodeReadinessChecker(self.graph) self.edge_processor = 
EdgeProcessor( graph=self.graph, - edge_state_manager=self.edge_state_manager, - node_state_manager=self.node_state_manager, + edge_state_manager=self.state_manager, + node_state_manager=self.state_manager, response_coordinator=self.response_coordinator, ) self.skip_propagator = SkipPropagator( graph=self.graph, - edge_state_manager=self.edge_state_manager, - node_state_manager=self.node_state_manager, + edge_state_manager=self.state_manager, + node_state_manager=self.state_manager, ) self.branch_handler = BranchHandler( graph=self.graph, edge_processor=self.edge_processor, skip_propagator=self.skip_propagator, - edge_state_manager=self.edge_state_manager, + edge_state_manager=self.state_manager, ) # Event handler registry with all dependencies @@ -164,8 +162,8 @@ class GraphEngine: event_collector=self.event_collector, branch_handler=self.branch_handler, edge_processor=self.edge_processor, - node_state_manager=self.node_state_manager, - execution_tracker=self.execution_tracker, + node_state_manager=self.state_manager, + execution_tracker=self.state_manager, error_handler=self.error_handler, ) @@ -182,8 +180,8 @@ class GraphEngine: # Orchestration self.execution_coordinator = ExecutionCoordinator( graph_execution=self.graph_execution, - node_state_manager=self.node_state_manager, - execution_tracker=self.execution_tracker, + node_state_manager=self.state_manager, + execution_tracker=self.state_manager, event_handler=self.event_handler_registry, event_collector=self.event_collector, command_processor=self.command_processor, @@ -335,8 +333,8 @@ class GraphEngine: # Enqueue root node root_node = self.graph.root_node - self.node_state_manager.enqueue_node(root_node.id) - self.execution_tracker.add(root_node.id) + self.state_manager.enqueue_node(root_node.id) + self.state_manager.add(root_node.id) # Start dispatcher self.dispatcher.start() diff --git a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py index b371f3bc73..8e08a03e3c 100644 --- a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py +++ b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py @@ -8,7 +8,7 @@ from typing import final from core.workflow.graph import Graph from core.workflow.graph_events.node import NodeRunStreamChunkEvent -from ..state_management import EdgeStateManager +from ..state_management import UnifiedStateManager from .edge_processor import EdgeProcessor from .skip_propagator import SkipPropagator @@ -27,7 +27,7 @@ class BranchHandler: graph: Graph, edge_processor: EdgeProcessor, skip_propagator: SkipPropagator, - edge_state_manager: EdgeStateManager, + edge_state_manager: UnifiedStateManager, ) -> None: """ Initialize the branch handler. 
diff --git a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py index ac2c658b4b..6efb56f046 100644 --- a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py +++ b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py @@ -10,7 +10,7 @@ from core.workflow.graph import Edge, Graph from core.workflow.graph_events import NodeRunStreamChunkEvent from ..response_coordinator import ResponseStreamCoordinator -from ..state_management import EdgeStateManager, NodeStateManager +from ..state_management import UnifiedStateManager @final @@ -25,8 +25,8 @@ class EdgeProcessor: def __init__( self, graph: Graph, - edge_state_manager: EdgeStateManager, - node_state_manager: NodeStateManager, + edge_state_manager: UnifiedStateManager, + node_state_manager: UnifiedStateManager, response_coordinator: ResponseStreamCoordinator, ) -> None: """ diff --git a/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py b/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py index 5ac445d405..01426809eb 100644 --- a/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py +++ b/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py @@ -7,7 +7,7 @@ from typing import final from core.workflow.graph import Edge, Graph -from ..state_management import EdgeStateManager, NodeStateManager +from ..state_management import UnifiedStateManager @final @@ -22,8 +22,8 @@ class SkipPropagator: def __init__( self, graph: Graph, - edge_state_manager: EdgeStateManager, - node_state_manager: NodeStateManager, + edge_state_manager: UnifiedStateManager, + node_state_manager: UnifiedStateManager, ) -> None: """ Initialize the skip propagator. diff --git a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py index 5f95b5b29e..3d9783703e 100644 --- a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py +++ b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py @@ -7,7 +7,7 @@ from typing import TYPE_CHECKING, final from ..command_processing import CommandProcessor from ..domain import GraphExecution from ..event_management import EventCollector -from ..state_management import ExecutionTracker, NodeStateManager +from ..state_management import UnifiedStateManager from ..worker_management import WorkerPool if TYPE_CHECKING: @@ -26,8 +26,8 @@ class ExecutionCoordinator: def __init__( self, graph_execution: GraphExecution, - node_state_manager: NodeStateManager, - execution_tracker: ExecutionTracker, + node_state_manager: UnifiedStateManager, + execution_tracker: UnifiedStateManager, event_handler: "EventHandlerRegistry", event_collector: EventCollector, command_processor: CommandProcessor, diff --git a/api/core/workflow/graph_engine/state_management/__init__.py b/api/core/workflow/graph_engine/state_management/__init__.py index 6680696ed2..9a632a3b9f 100644 --- a/api/core/workflow/graph_engine/state_management/__init__.py +++ b/api/core/workflow/graph_engine/state_management/__init__.py @@ -5,12 +5,8 @@ This package manages node states, edge states, and execution tracking during workflow graph execution. 
""" -from .edge_state_manager import EdgeStateManager -from .execution_tracker import ExecutionTracker -from .node_state_manager import NodeStateManager +from .unified_state_manager import UnifiedStateManager __all__ = [ - "EdgeStateManager", - "ExecutionTracker", - "NodeStateManager", + "UnifiedStateManager", ] diff --git a/api/core/workflow/graph_engine/state_management/edge_state_manager.py b/api/core/workflow/graph_engine/state_management/edge_state_manager.py deleted file mode 100644 index 747062284a..0000000000 --- a/api/core/workflow/graph_engine/state_management/edge_state_manager.py +++ /dev/null @@ -1,114 +0,0 @@ -""" -Manager for edge states during graph execution. -""" - -import threading -from collections.abc import Sequence -from typing import TypedDict, final - -from core.workflow.enums import NodeState -from core.workflow.graph import Edge, Graph - - -class EdgeStateAnalysis(TypedDict): - """Analysis result for edge states.""" - - has_unknown: bool - has_taken: bool - all_skipped: bool - - -@final -class EdgeStateManager: - """ - Manages edge states and transitions during graph execution. - - This handles edge state changes and provides analysis of edge - states for decision making during execution. - """ - - def __init__(self, graph: Graph) -> None: - """ - Initialize the edge state manager. - - Args: - graph: The workflow graph - """ - self.graph = graph - self._lock = threading.RLock() - - def mark_edge_taken(self, edge_id: str) -> None: - """ - Mark an edge as TAKEN. - - Args: - edge_id: The ID of the edge to mark - """ - with self._lock: - self.graph.edges[edge_id].state = NodeState.TAKEN - - def mark_edge_skipped(self, edge_id: str) -> None: - """ - Mark an edge as SKIPPED. - - Args: - edge_id: The ID of the edge to mark - """ - with self._lock: - self.graph.edges[edge_id].state = NodeState.SKIPPED - - def analyze_edge_states(self, edges: list[Edge]) -> EdgeStateAnalysis: - """ - Analyze the states of edges and return summary flags. - - Args: - edges: List of edges to analyze - - Returns: - Analysis result with state flags - """ - with self._lock: - states = {edge.state for edge in edges} - - return EdgeStateAnalysis( - has_unknown=NodeState.UNKNOWN in states, - has_taken=NodeState.TAKEN in states, - all_skipped=states == {NodeState.SKIPPED} if states else True, - ) - - def get_edge_state(self, edge_id: str) -> NodeState: - """ - Get the current state of an edge. - - Args: - edge_id: The ID of the edge - - Returns: - The current edge state - """ - with self._lock: - return self.graph.edges[edge_id].state - - def categorize_branch_edges(self, node_id: str, selected_handle: str) -> tuple[Sequence[Edge], Sequence[Edge]]: - """ - Categorize branch edges into selected and unselected. 
- - Args: - node_id: The ID of the branch node - selected_handle: The handle of the selected edge - - Returns: - A tuple of (selected_edges, unselected_edges) - """ - with self._lock: - outgoing_edges = self.graph.get_outgoing_edges(node_id) - selected_edges: list[Edge] = [] - unselected_edges: list[Edge] = [] - - for edge in outgoing_edges: - if edge.source_handle == selected_handle: - selected_edges.append(edge) - else: - unselected_edges.append(edge) - - return selected_edges, unselected_edges diff --git a/api/core/workflow/graph_engine/state_management/execution_tracker.py b/api/core/workflow/graph_engine/state_management/execution_tracker.py deleted file mode 100644 index 01fa80f2ce..0000000000 --- a/api/core/workflow/graph_engine/state_management/execution_tracker.py +++ /dev/null @@ -1,89 +0,0 @@ -""" -Tracker for currently executing nodes. -""" - -import threading -from typing import final - - -@final -class ExecutionTracker: - """ - Tracks nodes that are currently being executed. - - This replaces the ExecutingNodesManager with a cleaner interface - focused on tracking which nodes are in progress. - """ - - def __init__(self) -> None: - """Initialize the execution tracker.""" - self._executing_nodes: set[str] = set() - self._lock = threading.RLock() - - def add(self, node_id: str) -> None: - """ - Mark a node as executing. - - Args: - node_id: The ID of the node starting execution - """ - with self._lock: - self._executing_nodes.add(node_id) - - def remove(self, node_id: str) -> None: - """ - Mark a node as no longer executing. - - Args: - node_id: The ID of the node finishing execution - """ - with self._lock: - self._executing_nodes.discard(node_id) - - def is_executing(self, node_id: str) -> bool: - """ - Check if a node is currently executing. - - Args: - node_id: The ID of the node to check - - Returns: - True if the node is executing - """ - with self._lock: - return node_id in self._executing_nodes - - def is_empty(self) -> bool: - """ - Check if no nodes are currently executing. - - Returns: - True if no nodes are executing - """ - with self._lock: - return len(self._executing_nodes) == 0 - - def count(self) -> int: - """ - Get the count of currently executing nodes. - - Returns: - Number of executing nodes - """ - with self._lock: - return len(self._executing_nodes) - - def get_executing_nodes(self) -> set[str]: - """ - Get a copy of the set of executing node IDs. - - Returns: - Set of node IDs currently executing - """ - with self._lock: - return self._executing_nodes.copy() - - def clear(self) -> None: - """Clear all executing nodes.""" - with self._lock: - self._executing_nodes.clear() diff --git a/api/core/workflow/graph_engine/state_management/node_state_manager.py b/api/core/workflow/graph_engine/state_management/node_state_manager.py deleted file mode 100644 index d5ed42ad1d..0000000000 --- a/api/core/workflow/graph_engine/state_management/node_state_manager.py +++ /dev/null @@ -1,97 +0,0 @@ -""" -Manager for node states during graph execution. -""" - -import queue -import threading -from typing import final - -from core.workflow.enums import NodeState -from core.workflow.graph import Graph - - -@final -class NodeStateManager: - """ - Manages node states and the ready queue for execution. - - This centralizes node state transitions and enqueueing logic, - ensuring thread-safe operations on node states. - """ - - def __init__(self, graph: Graph, ready_queue: queue.Queue[str]) -> None: - """ - Initialize the node state manager. 
- - Args: - graph: The workflow graph - ready_queue: Queue for nodes ready to execute - """ - self.graph = graph - self.ready_queue = ready_queue - self._lock = threading.RLock() - - def enqueue_node(self, node_id: str) -> None: - """ - Mark a node as TAKEN and add it to the ready queue. - - This combines the state transition and enqueueing operations - that always occur together when preparing a node for execution. - - Args: - node_id: The ID of the node to enqueue - """ - with self._lock: - self.graph.nodes[node_id].state = NodeState.TAKEN - self.ready_queue.put(node_id) - - def mark_node_skipped(self, node_id: str) -> None: - """ - Mark a node as SKIPPED. - - Args: - node_id: The ID of the node to skip - """ - with self._lock: - self.graph.nodes[node_id].state = NodeState.SKIPPED - - def is_node_ready(self, node_id: str) -> bool: - """ - Check if a node is ready to be executed. - - A node is ready when all its incoming edges from taken branches - have been satisfied. - - Args: - node_id: The ID of the node to check - - Returns: - True if the node is ready for execution - """ - with self._lock: - # Get all incoming edges to this node - incoming_edges = self.graph.get_incoming_edges(node_id) - - # If no incoming edges, node is always ready - if not incoming_edges: - return True - - # If any edge is UNKNOWN, node is not ready - if any(edge.state == NodeState.UNKNOWN for edge in incoming_edges): - return False - - # Node is ready if at least one edge is TAKEN - return any(edge.state == NodeState.TAKEN for edge in incoming_edges) - - def get_node_state(self, node_id: str) -> NodeState: - """ - Get the current state of a node. - - Args: - node_id: The ID of the node - - Returns: - The current node state - """ - with self._lock: - return self.graph.nodes[node_id].state diff --git a/api/core/workflow/graph_engine/state_management/unified_state_manager.py b/api/core/workflow/graph_engine/state_management/unified_state_manager.py new file mode 100644 index 0000000000..3f50b68213 --- /dev/null +++ b/api/core/workflow/graph_engine/state_management/unified_state_manager.py @@ -0,0 +1,343 @@ +""" +Unified state manager that combines node, edge, and execution tracking. + +This is a proposed simplification that merges NodeStateManager, EdgeStateManager, +and ExecutionTracker into a single cohesive class. +""" + +import queue +import threading +from collections.abc import Sequence +from typing import TypedDict, final + +from core.workflow.enums import NodeState +from core.workflow.graph import Edge, Graph + + +class EdgeStateAnalysis(TypedDict): + """Analysis result for edge states.""" + + has_unknown: bool + has_taken: bool + all_skipped: bool + + +@final +class UnifiedStateManager: + """ + Unified manager for all graph state operations. + + This class combines the responsibilities of: + - NodeStateManager: Node state transitions and ready queue + - EdgeStateManager: Edge state transitions and analysis + - ExecutionTracker: Tracking executing nodes + + Benefits: + - Single lock for all state operations (reduced contention) + - Cohesive state management interface + - Simplified dependency injection + """ + + def __init__(self, graph: Graph, ready_queue: queue.Queue[str]) -> None: + """ + Initialize the unified state manager. 
+ + Args: + graph: The workflow graph + ready_queue: Queue for nodes ready to execute + """ + self.graph = graph + self.ready_queue = ready_queue + self._lock = threading.RLock() + + # Execution tracking state + self._executing_nodes: set[str] = set() + + # ============= Node State Operations ============= + + def enqueue_node(self, node_id: str) -> None: + """ + Mark a node as TAKEN and add it to the ready queue. + + This combines the state transition and enqueueing operations + that always occur together when preparing a node for execution. + + Args: + node_id: The ID of the node to enqueue + """ + with self._lock: + self.graph.nodes[node_id].state = NodeState.TAKEN + self.ready_queue.put(node_id) + + def mark_node_skipped(self, node_id: str) -> None: + """ + Mark a node as SKIPPED. + + Args: + node_id: The ID of the node to skip + """ + with self._lock: + self.graph.nodes[node_id].state = NodeState.SKIPPED + + def is_node_ready(self, node_id: str) -> bool: + """ + Check if a node is ready to be executed. + + A node is ready when all its incoming edges from taken branches + have been satisfied. + + Args: + node_id: The ID of the node to check + + Returns: + True if the node is ready for execution + """ + with self._lock: + # Get all incoming edges to this node + incoming_edges = self.graph.get_incoming_edges(node_id) + + # If no incoming edges, node is always ready + if not incoming_edges: + return True + + # If any edge is UNKNOWN, node is not ready + if any(edge.state == NodeState.UNKNOWN for edge in incoming_edges): + return False + + # Node is ready if at least one edge is TAKEN + return any(edge.state == NodeState.TAKEN for edge in incoming_edges) + + def get_node_state(self, node_id: str) -> NodeState: + """ + Get the current state of a node. + + Args: + node_id: The ID of the node + + Returns: + The current node state + """ + with self._lock: + return self.graph.nodes[node_id].state + + # ============= Edge State Operations ============= + + def mark_edge_taken(self, edge_id: str) -> None: + """ + Mark an edge as TAKEN. + + Args: + edge_id: The ID of the edge to mark + """ + with self._lock: + self.graph.edges[edge_id].state = NodeState.TAKEN + + def mark_edge_skipped(self, edge_id: str) -> None: + """ + Mark an edge as SKIPPED. + + Args: + edge_id: The ID of the edge to mark + """ + with self._lock: + self.graph.edges[edge_id].state = NodeState.SKIPPED + + def analyze_edge_states(self, edges: list[Edge]) -> EdgeStateAnalysis: + """ + Analyze the states of edges and return summary flags. + + Args: + edges: List of edges to analyze + + Returns: + Analysis result with state flags + """ + with self._lock: + states = {edge.state for edge in edges} + + return EdgeStateAnalysis( + has_unknown=NodeState.UNKNOWN in states, + has_taken=NodeState.TAKEN in states, + all_skipped=states == {NodeState.SKIPPED} if states else True, + ) + + def get_edge_state(self, edge_id: str) -> NodeState: + """ + Get the current state of an edge. + + Args: + edge_id: The ID of the edge + + Returns: + The current edge state + """ + with self._lock: + return self.graph.edges[edge_id].state + + def categorize_branch_edges(self, node_id: str, selected_handle: str) -> tuple[Sequence[Edge], Sequence[Edge]]: + """ + Categorize branch edges into selected and unselected. 
+ + Args: + node_id: The ID of the branch node + selected_handle: The handle of the selected edge + + Returns: + A tuple of (selected_edges, unselected_edges) + """ + with self._lock: + outgoing_edges = self.graph.get_outgoing_edges(node_id) + selected_edges: list[Edge] = [] + unselected_edges: list[Edge] = [] + + for edge in outgoing_edges: + if edge.source_handle == selected_handle: + selected_edges.append(edge) + else: + unselected_edges.append(edge) + + return selected_edges, unselected_edges + + # ============= Execution Tracking Operations ============= + + def start_execution(self, node_id: str) -> None: + """ + Mark a node as executing. + + Args: + node_id: The ID of the node starting execution + """ + with self._lock: + self._executing_nodes.add(node_id) + + def finish_execution(self, node_id: str) -> None: + """ + Mark a node as no longer executing. + + Args: + node_id: The ID of the node finishing execution + """ + with self._lock: + self._executing_nodes.discard(node_id) + + def is_executing(self, node_id: str) -> bool: + """ + Check if a node is currently executing. + + Args: + node_id: The ID of the node to check + + Returns: + True if the node is executing + """ + with self._lock: + return node_id in self._executing_nodes + + def get_executing_count(self) -> int: + """ + Get the count of currently executing nodes. + + Returns: + Number of executing nodes + """ + with self._lock: + return len(self._executing_nodes) + + def get_executing_nodes(self) -> set[str]: + """ + Get a copy of the set of executing node IDs. + + Returns: + Set of node IDs currently executing + """ + with self._lock: + return self._executing_nodes.copy() + + def clear_executing(self) -> None: + """Clear all executing nodes.""" + with self._lock: + self._executing_nodes.clear() + + # ============= Composite Operations ============= + + def is_execution_complete(self) -> bool: + """ + Check if graph execution is complete. + + Execution is complete when: + - Ready queue is empty + - No nodes are executing + + Returns: + True if execution is complete + """ + with self._lock: + return self.ready_queue.empty() and len(self._executing_nodes) == 0 + + def get_queue_depth(self) -> int: + """ + Get the current depth of the ready queue. + + Returns: + Number of nodes in the ready queue + """ + return self.ready_queue.qsize() + + def get_execution_stats(self) -> dict[str, int]: + """ + Get execution statistics. 
+ + Returns: + Dictionary with execution statistics + """ + with self._lock: + taken_nodes = sum(1 for node in self.graph.nodes.values() if node.state == NodeState.TAKEN) + skipped_nodes = sum(1 for node in self.graph.nodes.values() if node.state == NodeState.SKIPPED) + unknown_nodes = sum(1 for node in self.graph.nodes.values() if node.state == NodeState.UNKNOWN) + + return { + "queue_depth": self.ready_queue.qsize(), + "executing": len(self._executing_nodes), + "taken_nodes": taken_nodes, + "skipped_nodes": skipped_nodes, + "unknown_nodes": unknown_nodes, + } + + # ============= Backward Compatibility Methods ============= + # These methods provide compatibility with existing code + + @property + def execution_tracker(self) -> "UnifiedStateManager": + """Compatibility property for ExecutionTracker access.""" + return self + + @property + def node_state_manager(self) -> "UnifiedStateManager": + """Compatibility property for NodeStateManager access.""" + return self + + @property + def edge_state_manager(self) -> "UnifiedStateManager": + """Compatibility property for EdgeStateManager access.""" + return self + + # ExecutionTracker compatibility methods + def add(self, node_id: str) -> None: + """Compatibility method for ExecutionTracker.add().""" + self.start_execution(node_id) + + def remove(self, node_id: str) -> None: + """Compatibility method for ExecutionTracker.remove().""" + self.finish_execution(node_id) + + def is_empty(self) -> bool: + """Compatibility method for ExecutionTracker.is_empty().""" + return len(self._executing_nodes) == 0 + + def count(self) -> int: + """Compatibility method for ExecutionTracker.count().""" + return self.get_executing_count() + + def clear(self) -> None: + """Compatibility method for ExecutionTracker.clear().""" + self.clear_executing() diff --git a/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py b/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py new file mode 100644 index 0000000000..015fb79e4f --- /dev/null +++ b/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py @@ -0,0 +1,360 @@ +""" +Enhanced worker pool with integrated activity tracking and dynamic scaling. + +This is a proposed simplification that merges WorkerPool, ActivityTracker, +and DynamicScaler into a single cohesive class. +""" + +import queue +import threading +import time +from typing import TYPE_CHECKING, final + +from configs import dify_config +from core.workflow.graph import Graph +from core.workflow.graph_events import GraphNodeEventBase + +from ..worker import Worker + +if TYPE_CHECKING: + from contextvars import Context + + from flask import Flask + + +@final +class EnhancedWorkerPool: + """ + Enhanced worker pool with integrated features. + + This class combines the responsibilities of: + - WorkerPool: Managing worker threads + - ActivityTracker: Tracking worker activity + - DynamicScaler: Making scaling decisions + + Benefits: + - Simplified interface with fewer classes + - Direct integration of related features + - Reduced inter-class communication overhead + """ + + def __init__( + self, + ready_queue: queue.Queue[str], + event_queue: queue.Queue[GraphNodeEventBase], + graph: Graph, + flask_app: "Flask | None" = None, + context_vars: "Context | None" = None, + min_workers: int | None = None, + max_workers: int | None = None, + scale_up_threshold: int | None = None, + scale_down_idle_time: float | None = None, + ) -> None: + """ + Initialize the enhanced worker pool. 
+ + Args: + ready_queue: Queue of nodes ready for execution + event_queue: Queue for worker events + graph: The workflow graph + flask_app: Optional Flask app for context preservation + context_vars: Optional context variables + min_workers: Minimum number of workers + max_workers: Maximum number of workers + scale_up_threshold: Queue depth to trigger scale up + scale_down_idle_time: Seconds before scaling down idle workers + """ + self.ready_queue = ready_queue + self.event_queue = event_queue + self.graph = graph + self.flask_app = flask_app + self.context_vars = context_vars + + # Scaling parameters + self.min_workers = min_workers or dify_config.GRAPH_ENGINE_MIN_WORKERS + self.max_workers = max_workers or dify_config.GRAPH_ENGINE_MAX_WORKERS + self.scale_up_threshold = scale_up_threshold or dify_config.GRAPH_ENGINE_SCALE_UP_THRESHOLD + self.scale_down_idle_time = scale_down_idle_time or dify_config.GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME + + # Worker management + self.workers: list[Worker] = [] + self._worker_counter = 0 + self._lock = threading.RLock() + self._running = False + + # Activity tracking (integrated) + self._worker_activity: dict[int, tuple[bool, float]] = {} + + # Scaling control + self._last_scale_check = time.time() + self._scale_check_interval = 1.0 # Check scaling every second + + def start(self, initial_count: int | None = None) -> None: + """ + Start the worker pool with initial workers. + + Args: + initial_count: Number of workers to start with (auto-calculated if None) + """ + with self._lock: + if self._running: + return + + self._running = True + + # Calculate initial worker count if not specified + if initial_count is None: + initial_count = self._calculate_initial_workers() + + # Create initial workers + for _ in range(initial_count): + self._add_worker() + + def stop(self) -> None: + """Stop all workers in the pool.""" + with self._lock: + self._running = False + + # Stop all workers + for worker in self.workers: + worker.stop() + + # Wait for workers to finish + for worker in self.workers: + if worker.is_alive(): + worker.join(timeout=10.0) + + self.workers.clear() + self._worker_activity.clear() + + def check_and_scale(self) -> None: + """ + Check and perform scaling if needed. + + This method should be called periodically to adjust pool size. + """ + current_time = time.time() + + # Rate limit scaling checks + if current_time - self._last_scale_check < self._scale_check_interval: + return + + self._last_scale_check = current_time + + with self._lock: + if not self._running: + return + + current_count = len(self.workers) + queue_depth = self.ready_queue.qsize() + + # Check for scale up + if self._should_scale_up(current_count, queue_depth): + self._add_worker() + + # Check for scale down + idle_workers = self._get_idle_workers(current_time) + if idle_workers and self._should_scale_down(current_count): + # Remove the most idle worker + self._remove_worker(idle_workers[0]) + + # ============= Private Methods ============= + + def _calculate_initial_workers(self) -> int: + """ + Calculate initial number of workers based on graph complexity. 
+ + Returns: + Initial worker count + """ + # Simple heuristic: start with min_workers, scale based on graph size + node_count = len(self.graph.nodes) + + if node_count < 10: + return self.min_workers + elif node_count < 50: + return min(self.min_workers + 1, self.max_workers) + else: + return min(self.min_workers + 2, self.max_workers) + + def _should_scale_up(self, current_count: int, queue_depth: int) -> bool: + """ + Determine if pool should scale up. + + Args: + current_count: Current number of workers + queue_depth: Current queue depth + + Returns: + True if should scale up + """ + if current_count >= self.max_workers: + return False + + # Scale up if queue is deep + if queue_depth > self.scale_up_threshold: + return True + + # Scale up if all workers are busy and queue is not empty + active_count = self._get_active_count() + if active_count == current_count and queue_depth > 0: + return True + + return False + + def _should_scale_down(self, current_count: int) -> bool: + """ + Determine if pool should scale down. + + Args: + current_count: Current number of workers + + Returns: + True if should scale down + """ + return current_count > self.min_workers + + def _add_worker(self) -> None: + """Add a new worker to the pool.""" + worker_id = self._worker_counter + self._worker_counter += 1 + + # Create worker with activity callbacks + worker = Worker( + ready_queue=self.ready_queue, + event_queue=self.event_queue, + graph=self.graph, + worker_id=worker_id, + flask_app=self.flask_app, + context_vars=self.context_vars, + on_idle_callback=self._on_worker_idle, + on_active_callback=self._on_worker_active, + ) + + worker.start() + self.workers.append(worker) + self._worker_activity[worker_id] = (False, time.time()) + + def _remove_worker(self, worker_id: int) -> None: + """ + Remove a specific worker from the pool. + + Args: + worker_id: ID of worker to remove + """ + worker_to_remove = None + for worker in self.workers: + if worker.worker_id == worker_id: + worker_to_remove = worker + break + + if worker_to_remove: + worker_to_remove.stop() + self.workers.remove(worker_to_remove) + self._worker_activity.pop(worker_id, None) + + if worker_to_remove.is_alive(): + worker_to_remove.join(timeout=1.0) + + def _on_worker_idle(self, worker_id: int) -> None: + """ + Callback when worker becomes idle. + + Args: + worker_id: ID of the idle worker + """ + with self._lock: + self._worker_activity[worker_id] = (False, time.time()) + + def _on_worker_active(self, worker_id: int) -> None: + """ + Callback when worker becomes active. + + Args: + worker_id: ID of the active worker + """ + with self._lock: + self._worker_activity[worker_id] = (True, time.time()) + + def _get_idle_workers(self, current_time: float) -> list[int]: + """ + Get list of workers that have been idle too long. + + Args: + current_time: Current timestamp + + Returns: + List of idle worker IDs sorted by idle time (longest first) + """ + idle_workers: list[tuple[int, float]] = [] + + for worker_id, (is_active, last_change) in self._worker_activity.items(): + if not is_active: + idle_time = current_time - last_change + if idle_time > self.scale_down_idle_time: + idle_workers.append((worker_id, idle_time)) + + # Sort by idle time (longest first) + idle_workers.sort(key=lambda x: x[1], reverse=True) + return [worker_id for worker_id, _ in idle_workers] + + def _get_active_count(self) -> int: + """ + Get count of currently active workers. 
+ + Returns: + Number of active workers + """ + return sum(1 for is_active, _ in self._worker_activity.values() if is_active) + + # ============= Public Status Methods ============= + + def get_worker_count(self) -> int: + """Get current number of workers.""" + with self._lock: + return len(self.workers) + + def get_status(self) -> dict[str, int]: + """ + Get pool status information. + + Returns: + Dictionary with status information + """ + with self._lock: + return { + "total_workers": len(self.workers), + "active_workers": self._get_active_count(), + "idle_workers": len(self.workers) - self._get_active_count(), + "queue_depth": self.ready_queue.qsize(), + "min_workers": self.min_workers, + "max_workers": self.max_workers, + } + + # ============= Backward Compatibility ============= + + def scale_up(self) -> None: + """Compatibility method for manual scale up.""" + with self._lock: + if self._running and len(self.workers) < self.max_workers: + self._add_worker() + + def scale_down(self, worker_ids: list[int]) -> None: + """Compatibility method for manual scale down.""" + with self._lock: + if not self._running: + return + + for worker_id in worker_ids: + if len(self.workers) > self.min_workers: + self._remove_worker(worker_id) + + def check_scaling(self, queue_depth: int, executing_count: int) -> None: + """ + Compatibility method for checking scaling. + + Args: + queue_depth: Current queue depth (ignored, we check directly) + executing_count: Number of executing nodes (ignored) + """ + self.check_and_scale() From 202fdfcb816faa30dda9e16ac9e3ae1880685f27 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 02:41:16 +0800 Subject: [PATCH 19/96] refactor(graph_engine): Remove backward compatibility code Signed-off-by: -LAN- --- .../event_management/event_handlers.py | 17 ++++---- .../workflow/graph_engine/graph_engine.py | 16 +++----- .../graph_traversal/branch_handler.py | 8 ++-- .../graph_traversal/edge_processor.py | 17 ++++---- .../graph_traversal/skip_propagator.py | 19 ++++----- .../orchestration/execution_coordinator.py | 15 +++---- .../state_management/unified_state_manager.py | 39 ------------------- .../worker_management/enhanced_worker_pool.py | 28 ------------- 8 files changed, 38 insertions(+), 121 deletions(-) diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py index bdd1c4d245..3ec8e8b028 100644 --- a/api/core/workflow/graph_engine/event_management/event_handlers.py +++ b/api/core/workflow/graph_engine/event_management/event_handlers.py @@ -56,8 +56,7 @@ class EventHandlerRegistry: event_collector: "EventCollector", branch_handler: "BranchHandler", edge_processor: "EdgeProcessor", - node_state_manager: "UnifiedStateManager", - execution_tracker: "UnifiedStateManager", + state_manager: "UnifiedStateManager", error_handler: "ErrorHandler", ) -> None: """ @@ -71,8 +70,7 @@ class EventHandlerRegistry: event_collector: Event collector for collecting events branch_handler: Branch handler for branch node processing edge_processor: Edge processor for edge traversal - node_state_manager: Node state manager - execution_tracker: Execution tracker + state_manager: Unified state manager error_handler: Error handler """ self._graph = graph @@ -82,8 +80,7 @@ class EventHandlerRegistry: self._event_collector = event_collector self._branch_handler = branch_handler self._edge_processor = edge_processor - self._node_state_manager = node_state_manager - self._execution_tracker = execution_tracker 
+ self._state_manager = state_manager self._error_handler = error_handler def handle_event(self, event: GraphNodeEventBase) -> None: @@ -199,11 +196,11 @@ class EventHandlerRegistry: # Enqueue ready nodes for node_id in ready_nodes: - self._node_state_manager.enqueue_node(node_id) - self._execution_tracker.add(node_id) + self._state_manager.enqueue_node(node_id) + self._state_manager.start_execution(node_id) # Update execution tracking - self._execution_tracker.remove(event.node_id) + self._state_manager.finish_execution(event.node_id) # Handle response node outputs if node.execution_type == NodeExecutionType.RESPONSE: @@ -232,7 +229,7 @@ class EventHandlerRegistry: # Abort execution self._graph_execution.fail(RuntimeError(event.error)) self._event_collector.collect(event) - self._execution_tracker.remove(event.node_id) + self._state_manager.finish_execution(event.node_id) def _handle_node_exception(self, event: NodeRunExceptionEvent) -> None: """ diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 7398b846d8..f8f609b042 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -137,20 +137,18 @@ class GraphEngine: self.node_readiness_checker = NodeReadinessChecker(self.graph) self.edge_processor = EdgeProcessor( graph=self.graph, - edge_state_manager=self.state_manager, - node_state_manager=self.state_manager, + state_manager=self.state_manager, response_coordinator=self.response_coordinator, ) self.skip_propagator = SkipPropagator( graph=self.graph, - edge_state_manager=self.state_manager, - node_state_manager=self.state_manager, + state_manager=self.state_manager, ) self.branch_handler = BranchHandler( graph=self.graph, edge_processor=self.edge_processor, skip_propagator=self.skip_propagator, - edge_state_manager=self.state_manager, + state_manager=self.state_manager, ) # Event handler registry with all dependencies @@ -162,8 +160,7 @@ class GraphEngine: event_collector=self.event_collector, branch_handler=self.branch_handler, edge_processor=self.edge_processor, - node_state_manager=self.state_manager, - execution_tracker=self.state_manager, + state_manager=self.state_manager, error_handler=self.error_handler, ) @@ -180,8 +177,7 @@ class GraphEngine: # Orchestration self.execution_coordinator = ExecutionCoordinator( graph_execution=self.graph_execution, - node_state_manager=self.state_manager, - execution_tracker=self.state_manager, + state_manager=self.state_manager, event_handler=self.event_handler_registry, event_collector=self.event_collector, command_processor=self.command_processor, @@ -334,7 +330,7 @@ class GraphEngine: # Enqueue root node root_node = self.graph.root_node self.state_manager.enqueue_node(root_node.id) - self.state_manager.add(root_node.id) + self.state_manager.start_execution(root_node.id) # Start dispatcher self.dispatcher.start() diff --git a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py index 8e08a03e3c..cf4d9db5f8 100644 --- a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py +++ b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py @@ -27,7 +27,7 @@ class BranchHandler: graph: Graph, edge_processor: EdgeProcessor, skip_propagator: SkipPropagator, - edge_state_manager: UnifiedStateManager, + state_manager: UnifiedStateManager, ) -> None: """ Initialize the branch handler. 
@@ -36,12 +36,12 @@ class BranchHandler: graph: The workflow graph edge_processor: Processor for edges skip_propagator: Propagator for skip states - edge_state_manager: Manager for edge states + state_manager: Unified state manager """ self.graph = graph self.edge_processor = edge_processor self.skip_propagator = skip_propagator - self.edge_state_manager = edge_state_manager + self.state_manager = state_manager def handle_branch_completion( self, node_id: str, selected_handle: str | None @@ -63,7 +63,7 @@ class BranchHandler: raise ValueError(f"Branch node {node_id} completed without selecting a branch") # Categorize edges into selected and unselected - _, unselected_edges = self.edge_state_manager.categorize_branch_edges(node_id, selected_handle) + _, unselected_edges = self.state_manager.categorize_branch_edges(node_id, selected_handle) # Skip all unselected paths self.skip_propagator.skip_branch_paths(unselected_edges) diff --git a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py index 6efb56f046..369257aa45 100644 --- a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py +++ b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py @@ -25,8 +25,7 @@ class EdgeProcessor: def __init__( self, graph: Graph, - edge_state_manager: UnifiedStateManager, - node_state_manager: UnifiedStateManager, + state_manager: UnifiedStateManager, response_coordinator: ResponseStreamCoordinator, ) -> None: """ @@ -34,13 +33,11 @@ class EdgeProcessor: Args: graph: The workflow graph - edge_state_manager: Manager for edge states - node_state_manager: Manager for node states + state_manager: Unified state manager response_coordinator: Response stream coordinator """ self.graph = graph - self.edge_state_manager = edge_state_manager - self.node_state_manager = node_state_manager + self.state_manager = state_manager self.response_coordinator = response_coordinator def process_node_success( @@ -107,7 +104,7 @@ class EdgeProcessor: all_streaming_events: list[NodeRunStreamChunkEvent] = [] # Categorize edges - selected_edges, unselected_edges = self.edge_state_manager.categorize_branch_edges(node_id, selected_handle) + selected_edges, unselected_edges = self.state_manager.categorize_branch_edges(node_id, selected_handle) # Process unselected edges first (mark as skipped) for edge in unselected_edges: @@ -132,14 +129,14 @@ class EdgeProcessor: Tuple of (list containing downstream node ID if it's ready, list of streaming events) """ # Mark edge as taken - self.edge_state_manager.mark_edge_taken(edge.id) + self.state_manager.mark_edge_taken(edge.id) # Notify response coordinator and get streaming events streaming_events = self.response_coordinator.on_edge_taken(edge.id) # Check if downstream node is ready ready_nodes: list[str] = [] - if self.node_state_manager.is_node_ready(edge.head): + if self.state_manager.is_node_ready(edge.head): ready_nodes.append(edge.head) return ready_nodes, streaming_events @@ -151,4 +148,4 @@ class EdgeProcessor: Args: edge: The edge to skip """ - self.edge_state_manager.mark_edge_skipped(edge.id) + self.state_manager.mark_edge_skipped(edge.id) diff --git a/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py b/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py index 01426809eb..17f30a6a38 100644 --- a/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py +++ b/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py @@ -22,20 +22,17 @@ 
class SkipPropagator: def __init__( self, graph: Graph, - edge_state_manager: UnifiedStateManager, - node_state_manager: UnifiedStateManager, + state_manager: UnifiedStateManager, ) -> None: """ Initialize the skip propagator. Args: graph: The workflow graph - edge_state_manager: Manager for edge states - node_state_manager: Manager for node states + state_manager: Unified state manager """ self.graph = graph - self.edge_state_manager = edge_state_manager - self.node_state_manager = node_state_manager + self.state_manager = state_manager def propagate_skip_from_edge(self, edge_id: str) -> None: """ @@ -53,7 +50,7 @@ class SkipPropagator: incoming_edges = self.graph.get_incoming_edges(downstream_node_id) # Analyze edge states - edge_states = self.edge_state_manager.analyze_edge_states(incoming_edges) + edge_states = self.state_manager.analyze_edge_states(incoming_edges) # Stop if there are unknown edges (not yet processed) if edge_states["has_unknown"]: @@ -62,7 +59,7 @@ class SkipPropagator: # If any edge is taken, node may still execute if edge_states["has_taken"]: # Enqueue node - self.node_state_manager.enqueue_node(downstream_node_id) + self.state_manager.enqueue_node(downstream_node_id) return # All edges are skipped, propagate skip to this node @@ -77,12 +74,12 @@ class SkipPropagator: node_id: The ID of the node to skip """ # Mark node as skipped - self.node_state_manager.mark_node_skipped(node_id) + self.state_manager.mark_node_skipped(node_id) # Mark all outgoing edges as skipped and propagate outgoing_edges = self.graph.get_outgoing_edges(node_id) for edge in outgoing_edges: - self.edge_state_manager.mark_edge_skipped(edge.id) + self.state_manager.mark_edge_skipped(edge.id) # Recursively propagate skip self.propagate_skip_from_edge(edge.id) @@ -94,5 +91,5 @@ class SkipPropagator: unselected_edges: List of edges not taken by the branch """ for edge in unselected_edges: - self.edge_state_manager.mark_edge_skipped(edge.id) + self.state_manager.mark_edge_skipped(edge.id) self.propagate_skip_from_edge(edge.id) diff --git a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py index 3d9783703e..49689813a1 100644 --- a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py +++ b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py @@ -26,8 +26,7 @@ class ExecutionCoordinator: def __init__( self, graph_execution: GraphExecution, - node_state_manager: UnifiedStateManager, - execution_tracker: UnifiedStateManager, + state_manager: UnifiedStateManager, event_handler: "EventHandlerRegistry", event_collector: EventCollector, command_processor: CommandProcessor, @@ -38,16 +37,14 @@ class ExecutionCoordinator: Args: graph_execution: Graph execution aggregate - node_state_manager: Manager for node states - execution_tracker: Tracker for executing nodes + state_manager: Unified state manager event_handler: Event handler registry for processing events event_collector: Event collector for collecting events command_processor: Processor for commands worker_pool: Pool of workers """ self.graph_execution = graph_execution - self.node_state_manager = node_state_manager - self.execution_tracker = execution_tracker + self.state_manager = state_manager self.event_handler = event_handler self.event_collector = event_collector self.command_processor = command_processor @@ -59,8 +56,8 @@ class ExecutionCoordinator: def check_scaling(self) -> None: """Check and perform worker scaling if 
needed.""" - queue_depth = self.node_state_manager.ready_queue.qsize() - executing_count = self.execution_tracker.count() + queue_depth = self.state_manager.ready_queue.qsize() + executing_count = self.state_manager.get_executing_count() self.worker_pool.check_scaling(queue_depth, executing_count) def is_execution_complete(self) -> bool: @@ -75,7 +72,7 @@ class ExecutionCoordinator: return True # Complete if no work remains - return self.node_state_manager.ready_queue.empty() and self.execution_tracker.is_empty() + return self.state_manager.is_execution_complete() def mark_complete(self) -> None: """Mark execution as complete.""" diff --git a/api/core/workflow/graph_engine/state_management/unified_state_manager.py b/api/core/workflow/graph_engine/state_management/unified_state_manager.py index 3f50b68213..0d4e5a4d43 100644 --- a/api/core/workflow/graph_engine/state_management/unified_state_manager.py +++ b/api/core/workflow/graph_engine/state_management/unified_state_manager.py @@ -302,42 +302,3 @@ class UnifiedStateManager: "skipped_nodes": skipped_nodes, "unknown_nodes": unknown_nodes, } - - # ============= Backward Compatibility Methods ============= - # These methods provide compatibility with existing code - - @property - def execution_tracker(self) -> "UnifiedStateManager": - """Compatibility property for ExecutionTracker access.""" - return self - - @property - def node_state_manager(self) -> "UnifiedStateManager": - """Compatibility property for NodeStateManager access.""" - return self - - @property - def edge_state_manager(self) -> "UnifiedStateManager": - """Compatibility property for EdgeStateManager access.""" - return self - - # ExecutionTracker compatibility methods - def add(self, node_id: str) -> None: - """Compatibility method for ExecutionTracker.add().""" - self.start_execution(node_id) - - def remove(self, node_id: str) -> None: - """Compatibility method for ExecutionTracker.remove().""" - self.finish_execution(node_id) - - def is_empty(self) -> bool: - """Compatibility method for ExecutionTracker.is_empty().""" - return len(self._executing_nodes) == 0 - - def count(self) -> int: - """Compatibility method for ExecutionTracker.count().""" - return self.get_executing_count() - - def clear(self) -> None: - """Compatibility method for ExecutionTracker.clear().""" - self.clear_executing() diff --git a/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py b/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py index 015fb79e4f..d45d9be9b8 100644 --- a/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py @@ -330,31 +330,3 @@ class EnhancedWorkerPool: "min_workers": self.min_workers, "max_workers": self.max_workers, } - - # ============= Backward Compatibility ============= - - def scale_up(self) -> None: - """Compatibility method for manual scale up.""" - with self._lock: - if self._running and len(self.workers) < self.max_workers: - self._add_worker() - - def scale_down(self, worker_ids: list[int]) -> None: - """Compatibility method for manual scale down.""" - with self._lock: - if not self._running: - return - - for worker_id in worker_ids: - if len(self.workers) > self.min_workers: - self._remove_worker(worker_id) - - def check_scaling(self, queue_depth: int, executing_count: int) -> None: - """ - Compatibility method for checking scaling. 
- - Args: - queue_depth: Current queue depth (ignored, we check directly) - executing_count: Number of executing nodes (ignored) - """ - self.check_and_scale() From 64c1234724e4498eb24c61f6edb6eeb921c7bb1b Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 03:23:47 +0800 Subject: [PATCH 20/96] refactor(graph_engine): Merge worker management into one WorkerPool Signed-off-by: -LAN- --- api/.importlinter | 17 +- .../workflow/graph_engine/graph_engine.py | 40 +-- .../orchestration/execution_coordinator.py | 8 +- .../worker_management/__init__.py | 10 +- .../worker_management/activity_tracker.py | 76 ---- .../worker_management/dynamic_scaler.py | 101 ------ .../worker_management/enhanced_worker_pool.py | 332 ------------------ .../worker_management/simple_worker_pool.py | 168 +++++++++ .../worker_management/worker_factory.py | 76 ---- .../worker_management/worker_pool.py | 148 -------- 10 files changed, 192 insertions(+), 784 deletions(-) delete mode 100644 api/core/workflow/graph_engine/worker_management/activity_tracker.py delete mode 100644 api/core/workflow/graph_engine/worker_management/dynamic_scaler.py delete mode 100644 api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py create mode 100644 api/core/workflow/graph_engine/worker_management/simple_worker_pool.py delete mode 100644 api/core/workflow/graph_engine/worker_management/worker_factory.py delete mode 100644 api/core/workflow/graph_engine/worker_management/worker_pool.py diff --git a/api/.importlinter b/api/.importlinter index 9205b7c94d..6e15f06a5c 100644 --- a/api/.importlinter +++ b/api/.importlinter @@ -77,16 +77,15 @@ forbidden_modules = core.workflow.graph_engine.layers core.workflow.graph_engine.protocols -[importlinter:contract:worker-management-layers] -name = Worker Management Layers -type = layers -layers = - worker_pool - worker_factory - dynamic_scaler - activity_tracker -containers = +[importlinter:contract:worker-management] +name = Worker Management +type = forbidden +source_modules = core.workflow.graph_engine.worker_management +forbidden_modules = + core.workflow.graph_engine.orchestration + core.workflow.graph_engine.command_processing + core.workflow.graph_engine.event_management [importlinter:contract:error-handling-strategies] name = Error Handling Strategies diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index f8f609b042..b627ccc634 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -13,7 +13,6 @@ from typing import final from flask import Flask, current_app -from configs import dify_config from core.app.entities.app_invoke_entities import InvokeFrom from core.workflow.entities import GraphRuntimeState from core.workflow.enums import NodeExecutionType @@ -40,7 +39,7 @@ from .output_registry import OutputRegistry from .protocols.command_channel import CommandChannel from .response_coordinator import ResponseStreamCoordinator from .state_management import UnifiedStateManager -from .worker_management import ActivityTracker, DynamicScaler, WorkerFactory, WorkerPool +from .worker_management import SimpleWorkerPool logger = logging.getLogger(__name__) @@ -215,31 +214,17 @@ class GraphEngine: context_vars = contextvars.copy_context() - # Create worker management components - self._activity_tracker = ActivityTracker() - self._dynamic_scaler = DynamicScaler( - min_workers=(self._min_workers if self._min_workers is not None else 
dify_config.GRAPH_ENGINE_MIN_WORKERS), - max_workers=(self._max_workers if self._max_workers is not None else dify_config.GRAPH_ENGINE_MAX_WORKERS), - scale_up_threshold=( - self._scale_up_threshold - if self._scale_up_threshold is not None - else dify_config.GRAPH_ENGINE_SCALE_UP_THRESHOLD - ), - scale_down_idle_time=( - self._scale_down_idle_time - if self._scale_down_idle_time is not None - else dify_config.GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME - ), - ) - self._worker_factory = WorkerFactory(flask_app, context_vars) - - self._worker_pool = WorkerPool( + # Create simple worker pool + self._worker_pool = SimpleWorkerPool( ready_queue=self.ready_queue, event_queue=self.event_queue, graph=self.graph, - worker_factory=self._worker_factory, - dynamic_scaler=self._dynamic_scaler, - activity_tracker=self._activity_tracker, + flask_app=flask_app, + context_vars=context_vars, + min_workers=self._min_workers, + max_workers=self._max_workers, + scale_up_threshold=self._scale_up_threshold, + scale_down_idle_time=self._scale_down_idle_time, ) def _validate_graph_state_consistency(self) -> None: @@ -316,11 +301,8 @@ class GraphEngine: def _start_execution(self) -> None: """Start execution subsystems.""" - # Calculate initial worker count - initial_workers = self._dynamic_scaler.calculate_initial_workers(self.graph) - - # Start worker pool - self._worker_pool.start(initial_workers) + # Start worker pool (it calculates initial workers internally) + self._worker_pool.start() # Register response nodes for node in self.graph.nodes.values(): diff --git a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py index 49689813a1..95902f1846 100644 --- a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py +++ b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py @@ -8,7 +8,7 @@ from ..command_processing import CommandProcessor from ..domain import GraphExecution from ..event_management import EventCollector from ..state_management import UnifiedStateManager -from ..worker_management import WorkerPool +from ..worker_management import SimpleWorkerPool if TYPE_CHECKING: from ..event_management import EventHandlerRegistry @@ -30,7 +30,7 @@ class ExecutionCoordinator: event_handler: "EventHandlerRegistry", event_collector: EventCollector, command_processor: CommandProcessor, - worker_pool: WorkerPool, + worker_pool: SimpleWorkerPool, ) -> None: """ Initialize the execution coordinator. @@ -56,9 +56,7 @@ class ExecutionCoordinator: def check_scaling(self) -> None: """Check and perform worker scaling if needed.""" - queue_depth = self.state_manager.ready_queue.qsize() - executing_count = self.state_manager.get_executing_count() - self.worker_pool.check_scaling(queue_depth, executing_count) + self.worker_pool.check_and_scale() def is_execution_complete(self) -> bool: """ diff --git a/api/core/workflow/graph_engine/worker_management/__init__.py b/api/core/workflow/graph_engine/worker_management/__init__.py index 1737f32151..5b25dbc79a 100644 --- a/api/core/workflow/graph_engine/worker_management/__init__.py +++ b/api/core/workflow/graph_engine/worker_management/__init__.py @@ -5,14 +5,8 @@ This package manages the worker pool, including creation, scaling, and activity tracking. 
""" -from .activity_tracker import ActivityTracker -from .dynamic_scaler import DynamicScaler -from .worker_factory import WorkerFactory -from .worker_pool import WorkerPool +from .simple_worker_pool import SimpleWorkerPool __all__ = [ - "ActivityTracker", - "DynamicScaler", - "WorkerFactory", - "WorkerPool", + "SimpleWorkerPool", ] diff --git a/api/core/workflow/graph_engine/worker_management/activity_tracker.py b/api/core/workflow/graph_engine/worker_management/activity_tracker.py deleted file mode 100644 index 19c4ddaeb5..0000000000 --- a/api/core/workflow/graph_engine/worker_management/activity_tracker.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Activity tracker for monitoring worker activity. -""" - -import threading -import time -from typing import final - - -@final -class ActivityTracker: - """ - Tracks worker activity for scaling decisions. - - This monitors which workers are active or idle to support - dynamic scaling decisions. - """ - - def __init__(self, idle_threshold: float = 30.0) -> None: - """ - Initialize the activity tracker. - - Args: - idle_threshold: Seconds before a worker is considered idle - """ - self.idle_threshold = idle_threshold - self._worker_activity: dict[int, tuple[bool, float]] = {} - self._lock = threading.RLock() - - def track_activity(self, worker_id: int, is_active: bool) -> None: - """ - Track worker activity state. - - Args: - worker_id: ID of the worker - is_active: Whether the worker is active - """ - with self._lock: - self._worker_activity[worker_id] = (is_active, time.time()) - - def get_idle_workers(self) -> list[int]: - """ - Get list of workers that have been idle too long. - - Returns: - List of idle worker IDs - """ - current_time = time.time() - idle_workers: list[int] = [] - - with self._lock: - for worker_id, (is_active, last_change) in self._worker_activity.items(): - if not is_active and (current_time - last_change) > self.idle_threshold: - idle_workers.append(worker_id) - - return idle_workers - - def remove_worker(self, worker_id: int) -> None: - """ - Remove a worker from tracking. - - Args: - worker_id: ID of the worker to remove - """ - with self._lock: - self._worker_activity.pop(worker_id, None) - - def get_active_count(self) -> int: - """ - Get count of currently active workers. - - Returns: - Number of active workers - """ - with self._lock: - return sum(1 for is_active, _ in self._worker_activity.values() if is_active) diff --git a/api/core/workflow/graph_engine/worker_management/dynamic_scaler.py b/api/core/workflow/graph_engine/worker_management/dynamic_scaler.py deleted file mode 100644 index 7450b02618..0000000000 --- a/api/core/workflow/graph_engine/worker_management/dynamic_scaler.py +++ /dev/null @@ -1,101 +0,0 @@ -""" -Dynamic scaler for worker pool sizing. -""" - -from typing import final - -from core.workflow.graph import Graph - - -@final -class DynamicScaler: - """ - Manages dynamic scaling decisions for the worker pool. - - This encapsulates the logic for when to scale up or down - based on workload and configuration. - """ - - def __init__( - self, - min_workers: int = 2, - max_workers: int = 10, - scale_up_threshold: int = 5, - scale_down_idle_time: float = 30.0, - ) -> None: - """ - Initialize the dynamic scaler. 
- - Args: - min_workers: Minimum number of workers - max_workers: Maximum number of workers - scale_up_threshold: Queue depth to trigger scale up - scale_down_idle_time: Idle time before scaling down - """ - self.min_workers = min_workers - self.max_workers = max_workers - self.scale_up_threshold = scale_up_threshold - self.scale_down_idle_time = scale_down_idle_time - - def calculate_initial_workers(self, graph: Graph) -> int: - """ - Calculate initial worker count based on graph complexity. - - Args: - graph: The workflow graph - - Returns: - Initial number of workers to create - """ - node_count = len(graph.nodes) - - # Simple heuristic: more nodes = more workers - if node_count < 10: - initial = self.min_workers - elif node_count < 50: - initial = min(4, self.max_workers) - elif node_count < 100: - initial = min(6, self.max_workers) - else: - initial = min(8, self.max_workers) - - return max(self.min_workers, initial) - - def should_scale_up(self, current_workers: int, queue_depth: int, executing_count: int) -> bool: - """ - Determine if scaling up is needed. - - Args: - current_workers: Current number of workers - queue_depth: Number of nodes waiting - executing_count: Number of nodes executing - - Returns: - True if should scale up - """ - if current_workers >= self.max_workers: - return False - - # Scale up if queue is deep and workers are busy - if queue_depth > self.scale_up_threshold: - if executing_count >= current_workers * 0.8: - return True - - return False - - def should_scale_down(self, current_workers: int, idle_workers: list[int]) -> bool: - """ - Determine if scaling down is appropriate. - - Args: - current_workers: Current number of workers - idle_workers: List of idle worker IDs - - Returns: - True if should scale down - """ - if current_workers <= self.min_workers: - return False - - # Scale down if we have idle workers - return len(idle_workers) > 0 diff --git a/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py b/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py deleted file mode 100644 index d45d9be9b8..0000000000 --- a/api/core/workflow/graph_engine/worker_management/enhanced_worker_pool.py +++ /dev/null @@ -1,332 +0,0 @@ -""" -Enhanced worker pool with integrated activity tracking and dynamic scaling. - -This is a proposed simplification that merges WorkerPool, ActivityTracker, -and DynamicScaler into a single cohesive class. -""" - -import queue -import threading -import time -from typing import TYPE_CHECKING, final - -from configs import dify_config -from core.workflow.graph import Graph -from core.workflow.graph_events import GraphNodeEventBase - -from ..worker import Worker - -if TYPE_CHECKING: - from contextvars import Context - - from flask import Flask - - -@final -class EnhancedWorkerPool: - """ - Enhanced worker pool with integrated features. 
- - This class combines the responsibilities of: - - WorkerPool: Managing worker threads - - ActivityTracker: Tracking worker activity - - DynamicScaler: Making scaling decisions - - Benefits: - - Simplified interface with fewer classes - - Direct integration of related features - - Reduced inter-class communication overhead - """ - - def __init__( - self, - ready_queue: queue.Queue[str], - event_queue: queue.Queue[GraphNodeEventBase], - graph: Graph, - flask_app: "Flask | None" = None, - context_vars: "Context | None" = None, - min_workers: int | None = None, - max_workers: int | None = None, - scale_up_threshold: int | None = None, - scale_down_idle_time: float | None = None, - ) -> None: - """ - Initialize the enhanced worker pool. - - Args: - ready_queue: Queue of nodes ready for execution - event_queue: Queue for worker events - graph: The workflow graph - flask_app: Optional Flask app for context preservation - context_vars: Optional context variables - min_workers: Minimum number of workers - max_workers: Maximum number of workers - scale_up_threshold: Queue depth to trigger scale up - scale_down_idle_time: Seconds before scaling down idle workers - """ - self.ready_queue = ready_queue - self.event_queue = event_queue - self.graph = graph - self.flask_app = flask_app - self.context_vars = context_vars - - # Scaling parameters - self.min_workers = min_workers or dify_config.GRAPH_ENGINE_MIN_WORKERS - self.max_workers = max_workers or dify_config.GRAPH_ENGINE_MAX_WORKERS - self.scale_up_threshold = scale_up_threshold or dify_config.GRAPH_ENGINE_SCALE_UP_THRESHOLD - self.scale_down_idle_time = scale_down_idle_time or dify_config.GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME - - # Worker management - self.workers: list[Worker] = [] - self._worker_counter = 0 - self._lock = threading.RLock() - self._running = False - - # Activity tracking (integrated) - self._worker_activity: dict[int, tuple[bool, float]] = {} - - # Scaling control - self._last_scale_check = time.time() - self._scale_check_interval = 1.0 # Check scaling every second - - def start(self, initial_count: int | None = None) -> None: - """ - Start the worker pool with initial workers. - - Args: - initial_count: Number of workers to start with (auto-calculated if None) - """ - with self._lock: - if self._running: - return - - self._running = True - - # Calculate initial worker count if not specified - if initial_count is None: - initial_count = self._calculate_initial_workers() - - # Create initial workers - for _ in range(initial_count): - self._add_worker() - - def stop(self) -> None: - """Stop all workers in the pool.""" - with self._lock: - self._running = False - - # Stop all workers - for worker in self.workers: - worker.stop() - - # Wait for workers to finish - for worker in self.workers: - if worker.is_alive(): - worker.join(timeout=10.0) - - self.workers.clear() - self._worker_activity.clear() - - def check_and_scale(self) -> None: - """ - Check and perform scaling if needed. - - This method should be called periodically to adjust pool size. 
- """ - current_time = time.time() - - # Rate limit scaling checks - if current_time - self._last_scale_check < self._scale_check_interval: - return - - self._last_scale_check = current_time - - with self._lock: - if not self._running: - return - - current_count = len(self.workers) - queue_depth = self.ready_queue.qsize() - - # Check for scale up - if self._should_scale_up(current_count, queue_depth): - self._add_worker() - - # Check for scale down - idle_workers = self._get_idle_workers(current_time) - if idle_workers and self._should_scale_down(current_count): - # Remove the most idle worker - self._remove_worker(idle_workers[0]) - - # ============= Private Methods ============= - - def _calculate_initial_workers(self) -> int: - """ - Calculate initial number of workers based on graph complexity. - - Returns: - Initial worker count - """ - # Simple heuristic: start with min_workers, scale based on graph size - node_count = len(self.graph.nodes) - - if node_count < 10: - return self.min_workers - elif node_count < 50: - return min(self.min_workers + 1, self.max_workers) - else: - return min(self.min_workers + 2, self.max_workers) - - def _should_scale_up(self, current_count: int, queue_depth: int) -> bool: - """ - Determine if pool should scale up. - - Args: - current_count: Current number of workers - queue_depth: Current queue depth - - Returns: - True if should scale up - """ - if current_count >= self.max_workers: - return False - - # Scale up if queue is deep - if queue_depth > self.scale_up_threshold: - return True - - # Scale up if all workers are busy and queue is not empty - active_count = self._get_active_count() - if active_count == current_count and queue_depth > 0: - return True - - return False - - def _should_scale_down(self, current_count: int) -> bool: - """ - Determine if pool should scale down. - - Args: - current_count: Current number of workers - - Returns: - True if should scale down - """ - return current_count > self.min_workers - - def _add_worker(self) -> None: - """Add a new worker to the pool.""" - worker_id = self._worker_counter - self._worker_counter += 1 - - # Create worker with activity callbacks - worker = Worker( - ready_queue=self.ready_queue, - event_queue=self.event_queue, - graph=self.graph, - worker_id=worker_id, - flask_app=self.flask_app, - context_vars=self.context_vars, - on_idle_callback=self._on_worker_idle, - on_active_callback=self._on_worker_active, - ) - - worker.start() - self.workers.append(worker) - self._worker_activity[worker_id] = (False, time.time()) - - def _remove_worker(self, worker_id: int) -> None: - """ - Remove a specific worker from the pool. - - Args: - worker_id: ID of worker to remove - """ - worker_to_remove = None - for worker in self.workers: - if worker.worker_id == worker_id: - worker_to_remove = worker - break - - if worker_to_remove: - worker_to_remove.stop() - self.workers.remove(worker_to_remove) - self._worker_activity.pop(worker_id, None) - - if worker_to_remove.is_alive(): - worker_to_remove.join(timeout=1.0) - - def _on_worker_idle(self, worker_id: int) -> None: - """ - Callback when worker becomes idle. - - Args: - worker_id: ID of the idle worker - """ - with self._lock: - self._worker_activity[worker_id] = (False, time.time()) - - def _on_worker_active(self, worker_id: int) -> None: - """ - Callback when worker becomes active. 
- - Args: - worker_id: ID of the active worker - """ - with self._lock: - self._worker_activity[worker_id] = (True, time.time()) - - def _get_idle_workers(self, current_time: float) -> list[int]: - """ - Get list of workers that have been idle too long. - - Args: - current_time: Current timestamp - - Returns: - List of idle worker IDs sorted by idle time (longest first) - """ - idle_workers: list[tuple[int, float]] = [] - - for worker_id, (is_active, last_change) in self._worker_activity.items(): - if not is_active: - idle_time = current_time - last_change - if idle_time > self.scale_down_idle_time: - idle_workers.append((worker_id, idle_time)) - - # Sort by idle time (longest first) - idle_workers.sort(key=lambda x: x[1], reverse=True) - return [worker_id for worker_id, _ in idle_workers] - - def _get_active_count(self) -> int: - """ - Get count of currently active workers. - - Returns: - Number of active workers - """ - return sum(1 for is_active, _ in self._worker_activity.values() if is_active) - - # ============= Public Status Methods ============= - - def get_worker_count(self) -> int: - """Get current number of workers.""" - with self._lock: - return len(self.workers) - - def get_status(self) -> dict[str, int]: - """ - Get pool status information. - - Returns: - Dictionary with status information - """ - with self._lock: - return { - "total_workers": len(self.workers), - "active_workers": self._get_active_count(), - "idle_workers": len(self.workers) - self._get_active_count(), - "queue_depth": self.ready_queue.qsize(), - "min_workers": self.min_workers, - "max_workers": self.max_workers, - } diff --git a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py b/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py new file mode 100644 index 0000000000..c07ea1e4dd --- /dev/null +++ b/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py @@ -0,0 +1,168 @@ +""" +Simple worker pool that consolidates functionality. + +This is a simpler implementation that merges WorkerPool, ActivityTracker, +DynamicScaler, and WorkerFactory into a single class. +""" + +import queue +import threading +from typing import TYPE_CHECKING, final + +from configs import dify_config +from core.workflow.graph import Graph +from core.workflow.graph_events import GraphNodeEventBase + +from ..worker import Worker + +if TYPE_CHECKING: + from contextvars import Context + + from flask import Flask + + +@final +class SimpleWorkerPool: + """ + Simple worker pool with integrated management. + + This class consolidates all worker management functionality into + a single, simpler implementation without excessive abstraction. + """ + + def __init__( + self, + ready_queue: queue.Queue[str], + event_queue: queue.Queue[GraphNodeEventBase], + graph: Graph, + flask_app: "Flask | None" = None, + context_vars: "Context | None" = None, + min_workers: int | None = None, + max_workers: int | None = None, + scale_up_threshold: int | None = None, + scale_down_idle_time: float | None = None, + ) -> None: + """ + Initialize the simple worker pool. 
+ + Args: + ready_queue: Queue of nodes ready for execution + event_queue: Queue for worker events + graph: The workflow graph + flask_app: Optional Flask app for context preservation + context_vars: Optional context variables + min_workers: Minimum number of workers + max_workers: Maximum number of workers + scale_up_threshold: Queue depth to trigger scale up + scale_down_idle_time: Seconds before scaling down idle workers + """ + self.ready_queue = ready_queue + self.event_queue = event_queue + self.graph = graph + self.flask_app = flask_app + self.context_vars = context_vars + + # Scaling parameters with defaults + self.min_workers = min_workers or dify_config.GRAPH_ENGINE_MIN_WORKERS + self.max_workers = max_workers or dify_config.GRAPH_ENGINE_MAX_WORKERS + self.scale_up_threshold = scale_up_threshold or dify_config.GRAPH_ENGINE_SCALE_UP_THRESHOLD + self.scale_down_idle_time = scale_down_idle_time or dify_config.GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME + + # Worker management + self.workers: list[Worker] = [] + self._worker_counter = 0 + self._lock = threading.RLock() + self._running = False + + def start(self, initial_count: int | None = None) -> None: + """ + Start the worker pool. + + Args: + initial_count: Number of workers to start with (auto-calculated if None) + """ + with self._lock: + if self._running: + return + + self._running = True + + # Calculate initial worker count + if initial_count is None: + node_count = len(self.graph.nodes) + if node_count < 10: + initial_count = self.min_workers + elif node_count < 50: + initial_count = min(self.min_workers + 1, self.max_workers) + else: + initial_count = min(self.min_workers + 2, self.max_workers) + + # Create initial workers + for _ in range(initial_count): + self._create_worker() + + def stop(self) -> None: + """Stop all workers in the pool.""" + with self._lock: + self._running = False + + # Stop all workers + for worker in self.workers: + worker.stop() + + # Wait for workers to finish + for worker in self.workers: + if worker.is_alive(): + worker.join(timeout=10.0) + + self.workers.clear() + + def _create_worker(self) -> None: + """Create and start a new worker.""" + worker_id = self._worker_counter + self._worker_counter += 1 + + worker = Worker( + ready_queue=self.ready_queue, + event_queue=self.event_queue, + graph=self.graph, + worker_id=worker_id, + flask_app=self.flask_app, + context_vars=self.context_vars, + ) + + worker.start() + self.workers.append(worker) + + def check_and_scale(self) -> None: + """Check and perform scaling if needed.""" + with self._lock: + if not self._running: + return + + current_count = len(self.workers) + queue_depth = self.ready_queue.qsize() + + # Simple scaling logic + if queue_depth > self.scale_up_threshold and current_count < self.max_workers: + self._create_worker() + + def get_worker_count(self) -> int: + """Get current number of workers.""" + with self._lock: + return len(self.workers) + + def get_status(self) -> dict[str, int]: + """ + Get pool status information. 
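The start() method above sizes the pool from the graph before any dynamic scaling kicks in. A standalone sketch of that heuristic, assuming min_workers=1 and max_workers=10 (the real defaults come from dify_config):

def initial_worker_count(node_count: int, min_workers: int = 1, max_workers: int = 10) -> int:
    # Small graphs start at the configured floor.
    if node_count < 10:
        return min_workers
    # Mid-sized graphs get one extra worker, larger graphs two, capped at the ceiling.
    if node_count < 50:
        return min(min_workers + 1, max_workers)
    return min(min_workers + 2, max_workers)

# initial_worker_count(7)   -> 1
# initial_worker_count(30)  -> 2
# initial_worker_count(120) -> 3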
+ + Returns: + Dictionary with status information + """ + with self._lock: + return { + "total_workers": len(self.workers), + "queue_depth": self.ready_queue.qsize(), + "min_workers": self.min_workers, + "max_workers": self.max_workers, + } diff --git a/api/core/workflow/graph_engine/worker_management/worker_factory.py b/api/core/workflow/graph_engine/worker_management/worker_factory.py deleted file mode 100644 index cbb8e0b68e..0000000000 --- a/api/core/workflow/graph_engine/worker_management/worker_factory.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -Factory for creating worker instances. -""" - -import contextvars -import queue -from collections.abc import Callable -from typing import final - -from flask import Flask - -from core.workflow.graph import Graph -from core.workflow.graph_events import GraphNodeEventBase - -from ..worker import Worker - - -@final -class WorkerFactory: - """ - Factory for creating worker instances with proper context. - - This encapsulates worker creation logic and ensures all workers - are created with the necessary Flask and context variable setup. - """ - - def __init__( - self, - flask_app: Flask | None, - context_vars: contextvars.Context, - ) -> None: - """ - Initialize the worker factory. - - Args: - flask_app: Flask application context - context_vars: Context variables to propagate - """ - self.flask_app = flask_app - self.context_vars = context_vars - self._next_worker_id = 0 - - def create_worker( - self, - ready_queue: queue.Queue[str], - event_queue: queue.Queue[GraphNodeEventBase], - graph: Graph, - on_idle_callback: Callable[[int], None] | None = None, - on_active_callback: Callable[[int], None] | None = None, - ) -> Worker: - """ - Create a new worker instance. - - Args: - ready_queue: Queue of nodes ready for execution - event_queue: Queue for worker events - graph: The workflow graph - on_idle_callback: Callback when worker becomes idle - on_active_callback: Callback when worker becomes active - - Returns: - Configured worker instance - """ - worker_id = self._next_worker_id - self._next_worker_id += 1 - - return Worker( - ready_queue=ready_queue, - event_queue=event_queue, - graph=graph, - worker_id=worker_id, - flask_app=self.flask_app, - context_vars=self.context_vars, - on_idle_callback=on_idle_callback, - on_active_callback=on_active_callback, - ) diff --git a/api/core/workflow/graph_engine/worker_management/worker_pool.py b/api/core/workflow/graph_engine/worker_management/worker_pool.py deleted file mode 100644 index bdec3e5323..0000000000 --- a/api/core/workflow/graph_engine/worker_management/worker_pool.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Worker pool management. -""" - -import queue -import threading -from typing import final - -from core.workflow.graph import Graph -from core.workflow.graph_events import GraphNodeEventBase - -from ..worker import Worker -from .activity_tracker import ActivityTracker -from .dynamic_scaler import DynamicScaler -from .worker_factory import WorkerFactory - - -@final -class WorkerPool: - """ - Manages a pool of worker threads for executing nodes. - - This provides dynamic scaling, activity tracking, and lifecycle - management for worker threads. - """ - - def __init__( - self, - ready_queue: queue.Queue[str], - event_queue: queue.Queue[GraphNodeEventBase], - graph: Graph, - worker_factory: WorkerFactory, - dynamic_scaler: DynamicScaler, - activity_tracker: ActivityTracker, - ) -> None: - """ - Initialize the worker pool. 
- - Args: - ready_queue: Queue of nodes ready for execution - event_queue: Queue for worker events - graph: The workflow graph - worker_factory: Factory for creating workers - dynamic_scaler: Scaler for dynamic sizing - activity_tracker: Tracker for worker activity - """ - self.ready_queue = ready_queue - self.event_queue = event_queue - self.graph = graph - self.worker_factory = worker_factory - self.dynamic_scaler = dynamic_scaler - self.activity_tracker = activity_tracker - - self.workers: list[Worker] = [] - self._lock = threading.RLock() - self._running = False - - def start(self, initial_count: int) -> None: - """ - Start the worker pool with initial workers. - - Args: - initial_count: Number of workers to start with - """ - with self._lock: - if self._running: - return - - self._running = True - - # Create initial workers - for _ in range(initial_count): - worker = self.worker_factory.create_worker(self.ready_queue, self.event_queue, self.graph) - worker.start() - self.workers.append(worker) - - def stop(self) -> None: - """Stop all workers in the pool.""" - with self._lock: - self._running = False - - # Stop all workers - for worker in self.workers: - worker.stop() - - # Wait for workers to finish - for worker in self.workers: - if worker.is_alive(): - worker.join(timeout=10.0) - - self.workers.clear() - - def scale_up(self) -> None: - """Add a worker to the pool if allowed.""" - with self._lock: - if not self._running: - return - - if len(self.workers) >= self.dynamic_scaler.max_workers: - return - - worker = self.worker_factory.create_worker(self.ready_queue, self.event_queue, self.graph) - worker.start() - self.workers.append(worker) - - def scale_down(self, worker_ids: list[int]) -> None: - """ - Remove specific workers from the pool. - - Args: - worker_ids: IDs of workers to remove - """ - with self._lock: - if not self._running: - return - - if len(self.workers) <= self.dynamic_scaler.min_workers: - return - - workers_to_remove = [w for w in self.workers if w.worker_id in worker_ids] - - for worker in workers_to_remove: - worker.stop() - self.workers.remove(worker) - if worker.is_alive(): - worker.join(timeout=1.0) - - def get_worker_count(self) -> int: - """Get current number of workers.""" - with self._lock: - return len(self.workers) - - def check_scaling(self, queue_depth: int, executing_count: int) -> None: - """ - Check and perform scaling if needed. 
- - Args: - queue_depth: Current queue depth - executing_count: Number of executing nodes - """ - current_count = self.get_worker_count() - - if self.dynamic_scaler.should_scale_up(current_count, queue_depth, executing_count): - self.scale_up() - - idle_workers = self.activity_tracker.get_idle_workers() - if idle_workers: - self.scale_down(idle_workers) From a5cb9d2b73c3abe3142c4974191315d5dae269f2 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 03:59:53 +0800 Subject: [PATCH 21/96] refactor(graph_engine): inline output_registry into response_coordinator Signed-off-by: -LAN- --- api/.importlinter | 1 - .../workflow/graph_engine/graph_engine.py | 6 +- .../graph_engine/output_registry/__init__.py | 10 - .../graph_engine/output_registry/registry.py | 148 -------- .../graph_engine/output_registry/stream.py | 70 ---- .../response_coordinator/coordinator.py | 119 +++++- .../graph_engine/test_output_registry.py | 135 ------- .../graph_engine/test_response_coordinator.py | 347 ------------------ 8 files changed, 110 insertions(+), 726 deletions(-) delete mode 100644 api/core/workflow/graph_engine/output_registry/__init__.py delete mode 100644 api/core/workflow/graph_engine/output_registry/registry.py delete mode 100644 api/core/workflow/graph_engine/output_registry/stream.py delete mode 100644 api/tests/unit_tests/core/workflow/graph_engine/test_output_registry.py delete mode 100644 api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py diff --git a/api/.importlinter b/api/.importlinter index 6e15f06a5c..14a66f2ff9 100644 --- a/api/.importlinter +++ b/api/.importlinter @@ -37,7 +37,6 @@ type = layers layers = graph_engine response_coordinator - output_registry containers = core.workflow.graph_engine diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index b627ccc634..8ac27143e3 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -35,7 +35,6 @@ from .event_management import EventCollector, EventEmitter, EventHandlerRegistry from .graph_traversal import BranchHandler, EdgeProcessor, NodeReadinessChecker, SkipPropagator from .layers.base import Layer from .orchestration import Dispatcher, ExecutionCoordinator -from .output_registry import OutputRegistry from .protocols.command_channel import CommandChannel from .response_coordinator import ResponseStreamCoordinator from .state_management import UnifiedStateManager @@ -122,8 +121,9 @@ class GraphEngine: self.state_manager = UnifiedStateManager(self.graph, self.ready_queue) # Response coordination - self.output_registry = OutputRegistry(self.graph_runtime_state.variable_pool) - self.response_coordinator = ResponseStreamCoordinator(registry=self.output_registry, graph=self.graph) + self.response_coordinator = ResponseStreamCoordinator( + variable_pool=self.graph_runtime_state.variable_pool, graph=self.graph + ) # Event management self.event_collector = EventCollector() diff --git a/api/core/workflow/graph_engine/output_registry/__init__.py b/api/core/workflow/graph_engine/output_registry/__init__.py deleted file mode 100644 index a65a62ec53..0000000000 --- a/api/core/workflow/graph_engine/output_registry/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -""" -OutputRegistry - Thread-safe storage for node outputs (streams and scalars) - -This component provides thread-safe storage and retrieval of node outputs, -supporting both scalar values and streaming chunks with proper state management. 
-""" - -from .registry import OutputRegistry - -__all__ = ["OutputRegistry"] diff --git a/api/core/workflow/graph_engine/output_registry/registry.py b/api/core/workflow/graph_engine/output_registry/registry.py deleted file mode 100644 index 29eefa5abe..0000000000 --- a/api/core/workflow/graph_engine/output_registry/registry.py +++ /dev/null @@ -1,148 +0,0 @@ -""" -Main OutputRegistry implementation. - -This module contains the public OutputRegistry class that provides -thread-safe storage for node outputs. -""" - -from collections.abc import Sequence -from threading import RLock -from typing import TYPE_CHECKING, Any, Union, final - -from core.variables import Segment -from core.workflow.entities.variable_pool import VariablePool - -from .stream import Stream - -if TYPE_CHECKING: - from core.workflow.graph_events import NodeRunStreamChunkEvent - - -@final -class OutputRegistry: - """ - Thread-safe registry for storing and retrieving node outputs. - - Supports both scalar values and streaming chunks with proper state management. - All operations are thread-safe using internal locking. - """ - - def __init__(self, variable_pool: VariablePool) -> None: - """Initialize empty registry with thread-safe storage.""" - self._lock = RLock() - self._scalars = variable_pool - self._streams: dict[tuple[str, ...], Stream] = {} - - def _selector_to_key(self, selector: Sequence[str]) -> tuple[str, ...]: - """Convert selector list to tuple key for internal storage.""" - return tuple(selector) - - def set_scalar( - self, selector: Sequence[str], value: Union[str, int, float, bool, dict[str, Any], list[Any]] - ) -> None: - """ - Set a scalar value for the given selector. - - Args: - selector: List of strings identifying the output location - value: The scalar value to store - """ - with self._lock: - self._scalars.add(selector, value) - - def get_scalar(self, selector: Sequence[str]) -> "Segment | None": - """ - Get a scalar value for the given selector. - - Args: - selector: List of strings identifying the output location - - Returns: - The stored Variable object, or None if not found - """ - with self._lock: - return self._scalars.get(selector) - - def append_chunk(self, selector: Sequence[str], event: "NodeRunStreamChunkEvent") -> None: - """ - Append a NodeRunStreamChunkEvent to the stream for the given selector. - - Args: - selector: List of strings identifying the stream location - event: The NodeRunStreamChunkEvent to append - - Raises: - ValueError: If the stream is already closed - """ - key = self._selector_to_key(selector) - with self._lock: - if key not in self._streams: - self._streams[key] = Stream() - - try: - self._streams[key].append(event) - except ValueError: - raise ValueError(f"Stream {'.'.join(selector)} is already closed") - - def pop_chunk(self, selector: Sequence[str]) -> "NodeRunStreamChunkEvent | None": - """ - Pop the next unread NodeRunStreamChunkEvent from the stream. - - Args: - selector: List of strings identifying the stream location - - Returns: - The next event, or None if no unread events available - """ - key = self._selector_to_key(selector) - with self._lock: - if key not in self._streams: - return None - - return self._streams[key].pop_next() - - def has_unread(self, selector: Sequence[str]) -> bool: - """ - Check if the stream has unread events. 
- - Args: - selector: List of strings identifying the stream location - - Returns: - True if there are unread events, False otherwise - """ - key = self._selector_to_key(selector) - with self._lock: - if key not in self._streams: - return False - - return self._streams[key].has_unread() - - def close_stream(self, selector: Sequence[str]) -> None: - """ - Mark a stream as closed (no more chunks can be appended). - - Args: - selector: List of strings identifying the stream location - """ - key = self._selector_to_key(selector) - with self._lock: - if key not in self._streams: - self._streams[key] = Stream() - self._streams[key].close() - - def stream_closed(self, selector: Sequence[str]) -> bool: - """ - Check if a stream is closed. - - Args: - selector: List of strings identifying the stream location - - Returns: - True if the stream is closed, False otherwise - """ - key = self._selector_to_key(selector) - with self._lock: - if key not in self._streams: - return False - return self._streams[key].is_closed diff --git a/api/core/workflow/graph_engine/output_registry/stream.py b/api/core/workflow/graph_engine/output_registry/stream.py deleted file mode 100644 index 8a99b56d1f..0000000000 --- a/api/core/workflow/graph_engine/output_registry/stream.py +++ /dev/null @@ -1,70 +0,0 @@ -""" -Internal stream implementation for OutputRegistry. - -This module contains the private Stream class used internally by OutputRegistry -to manage streaming data chunks. -""" - -from typing import TYPE_CHECKING, final - -if TYPE_CHECKING: - from core.workflow.graph_events import NodeRunStreamChunkEvent - - -@final -class Stream: - """ - A stream that holds NodeRunStreamChunkEvent objects and tracks read position. - - This class encapsulates stream-specific data and operations, - including event storage, read position tracking, and closed state. - - Note: This is an internal class not exposed in the public API. - """ - - def __init__(self) -> None: - """Initialize an empty stream.""" - self.events: list[NodeRunStreamChunkEvent] = [] - self.read_position: int = 0 - self.is_closed: bool = False - - def append(self, event: "NodeRunStreamChunkEvent") -> None: - """ - Append a NodeRunStreamChunkEvent to the stream. - - Args: - event: The NodeRunStreamChunkEvent to append - - Raises: - ValueError: If the stream is already closed - """ - if self.is_closed: - raise ValueError("Cannot append to a closed stream") - self.events.append(event) - - def pop_next(self) -> "NodeRunStreamChunkEvent | None": - """ - Pop the next unread NodeRunStreamChunkEvent from the stream. - - Returns: - The next event, or None if no unread events available - """ - if self.read_position >= len(self.events): - return None - - event = self.events[self.read_position] - self.read_position += 1 - return event - - def has_unread(self) -> bool: - """ - Check if the stream has unread events. 
- - Returns: - True if there are unread events, False otherwise - """ - return self.read_position < len(self.events) - - def close(self) -> None: - """Mark the stream as closed (no more chunks can be appended).""" - self.is_closed = True diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index 1fb58852d2..a7b77bdf4a 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -12,12 +12,12 @@ from threading import RLock from typing import TypeAlias, final from uuid import uuid4 +from core.workflow.entities.variable_pool import VariablePool from core.workflow.enums import NodeExecutionType, NodeState from core.workflow.graph import Graph from core.workflow.graph_events import NodeRunStreamChunkEvent, NodeRunSucceededEvent from core.workflow.nodes.base.template import TextSegment, VariableSegment -from ..output_registry import OutputRegistry from .path import Path from .session import ResponseSession @@ -36,20 +36,25 @@ class ResponseStreamCoordinator: Ensures ordered streaming of responses based on upstream node outputs and constants. """ - def __init__(self, registry: OutputRegistry, graph: "Graph") -> None: + def __init__(self, variable_pool: "VariablePool", graph: "Graph") -> None: """ - Initialize coordinator with output registry. + Initialize coordinator with variable pool. Args: - registry: OutputRegistry instance for accessing node outputs + variable_pool: VariablePool instance for accessing node variables graph: Graph instance for looking up node information """ - self.registry = registry + self.variable_pool = variable_pool self.graph = graph self.active_session: ResponseSession | None = None self.waiting_sessions: deque[ResponseSession] = deque() self.lock = RLock() + # Internal stream management (replacing OutputRegistry) + self._stream_buffers: dict[tuple[str, ...], list[NodeRunStreamChunkEvent]] = {} + self._stream_positions: dict[tuple[str, ...], int] = {} + self._closed_streams: set[tuple[str, ...]] = set() + # Track response nodes self._response_nodes: set[NodeID] = set() @@ -256,15 +261,15 @@ class ResponseStreamCoordinator: ) -> Sequence[NodeRunStreamChunkEvent]: with self.lock: if isinstance(event, NodeRunStreamChunkEvent): - self.registry.append_chunk(event.selector, event) + self._append_stream_chunk(event.selector, event) if event.is_final: - self.registry.close_stream(event.selector) + self._close_stream(event.selector) return self.try_flush() else: # Skip cause we share the same variable pool. 
# # for variable_name, variable_value in event.node_run_result.outputs.items(): - # self.registry.set_scalar((event.node_id, variable_name), variable_value) + # self.variable_pool.add((event.node_id, variable_name), variable_value) return self.try_flush() return [] @@ -327,8 +332,8 @@ class ResponseStreamCoordinator: execution_id = self._get_or_create_execution_id(output_node_id) # Stream all available chunks - while self.registry.has_unread(segment.selector): - if event := self.registry.pop_chunk(segment.selector): + while self._has_unread_stream(segment.selector): + if event := self._pop_stream_chunk(segment.selector): # For special selectors, we need to update the event to use # the active response node's information if self.active_session and source_selector_prefix not in self.graph.nodes: @@ -349,12 +354,12 @@ class ResponseStreamCoordinator: events.append(event) # Check if this is the last chunk by looking ahead - stream_closed = self.registry.stream_closed(segment.selector) + stream_closed = self._is_stream_closed(segment.selector) # Check if stream is closed to determine if segment is complete if stream_closed: is_complete = True - elif value := self.registry.get_scalar(segment.selector): + elif value := self.variable_pool.get(segment.selector): # Process scalar value is_last_segment = bool( self.active_session and self.active_session.index == len(self.active_session.template.segments) - 1 @@ -464,3 +469,93 @@ class ResponseStreamCoordinator: events = self.try_flush() return events + + # ============= Internal Stream Management Methods ============= + + def _append_stream_chunk(self, selector: Sequence[str], event: NodeRunStreamChunkEvent) -> None: + """ + Append a stream chunk to the internal buffer. + + Args: + selector: List of strings identifying the stream location + event: The NodeRunStreamChunkEvent to append + + Raises: + ValueError: If the stream is already closed + """ + key = tuple(selector) + + if key in self._closed_streams: + raise ValueError(f"Stream {'.'.join(selector)} is already closed") + + if key not in self._stream_buffers: + self._stream_buffers[key] = [] + self._stream_positions[key] = 0 + + self._stream_buffers[key].append(event) + + def _pop_stream_chunk(self, selector: Sequence[str]) -> NodeRunStreamChunkEvent | None: + """ + Pop the next unread stream chunk from the buffer. + + Args: + selector: List of strings identifying the stream location + + Returns: + The next event, or None if no unread events available + """ + key = tuple(selector) + + if key not in self._stream_buffers: + return None + + position = self._stream_positions.get(key, 0) + buffer = self._stream_buffers[key] + + if position >= len(buffer): + return None + + event = buffer[position] + self._stream_positions[key] = position + 1 + return event + + def _has_unread_stream(self, selector: Sequence[str]) -> bool: + """ + Check if the stream has unread events. + + Args: + selector: List of strings identifying the stream location + + Returns: + True if there are unread events, False otherwise + """ + key = tuple(selector) + + if key not in self._stream_buffers: + return False + + position = self._stream_positions.get(key, 0) + return position < len(self._stream_buffers[key]) + + def _close_stream(self, selector: Sequence[str]) -> None: + """ + Mark a stream as closed (no more chunks can be appended). 
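Taken together, these private helpers keep one append-only buffer, one read cursor, and one closed flag per selector. A minimal self-contained sketch of the same bookkeeping, using plain strings in place of NodeRunStreamChunkEvent:

class StreamBuffer:
    """Append-only chunk buffer with a read cursor and a closed flag."""

    def __init__(self) -> None:
        self._chunks: list[str] = []
        self._position = 0
        self._closed = False

    def append(self, chunk: str) -> None:
        if self._closed:
            raise ValueError("stream is already closed")
        self._chunks.append(chunk)

    def pop(self) -> str | None:
        # Return the next unread chunk, or None when caught up.
        if self._position >= len(self._chunks):
            return None
        chunk = self._chunks[self._position]
        self._position += 1
        return chunk

    def has_unread(self) -> bool:
        return self._position < len(self._chunks)

    def close(self) -> None:
        # Closing only blocks further appends; buffered chunks stay readable.
        self._closed = True

# Usage:
# buf = StreamBuffer()
# buf.append("Hello, "); buf.append("world"); buf.close()
# while (chunk := buf.pop()) is not None:
#     print(chunk, end="")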
+ + Args: + selector: List of strings identifying the stream location + """ + key = tuple(selector) + self._closed_streams.add(key) + + def _is_stream_closed(self, selector: Sequence[str]) -> bool: + """ + Check if a stream is closed. + + Args: + selector: List of strings identifying the stream location + + Returns: + True if the stream is closed, False otherwise + """ + key = tuple(selector) + return key in self._closed_streams diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_output_registry.py b/api/tests/unit_tests/core/workflow/graph_engine/test_output_registry.py deleted file mode 100644 index d27f610fe6..0000000000 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_output_registry.py +++ /dev/null @@ -1,135 +0,0 @@ -from uuid import uuid4 - -import pytest - -from core.workflow.entities.variable_pool import VariablePool -from core.workflow.enums import NodeType -from core.workflow.graph_engine.output_registry import OutputRegistry -from core.workflow.graph_events import NodeRunStreamChunkEvent - - -class TestOutputRegistry: - def test_scalar_operations(self): - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Test setting and getting scalar - registry.set_scalar(["node1", "output"], "test_value") - - segment = registry.get_scalar(["node1", "output"]) - assert segment - assert segment.text == "test_value" - - # Test getting non-existent scalar - assert registry.get_scalar(["non_existent"]) is None - - def test_stream_operations(self): - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Create test events - event1 = NodeRunStreamChunkEvent( - id=str(uuid4()), - node_id="node1", - node_type=NodeType.LLM, - selector=["node1", "stream"], - chunk="chunk1", - is_final=False, - ) - event2 = NodeRunStreamChunkEvent( - id=str(uuid4()), - node_id="node1", - node_type=NodeType.LLM, - selector=["node1", "stream"], - chunk="chunk2", - is_final=True, - ) - - # Test appending events - registry.append_chunk(["node1", "stream"], event1) - registry.append_chunk(["node1", "stream"], event2) - - # Test has_unread - assert registry.has_unread(["node1", "stream"]) is True - - # Test popping events - popped_event1 = registry.pop_chunk(["node1", "stream"]) - assert popped_event1 == event1 - assert popped_event1.chunk == "chunk1" - - popped_event2 = registry.pop_chunk(["node1", "stream"]) - assert popped_event2 == event2 - assert popped_event2.chunk == "chunk2" - - assert registry.pop_chunk(["node1", "stream"]) is None - - # Test has_unread after popping all - assert registry.has_unread(["node1", "stream"]) is False - - def test_stream_closing(self): - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Test stream is not closed initially - assert registry.stream_closed(["node1", "stream"]) is False - - # Test closing stream - registry.close_stream(["node1", "stream"]) - assert registry.stream_closed(["node1", "stream"]) is True - - # Test appending to closed stream raises error - event = NodeRunStreamChunkEvent( - id=str(uuid4()), - node_id="node1", - node_type=NodeType.LLM, - selector=["node1", "stream"], - chunk="chunk", - is_final=False, - ) - with pytest.raises(ValueError, match="Stream node1.stream is already closed"): - registry.append_chunk(["node1", "stream"], event) - - def test_thread_safety(self): - import threading - - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - results = [] - - def append_chunks(thread_id: int): - for i in range(100): - event = 
NodeRunStreamChunkEvent( - id=str(uuid4()), - node_id="test_node", - node_type=NodeType.LLM, - selector=["stream"], - chunk=f"thread{thread_id}_chunk{i}", - is_final=False, - ) - registry.append_chunk(["stream"], event) - - # Start multiple threads - threads = [] - for i in range(5): - thread = threading.Thread(target=append_chunks, args=(i,)) - threads.append(thread) - thread.start() - - # Wait for threads - for thread in threads: - thread.join() - - # Verify all events are present - events = [] - while True: - event = registry.pop_chunk(["stream"]) - if event is None: - break - events.append(event) - - assert len(events) == 500 # 5 threads * 100 events each - # Verify the events have the expected chunk content format - chunk_texts = [e.chunk for e in events] - for i in range(5): - for j in range(100): - assert f"thread{i}_chunk{j}" in chunk_texts diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py deleted file mode 100644 index eadadfb8c8..0000000000 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py +++ /dev/null @@ -1,347 +0,0 @@ -"""Test cases for ResponseStreamCoordinator.""" - -from unittest.mock import Mock - -from core.variables import StringSegment -from core.workflow.entities.variable_pool import VariablePool -from core.workflow.enums import NodeState, NodeType -from core.workflow.graph import Graph -from core.workflow.graph_engine.output_registry import OutputRegistry -from core.workflow.graph_engine.response_coordinator import ResponseStreamCoordinator -from core.workflow.graph_engine.response_coordinator.session import ResponseSession -from core.workflow.nodes.answer.answer_node import AnswerNode -from core.workflow.nodes.base.node import Node -from core.workflow.nodes.base.template import Template, TextSegment, VariableSegment - - -class TestResponseStreamCoordinator: - """Test cases for ResponseStreamCoordinator.""" - - def test_skip_variable_segment_from_skipped_node(self): - """Test that VariableSegments from skipped nodes are properly skipped during try_flush.""" - # Create mock graph - graph = Mock(spec=Graph) - - # Create mock nodes - skipped_node = Mock(spec=Node) - skipped_node.id = "skipped_node" - skipped_node.state = NodeState.SKIPPED - skipped_node.node_type = NodeType.LLM - - active_node = Mock(spec=Node) - active_node.id = "active_node" - active_node.state = NodeState.TAKEN - active_node.node_type = NodeType.LLM - - response_node = Mock(spec=AnswerNode) - response_node.id = "response_node" - response_node.node_type = NodeType.ANSWER - - # Set up graph nodes dictionary - graph.nodes = {"skipped_node": skipped_node, "active_node": active_node, "response_node": response_node} - - # Create output registry with variable pool - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Add some test data to registry for the active node - registry.set_scalar(("active_node", "output"), StringSegment(value="Active output")) - - # Create RSC instance - rsc = ResponseStreamCoordinator(registry=registry, graph=graph) - - # Create template with segments from both skipped and active nodes - template = Template( - segments=[ - VariableSegment(selector=["skipped_node", "output"]), - TextSegment(text=" - "), - VariableSegment(selector=["active_node", "output"]), - ] - ) - - # Create and set active session - session = ResponseSession(node_id="response_node", template=template, index=0) - rsc.active_session = 
session - - # Execute try_flush - events = rsc.try_flush() - - # Verify that: - # 1. The skipped node's variable segment was skipped (index advanced) - # 2. The text segment was processed - # 3. The active node's variable segment was processed - assert len(events) == 2 # TextSegment + VariableSegment from active_node - - # Check that the first event is the text segment - assert events[0].chunk == " - " - - # Check that the second event is from the active node - assert events[1].chunk == "Active output" - assert events[1].selector == ["active_node", "output"] - - # Session should be complete - assert session.is_complete() - - def test_process_variable_segment_from_non_skipped_node(self): - """Test that VariableSegments from non-skipped nodes are processed normally.""" - # Create mock graph - graph = Mock(spec=Graph) - - # Create mock nodes - active_node1 = Mock(spec=Node) - active_node1.id = "node1" - active_node1.state = NodeState.TAKEN - active_node1.node_type = NodeType.LLM - - active_node2 = Mock(spec=Node) - active_node2.id = "node2" - active_node2.state = NodeState.TAKEN - active_node2.node_type = NodeType.LLM - - response_node = Mock(spec=AnswerNode) - response_node.id = "response_node" - response_node.node_type = NodeType.ANSWER - - # Set up graph nodes dictionary - graph.nodes = {"node1": active_node1, "node2": active_node2, "response_node": response_node} - - # Create output registry with variable pool - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Add test data to registry - registry.set_scalar(("node1", "output"), StringSegment(value="Output 1")) - registry.set_scalar(("node2", "output"), StringSegment(value="Output 2")) - - # Create RSC instance - rsc = ResponseStreamCoordinator(registry=registry, graph=graph) - - # Create template with segments from active nodes - template = Template( - segments=[ - VariableSegment(selector=["node1", "output"]), - TextSegment(text=" | "), - VariableSegment(selector=["node2", "output"]), - ] - ) - - # Create and set active session - session = ResponseSession(node_id="response_node", template=template, index=0) - rsc.active_session = session - - # Execute try_flush - events = rsc.try_flush() - - # Verify all segments were processed - assert len(events) == 3 - - # Check events in order - assert events[0].chunk == "Output 1" - assert events[0].selector == ["node1", "output"] - - assert events[1].chunk == " | " - - assert events[2].chunk == "Output 2" - assert events[2].selector == ["node2", "output"] - - # Session should be complete - assert session.is_complete() - - def test_mixed_skipped_and_active_nodes(self): - """Test processing with a mix of skipped and active nodes.""" - # Create mock graph - graph = Mock(spec=Graph) - - # Create mock nodes with various states - skipped_node1 = Mock(spec=Node) - skipped_node1.id = "skip1" - skipped_node1.state = NodeState.SKIPPED - skipped_node1.node_type = NodeType.LLM - - active_node = Mock(spec=Node) - active_node.id = "active" - active_node.state = NodeState.TAKEN - active_node.node_type = NodeType.LLM - - skipped_node2 = Mock(spec=Node) - skipped_node2.id = "skip2" - skipped_node2.state = NodeState.SKIPPED - skipped_node2.node_type = NodeType.LLM - - response_node = Mock(spec=AnswerNode) - response_node.id = "response_node" - response_node.node_type = NodeType.ANSWER - - # Set up graph nodes dictionary - graph.nodes = { - "skip1": skipped_node1, - "active": active_node, - "skip2": skipped_node2, - "response_node": response_node, - } - - # Create output registry with 
variable pool - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Add data only for active node - registry.set_scalar(("active", "result"), StringSegment(value="Active Result")) - - # Create RSC instance - rsc = ResponseStreamCoordinator(registry=registry, graph=graph) - - # Create template with mixed segments - template = Template( - segments=[ - TextSegment(text="Start: "), - VariableSegment(selector=["skip1", "output"]), - VariableSegment(selector=["active", "result"]), - VariableSegment(selector=["skip2", "output"]), - TextSegment(text=" :End"), - ] - ) - - # Create and set active session - session = ResponseSession(node_id="response_node", template=template, index=0) - rsc.active_session = session - - # Execute try_flush - events = rsc.try_flush() - - # Should have: "Start: ", "Active Result", " :End" - assert len(events) == 3 - - assert events[0].chunk == "Start: " - assert events[1].chunk == "Active Result" - assert events[1].selector == ["active", "result"] - assert events[2].chunk == " :End" - - # Session should be complete - assert session.is_complete() - - def test_all_variable_segments_skipped(self): - """Test when all VariableSegments are from skipped nodes.""" - # Create mock graph - graph = Mock(spec=Graph) - - # Create all skipped nodes - skipped_node1 = Mock(spec=Node) - skipped_node1.id = "skip1" - skipped_node1.state = NodeState.SKIPPED - skipped_node1.node_type = NodeType.LLM - - skipped_node2 = Mock(spec=Node) - skipped_node2.id = "skip2" - skipped_node2.state = NodeState.SKIPPED - skipped_node2.node_type = NodeType.LLM - - response_node = Mock(spec=AnswerNode) - response_node.id = "response_node" - response_node.node_type = NodeType.ANSWER - - # Set up graph nodes dictionary - graph.nodes = {"skip1": skipped_node1, "skip2": skipped_node2, "response_node": response_node} - - # Create output registry (empty since nodes are skipped) with variable pool - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - - # Create RSC instance - rsc = ResponseStreamCoordinator(registry=registry, graph=graph) - - # Create template with only skipped segments - template = Template( - segments=[ - VariableSegment(selector=["skip1", "output"]), - VariableSegment(selector=["skip2", "output"]), - TextSegment(text="Final text"), - ] - ) - - # Create and set active session - session = ResponseSession(node_id="response_node", template=template, index=0) - rsc.active_session = session - - # Execute try_flush - events = rsc.try_flush() - - # Should only have the final text segment - assert len(events) == 1 - assert events[0].chunk == "Final text" - - # Session should be complete - assert session.is_complete() - - def test_special_prefix_selectors(self): - """Test that special prefix selectors (sys, env, conversation) are handled correctly.""" - # Create mock graph - graph = Mock(spec=Graph) - - # Create response node - response_node = Mock(spec=AnswerNode) - response_node.id = "response_node" - response_node.node_type = NodeType.ANSWER - - # Set up graph nodes dictionary (no sys, env, conversation nodes) - graph.nodes = {"response_node": response_node} - - # Create output registry with special selector data and variable pool - variable_pool = VariablePool() - registry = OutputRegistry(variable_pool) - registry.set_scalar(("sys", "user_id"), StringSegment(value="user123")) - registry.set_scalar(("env", "api_key"), StringSegment(value="key456")) - registry.set_scalar(("conversation", "id"), StringSegment(value="conv789")) - - # Create RSC instance 
- rsc = ResponseStreamCoordinator(registry=registry, graph=graph) - - # Create template with special selectors - template = Template( - segments=[ - TextSegment(text="User: "), - VariableSegment(selector=["sys", "user_id"]), - TextSegment(text=", API: "), - VariableSegment(selector=["env", "api_key"]), - TextSegment(text=", Conv: "), - VariableSegment(selector=["conversation", "id"]), - ] - ) - - # Create and set active session - session = ResponseSession(node_id="response_node", template=template, index=0) - rsc.active_session = session - - # Execute try_flush - events = rsc.try_flush() - - # Should have all segments processed - assert len(events) == 6 - - # Check text segments - assert events[0].chunk == "User: " - assert events[0].node_id == "response_node" - - # Check sys selector - should use response node's info - assert events[1].chunk == "user123" - assert events[1].selector == ["sys", "user_id"] - assert events[1].node_id == "response_node" - assert events[1].node_type == NodeType.ANSWER - - assert events[2].chunk == ", API: " - - # Check env selector - should use response node's info - assert events[3].chunk == "key456" - assert events[3].selector == ["env", "api_key"] - assert events[3].node_id == "response_node" - assert events[3].node_type == NodeType.ANSWER - - assert events[4].chunk == ", Conv: " - - # Check conversation selector - should use response node's info - assert events[5].chunk == "conv789" - assert events[5].selector == ["conversation", "id"] - assert events[5].node_id == "response_node" - assert events[5].node_type == NodeType.ANSWER - - # Session should be complete - assert session.is_complete() From 0fdb1b2bc9c23b9f35d271e631ffce736d17dc46 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 04:37:23 +0800 Subject: [PATCH 22/96] refactor(graph_engine): Correct private attributes and private methods naming Signed-off-by: -LAN- --- .../command_processing/command_processor.py | 8 +- .../error_handling/abort_strategy.py | 2 + .../error_handling/default_value_strategy.py | 1 + .../error_handling/fail_branch_strategy.py | 2 + .../event_management/event_collector.py | 8 +- .../event_management/event_emitter.py | 6 +- .../workflow/graph_engine/graph_engine.py | 169 ++++++++++-------- .../graph_traversal/branch_handler.py | 16 +- .../graph_traversal/edge_processor.py | 20 +-- .../graph_traversal/node_readiness.py | 6 +- .../graph_traversal/skip_propagator.py | 20 +-- .../graph_engine/orchestration/dispatcher.py | 32 ++-- .../orchestration/execution_coordinator.py | 26 +-- .../response_coordinator/coordinator.py | 101 ++++++----- .../state_management/unified_state_manager.py | 34 ++-- api/core/workflow/graph_engine/worker.py | 46 ++--- .../worker_management/simple_worker_pool.py | 62 +++---- 17 files changed, 287 insertions(+), 272 deletions(-) diff --git a/api/core/workflow/graph_engine/command_processing/command_processor.py b/api/core/workflow/graph_engine/command_processing/command_processor.py index 7051ece735..942c2d77a5 100644 --- a/api/core/workflow/graph_engine/command_processing/command_processor.py +++ b/api/core/workflow/graph_engine/command_processing/command_processor.py @@ -39,8 +39,8 @@ class CommandProcessor: command_channel: Channel for receiving commands graph_execution: Graph execution aggregate """ - self.command_channel = command_channel - self.graph_execution = graph_execution + self._command_channel = command_channel + self._graph_execution = graph_execution self._handlers: dict[type[GraphEngineCommand], CommandHandler] = {} def 
register_handler(self, command_type: type[GraphEngineCommand], handler: CommandHandler) -> None: @@ -56,7 +56,7 @@ class CommandProcessor: def process_commands(self) -> None: """Check for and process any pending commands.""" try: - commands = self.command_channel.fetch_commands() + commands = self._command_channel.fetch_commands() for command in commands: self._handle_command(command) except Exception as e: @@ -72,7 +72,7 @@ class CommandProcessor: handler = self._handlers.get(type(command)) if handler: try: - handler.handle(command, self.graph_execution) + handler.handle(command, self._graph_execution) except Exception: logger.exception("Error handling command %s", command.__class__.__name__) else: diff --git a/api/core/workflow/graph_engine/error_handling/abort_strategy.py b/api/core/workflow/graph_engine/error_handling/abort_strategy.py index 6a805bd124..4593f004f3 100644 --- a/api/core/workflow/graph_engine/error_handling/abort_strategy.py +++ b/api/core/workflow/graph_engine/error_handling/abort_strategy.py @@ -32,6 +32,8 @@ class AbortStrategy: Returns: None - signals abortion """ + _ = graph + _ = retry_count logger.error("Node %s failed with ABORT strategy: %s", event.node_id, event.error) # Return None to signal that execution should stop diff --git a/api/core/workflow/graph_engine/error_handling/default_value_strategy.py b/api/core/workflow/graph_engine/error_handling/default_value_strategy.py index 61d36399aa..3cdcec88e5 100644 --- a/api/core/workflow/graph_engine/error_handling/default_value_strategy.py +++ b/api/core/workflow/graph_engine/error_handling/default_value_strategy.py @@ -31,6 +31,7 @@ class DefaultValueStrategy: Returns: NodeRunExceptionEvent with default values """ + _ = retry_count node = graph.nodes[event.node_id] outputs = { diff --git a/api/core/workflow/graph_engine/error_handling/fail_branch_strategy.py b/api/core/workflow/graph_engine/error_handling/fail_branch_strategy.py index 437c2bc7da..1c156b5be1 100644 --- a/api/core/workflow/graph_engine/error_handling/fail_branch_strategy.py +++ b/api/core/workflow/graph_engine/error_handling/fail_branch_strategy.py @@ -31,6 +31,8 @@ class FailBranchStrategy: Returns: NodeRunExceptionEvent to continue via fail branch """ + _ = graph + _ = retry_count outputs = { "error_message": event.node_run_result.error, "error_type": event.node_run_result.error_type, diff --git a/api/core/workflow/graph_engine/event_management/event_collector.py b/api/core/workflow/graph_engine/event_management/event_collector.py index a41dcf5b10..683a23c928 100644 --- a/api/core/workflow/graph_engine/event_management/event_collector.py +++ b/api/core/workflow/graph_engine/event_management/event_collector.py @@ -23,7 +23,7 @@ class ReadWriteLock: def acquire_read(self) -> None: """Acquire a read lock.""" - self._read_ready.acquire() + _ = self._read_ready.acquire() try: self._readers += 1 finally: @@ -31,7 +31,7 @@ class ReadWriteLock: def release_read(self) -> None: """Release a read lock.""" - self._read_ready.acquire() + _ = self._read_ready.acquire() try: self._readers -= 1 if self._readers == 0: @@ -41,9 +41,9 @@ class ReadWriteLock: def acquire_write(self) -> None: """Acquire a write lock.""" - self._read_ready.acquire() + _ = self._read_ready.acquire() while self._readers > 0: - self._read_ready.wait() + _ = self._read_ready.wait() def release_write(self) -> None: """Release a write lock.""" diff --git a/api/core/workflow/graph_engine/event_management/event_emitter.py b/api/core/workflow/graph_engine/event_management/event_emitter.py 
index 6fb0b96e8c..660ab2d1ce 100644 --- a/api/core/workflow/graph_engine/event_management/event_emitter.py +++ b/api/core/workflow/graph_engine/event_management/event_emitter.py @@ -28,7 +28,7 @@ class EventEmitter: Args: event_collector: The collector to emit events from """ - self.event_collector = event_collector + self._event_collector = event_collector self._execution_complete = threading.Event() def mark_complete(self) -> None: @@ -44,9 +44,9 @@ class EventEmitter: """ yielded_count = 0 - while not self._execution_complete.is_set() or yielded_count < self.event_collector.event_count(): + while not self._execution_complete.is_set() or yielded_count < self._event_collector.event_count(): # Get new events since last yield - new_events = self.event_collector.get_new_events(yielded_count) + new_events = self._event_collector.get_new_events(yielded_count) # Yield any new events for event in new_events: diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 8ac27143e3..43ab486fe1 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -75,7 +75,7 @@ class GraphEngine: """Initialize the graph engine with separated concerns.""" # Create domain models - self.execution_context = ExecutionContext( + self._execution_context = ExecutionContext( tenant_id=tenant_id, app_id=app_id, workflow_id=workflow_id, @@ -87,13 +87,13 @@ class GraphEngine: max_execution_time=max_execution_time, ) - self.graph_execution = GraphExecution(workflow_id=workflow_id) + self._graph_execution = GraphExecution(workflow_id=workflow_id) # Store core dependencies - self.graph = graph - self.graph_config = graph_config - self.graph_runtime_state = graph_runtime_state - self.command_channel = command_channel + self._graph = graph + self._graph_config = graph_config + self._graph_runtime_state = graph_runtime_state + self._command_channel = command_channel # Store worker management parameters self._min_workers = min_workers @@ -102,8 +102,8 @@ class GraphEngine: self._scale_down_idle_time = scale_down_idle_time # Initialize queues - self.ready_queue: queue.Queue[str] = queue.Queue() - self.event_queue: queue.Queue[GraphNodeEventBase] = queue.Queue() + self._ready_queue: queue.Queue[str] = queue.Queue() + self._event_queue: queue.Queue[GraphNodeEventBase] = queue.Queue() # Initialize subsystems self._initialize_subsystems() @@ -118,55 +118,55 @@ class GraphEngine: """Initialize all subsystems with proper dependency injection.""" # Unified state management - single instance handles all state operations - self.state_manager = UnifiedStateManager(self.graph, self.ready_queue) + self._state_manager = UnifiedStateManager(self._graph, self._ready_queue) # Response coordination - self.response_coordinator = ResponseStreamCoordinator( - variable_pool=self.graph_runtime_state.variable_pool, graph=self.graph + self._response_coordinator = ResponseStreamCoordinator( + variable_pool=self._graph_runtime_state.variable_pool, graph=self._graph ) # Event management - self.event_collector = EventCollector() - self.event_emitter = EventEmitter(self.event_collector) + self._event_collector = EventCollector() + self._event_emitter = EventEmitter(self._event_collector) # Error handling - self.error_handler = ErrorHandler(self.graph, self.graph_execution) + self._error_handler = ErrorHandler(self._graph, self._graph_execution) # Graph traversal - self.node_readiness_checker = NodeReadinessChecker(self.graph) - self.edge_processor = 
EdgeProcessor( - graph=self.graph, - state_manager=self.state_manager, - response_coordinator=self.response_coordinator, + self._node_readiness_checker = NodeReadinessChecker(self._graph) + self._edge_processor = EdgeProcessor( + graph=self._graph, + state_manager=self._state_manager, + response_coordinator=self._response_coordinator, ) - self.skip_propagator = SkipPropagator( - graph=self.graph, - state_manager=self.state_manager, + self._skip_propagator = SkipPropagator( + graph=self._graph, + state_manager=self._state_manager, ) - self.branch_handler = BranchHandler( - graph=self.graph, - edge_processor=self.edge_processor, - skip_propagator=self.skip_propagator, - state_manager=self.state_manager, + self._branch_handler = BranchHandler( + graph=self._graph, + edge_processor=self._edge_processor, + skip_propagator=self._skip_propagator, + state_manager=self._state_manager, ) # Event handler registry with all dependencies - self.event_handler_registry = EventHandlerRegistry( - graph=self.graph, - graph_runtime_state=self.graph_runtime_state, - graph_execution=self.graph_execution, - response_coordinator=self.response_coordinator, - event_collector=self.event_collector, - branch_handler=self.branch_handler, - edge_processor=self.edge_processor, - state_manager=self.state_manager, - error_handler=self.error_handler, + self._event_handler_registry = EventHandlerRegistry( + graph=self._graph, + graph_runtime_state=self._graph_runtime_state, + graph_execution=self._graph_execution, + response_coordinator=self._response_coordinator, + event_collector=self._event_collector, + branch_handler=self._branch_handler, + edge_processor=self._edge_processor, + state_manager=self._state_manager, + error_handler=self._error_handler, ) # Command processing - self.command_processor = CommandProcessor( - command_channel=self.command_channel, - graph_execution=self.graph_execution, + self._command_processor = CommandProcessor( + command_channel=self._command_channel, + graph_execution=self._graph_execution, ) self._setup_command_handlers() @@ -174,29 +174,29 @@ class GraphEngine: self._setup_worker_management() # Orchestration - self.execution_coordinator = ExecutionCoordinator( - graph_execution=self.graph_execution, - state_manager=self.state_manager, - event_handler=self.event_handler_registry, - event_collector=self.event_collector, - command_processor=self.command_processor, + self._execution_coordinator = ExecutionCoordinator( + graph_execution=self._graph_execution, + state_manager=self._state_manager, + event_handler=self._event_handler_registry, + event_collector=self._event_collector, + command_processor=self._command_processor, worker_pool=self._worker_pool, ) - self.dispatcher = Dispatcher( - event_queue=self.event_queue, - event_handler=self.event_handler_registry, - event_collector=self.event_collector, - execution_coordinator=self.execution_coordinator, - max_execution_time=self.execution_context.max_execution_time, - event_emitter=self.event_emitter, + self._dispatcher = Dispatcher( + event_queue=self._event_queue, + event_handler=self._event_handler_registry, + event_collector=self._event_collector, + execution_coordinator=self._execution_coordinator, + max_execution_time=self._execution_context.max_execution_time, + event_emitter=self._event_emitter, ) def _setup_command_handlers(self) -> None: """Configure command handlers.""" # Create handler instance that follows the protocol abort_handler = AbortCommandHandler() - self.command_processor.register_handler( + 
self._command_processor.register_handler( AbortCommand, abort_handler, ) @@ -216,9 +216,9 @@ class GraphEngine: # Create simple worker pool self._worker_pool = SimpleWorkerPool( - ready_queue=self.ready_queue, - event_queue=self.event_queue, - graph=self.graph, + ready_queue=self._ready_queue, + event_queue=self._event_queue, + graph=self._graph, flask_app=flask_app, context_vars=context_vars, min_workers=self._min_workers, @@ -229,8 +229,8 @@ class GraphEngine: def _validate_graph_state_consistency(self) -> None: """Validate that all nodes share the same GraphRuntimeState.""" - expected_state_id = id(self.graph_runtime_state) - for node in self.graph.nodes.values(): + expected_state_id = id(self._graph_runtime_state) + for node in self._graph.nodes.values(): if id(node.graph_runtime_state) != expected_state_id: raise ValueError(f"GraphRuntimeState consistency violation: Node '{node.id}' has a different instance") @@ -251,7 +251,7 @@ class GraphEngine: self._initialize_layers() # Start execution - self.graph_execution.start() + self._graph_execution.start() start_event = GraphRunStartedEvent() yield start_event @@ -259,23 +259,23 @@ class GraphEngine: self._start_execution() # Yield events as they occur - yield from self.event_emitter.emit_events() + yield from self._event_emitter.emit_events() # Handle completion - if self.graph_execution.aborted: + if self._graph_execution.aborted: abort_reason = "Workflow execution aborted by user command" - if self.graph_execution.error: - abort_reason = str(self.graph_execution.error) + if self._graph_execution.error: + abort_reason = str(self._graph_execution.error) yield GraphRunAbortedEvent( reason=abort_reason, - outputs=self.graph_runtime_state.outputs, + outputs=self._graph_runtime_state.outputs, ) - elif self.graph_execution.has_error: - if self.graph_execution.error: - raise self.graph_execution.error + elif self._graph_execution.has_error: + if self._graph_execution.error: + raise self._graph_execution.error else: yield GraphRunSucceededEvent( - outputs=self.graph_runtime_state.outputs, + outputs=self._graph_runtime_state.outputs, ) except Exception as e: @@ -287,10 +287,10 @@ class GraphEngine: def _initialize_layers(self) -> None: """Initialize layers with context.""" - self.event_collector.set_layers(self._layers) + self._event_collector.set_layers(self._layers) for layer in self._layers: try: - layer.initialize(self.graph_runtime_state, self.command_channel) + layer.initialize(self._graph_runtime_state, self._command_channel) except Exception as e: logger.warning("Failed to initialize layer %s: %s", layer.__class__.__name__, e) @@ -305,21 +305,21 @@ class GraphEngine: self._worker_pool.start() # Register response nodes - for node in self.graph.nodes.values(): + for node in self._graph.nodes.values(): if node.execution_type == NodeExecutionType.RESPONSE: - self.response_coordinator.register(node.id) + self._response_coordinator.register(node.id) # Enqueue root node - root_node = self.graph.root_node - self.state_manager.enqueue_node(root_node.id) - self.state_manager.start_execution(root_node.id) + root_node = self._graph.root_node + self._state_manager.enqueue_node(root_node.id) + self._state_manager.start_execution(root_node.id) # Start dispatcher - self.dispatcher.start() + self._dispatcher.start() def _stop_execution(self) -> None: """Stop execution subsystems.""" - self.dispatcher.stop() + self._dispatcher.stop() self._worker_pool.stop() # Don't mark complete here as the dispatcher already does it @@ -328,6 +328,17 @@ class 
GraphEngine: for layer in self._layers: try: - layer.on_graph_end(self.graph_execution.error) + layer.on_graph_end(self._graph_execution.error) except Exception as e: logger.warning("Layer %s failed on_graph_end: %s", layer.__class__.__name__, e) + + # Public property accessors for attributes that need external access + @property + def graph_runtime_state(self) -> GraphRuntimeState: + """Get the graph runtime state.""" + return self._graph_runtime_state + + @property + def graph(self) -> Graph: + """Get the graph.""" + return self._graph diff --git a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py index cf4d9db5f8..3d646a8719 100644 --- a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py +++ b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py @@ -38,10 +38,10 @@ class BranchHandler: skip_propagator: Propagator for skip states state_manager: Unified state manager """ - self.graph = graph - self.edge_processor = edge_processor - self.skip_propagator = skip_propagator - self.state_manager = state_manager + self._graph = graph + self._edge_processor = edge_processor + self._skip_propagator = skip_propagator + self._state_manager = state_manager def handle_branch_completion( self, node_id: str, selected_handle: str | None @@ -63,13 +63,13 @@ class BranchHandler: raise ValueError(f"Branch node {node_id} completed without selecting a branch") # Categorize edges into selected and unselected - _, unselected_edges = self.state_manager.categorize_branch_edges(node_id, selected_handle) + _, unselected_edges = self._state_manager.categorize_branch_edges(node_id, selected_handle) # Skip all unselected paths - self.skip_propagator.skip_branch_paths(unselected_edges) + self._skip_propagator.skip_branch_paths(unselected_edges) # Process selected edges and get ready nodes and streaming events - return self.edge_processor.process_node_success(node_id, selected_handle) + return self._edge_processor.process_node_success(node_id, selected_handle) def validate_branch_selection(self, node_id: str, selected_handle: str) -> bool: """ @@ -82,6 +82,6 @@ class BranchHandler: Returns: True if the selection is valid """ - outgoing_edges = self.graph.get_outgoing_edges(node_id) + outgoing_edges = self._graph.get_outgoing_edges(node_id) valid_handles = {edge.source_handle for edge in outgoing_edges} return selected_handle in valid_handles diff --git a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py index 369257aa45..ebfcd7d9ee 100644 --- a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py +++ b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py @@ -36,9 +36,9 @@ class EdgeProcessor: state_manager: Unified state manager response_coordinator: Response stream coordinator """ - self.graph = graph - self.state_manager = state_manager - self.response_coordinator = response_coordinator + self._graph = graph + self._state_manager = state_manager + self._response_coordinator = response_coordinator def process_node_success( self, node_id: str, selected_handle: str | None = None @@ -53,7 +53,7 @@ class EdgeProcessor: Returns: Tuple of (list of downstream node IDs that are now ready, list of streaming events) """ - node = self.graph.nodes[node_id] + node = self._graph.nodes[node_id] if node.execution_type == NodeExecutionType.BRANCH: return self._process_branch_node_edges(node_id, selected_handle) @@ -72,7 
+72,7 @@ class EdgeProcessor: """ ready_nodes: list[str] = [] all_streaming_events: list[NodeRunStreamChunkEvent] = [] - outgoing_edges = self.graph.get_outgoing_edges(node_id) + outgoing_edges = self._graph.get_outgoing_edges(node_id) for edge in outgoing_edges: nodes, events = self._process_taken_edge(edge) @@ -104,7 +104,7 @@ class EdgeProcessor: all_streaming_events: list[NodeRunStreamChunkEvent] = [] # Categorize edges - selected_edges, unselected_edges = self.state_manager.categorize_branch_edges(node_id, selected_handle) + selected_edges, unselected_edges = self._state_manager.categorize_branch_edges(node_id, selected_handle) # Process unselected edges first (mark as skipped) for edge in unselected_edges: @@ -129,14 +129,14 @@ class EdgeProcessor: Tuple of (list containing downstream node ID if it's ready, list of streaming events) """ # Mark edge as taken - self.state_manager.mark_edge_taken(edge.id) + self._state_manager.mark_edge_taken(edge.id) # Notify response coordinator and get streaming events - streaming_events = self.response_coordinator.on_edge_taken(edge.id) + streaming_events = self._response_coordinator.on_edge_taken(edge.id) # Check if downstream node is ready ready_nodes: list[str] = [] - if self.state_manager.is_node_ready(edge.head): + if self._state_manager.is_node_ready(edge.head): ready_nodes.append(edge.head) return ready_nodes, streaming_events @@ -148,4 +148,4 @@ class EdgeProcessor: Args: edge: The edge to skip """ - self.state_manager.mark_edge_skipped(edge.id) + self._state_manager.mark_edge_skipped(edge.id) diff --git a/api/core/workflow/graph_engine/graph_traversal/node_readiness.py b/api/core/workflow/graph_engine/graph_traversal/node_readiness.py index 59bce3942c..d1e3e434a6 100644 --- a/api/core/workflow/graph_engine/graph_traversal/node_readiness.py +++ b/api/core/workflow/graph_engine/graph_traversal/node_readiness.py @@ -24,7 +24,7 @@ class NodeReadinessChecker: Args: graph: The workflow graph """ - self.graph = graph + self._graph = graph def is_node_ready(self, node_id: str) -> bool: """ @@ -40,7 +40,7 @@ class NodeReadinessChecker: Returns: True if the node is ready for execution """ - incoming_edges = self.graph.get_incoming_edges(node_id) + incoming_edges = self._graph.get_incoming_edges(node_id) # No dependencies means always ready if not incoming_edges: @@ -75,7 +75,7 @@ class NodeReadinessChecker: List of node IDs that are now ready """ ready_nodes: list[str] = [] - outgoing_edges = self.graph.get_outgoing_edges(from_node_id) + outgoing_edges = self._graph.get_outgoing_edges(from_node_id) for edge in outgoing_edges: if edge.state == NodeState.TAKEN: diff --git a/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py b/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py index 17f30a6a38..51ab3c6739 100644 --- a/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py +++ b/api/core/workflow/graph_engine/graph_traversal/skip_propagator.py @@ -31,8 +31,8 @@ class SkipPropagator: graph: The workflow graph state_manager: Unified state manager """ - self.graph = graph - self.state_manager = state_manager + self._graph = graph + self._state_manager = state_manager def propagate_skip_from_edge(self, edge_id: str) -> None: """ @@ -46,11 +46,11 @@ class SkipPropagator: Args: edge_id: The ID of the skipped edge to start from """ - downstream_node_id = self.graph.edges[edge_id].head - incoming_edges = self.graph.get_incoming_edges(downstream_node_id) + downstream_node_id = self._graph.edges[edge_id].head + 
incoming_edges = self._graph.get_incoming_edges(downstream_node_id) # Analyze edge states - edge_states = self.state_manager.analyze_edge_states(incoming_edges) + edge_states = self._state_manager.analyze_edge_states(incoming_edges) # Stop if there are unknown edges (not yet processed) if edge_states["has_unknown"]: @@ -59,7 +59,7 @@ class SkipPropagator: # If any edge is taken, node may still execute if edge_states["has_taken"]: # Enqueue node - self.state_manager.enqueue_node(downstream_node_id) + self._state_manager.enqueue_node(downstream_node_id) return # All edges are skipped, propagate skip to this node @@ -74,12 +74,12 @@ class SkipPropagator: node_id: The ID of the node to skip """ # Mark node as skipped - self.state_manager.mark_node_skipped(node_id) + self._state_manager.mark_node_skipped(node_id) # Mark all outgoing edges as skipped and propagate - outgoing_edges = self.graph.get_outgoing_edges(node_id) + outgoing_edges = self._graph.get_outgoing_edges(node_id) for edge in outgoing_edges: - self.state_manager.mark_edge_skipped(edge.id) + self._state_manager.mark_edge_skipped(edge.id) # Recursively propagate skip self.propagate_skip_from_edge(edge.id) @@ -91,5 +91,5 @@ class SkipPropagator: unselected_edges: List of edges not taken by the branch """ for edge in unselected_edges: - self.state_manager.mark_edge_skipped(edge.id) + self._state_manager.mark_edge_skipped(edge.id) self.propagate_skip_from_edge(edge.id) diff --git a/api/core/workflow/graph_engine/orchestration/dispatcher.py b/api/core/workflow/graph_engine/orchestration/dispatcher.py index 694355298c..5ae1c3bbbe 100644 --- a/api/core/workflow/graph_engine/orchestration/dispatcher.py +++ b/api/core/workflow/graph_engine/orchestration/dispatcher.py @@ -48,12 +48,12 @@ class Dispatcher: max_execution_time: Maximum execution time in seconds event_emitter: Optional event emitter to signal completion """ - self.event_queue = event_queue - self.event_handler = event_handler - self.event_collector = event_collector - self.execution_coordinator = execution_coordinator - self.max_execution_time = max_execution_time - self.event_emitter = event_emitter + self._event_queue = event_queue + self._event_handler = event_handler + self._event_collector = event_collector + self._execution_coordinator = execution_coordinator + self._max_execution_time = max_execution_time + self._event_emitter = event_emitter self._thread: threading.Thread | None = None self._stop_event = threading.Event() @@ -80,28 +80,28 @@ class Dispatcher: try: while not self._stop_event.is_set(): # Check for commands - self.execution_coordinator.check_commands() + self._execution_coordinator.check_commands() # Check for scaling - self.execution_coordinator.check_scaling() + self._execution_coordinator.check_scaling() # Process events try: - event = self.event_queue.get(timeout=0.1) + event = self._event_queue.get(timeout=0.1) # Route to the event handler - self.event_handler.handle_event(event) - self.event_queue.task_done() + self._event_handler.handle_event(event) + self._event_queue.task_done() except queue.Empty: # Check if execution is complete - if self.execution_coordinator.is_execution_complete(): + if self._execution_coordinator.is_execution_complete(): break except Exception as e: logger.exception("Dispatcher error") - self.execution_coordinator.mark_failed(e) + self._execution_coordinator.mark_failed(e) finally: - self.execution_coordinator.mark_complete() + self._execution_coordinator.mark_complete() # Signal the event emitter that execution is complete - 
if self.event_emitter: - self.event_emitter.mark_complete() + if self._event_emitter: + self._event_emitter.mark_complete() diff --git a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py index 95902f1846..63e512f7b3 100644 --- a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py +++ b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py @@ -43,20 +43,20 @@ class ExecutionCoordinator: command_processor: Processor for commands worker_pool: Pool of workers """ - self.graph_execution = graph_execution - self.state_manager = state_manager - self.event_handler = event_handler - self.event_collector = event_collector - self.command_processor = command_processor - self.worker_pool = worker_pool + self._graph_execution = graph_execution + self._state_manager = state_manager + self._event_handler = event_handler + self._event_collector = event_collector + self._command_processor = command_processor + self._worker_pool = worker_pool def check_commands(self) -> None: """Process any pending commands.""" - self.command_processor.process_commands() + self._command_processor.process_commands() def check_scaling(self) -> None: """Check and perform worker scaling if needed.""" - self.worker_pool.check_and_scale() + self._worker_pool.check_and_scale() def is_execution_complete(self) -> bool: """ @@ -66,16 +66,16 @@ class ExecutionCoordinator: True if execution is complete """ # Check if aborted or failed - if self.graph_execution.aborted or self.graph_execution.has_error: + if self._graph_execution.aborted or self._graph_execution.has_error: return True # Complete if no work remains - return self.state_manager.is_execution_complete() + return self._state_manager.is_execution_complete() def mark_complete(self) -> None: """Mark execution as complete.""" - if not self.graph_execution.completed: - self.graph_execution.complete() + if not self._graph_execution.completed: + self._graph_execution.complete() def mark_failed(self, error: Exception) -> None: """ @@ -84,4 +84,4 @@ class ExecutionCoordinator: Args: error: The error that caused failure """ - self.graph_execution.fail(error) + self._graph_execution.fail(error) diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index a7b77bdf4a..b5224cbc22 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -44,11 +44,11 @@ class ResponseStreamCoordinator: variable_pool: VariablePool instance for accessing node variables graph: Graph instance for looking up node information """ - self.variable_pool = variable_pool - self.graph = graph - self.active_session: ResponseSession | None = None - self.waiting_sessions: deque[ResponseSession] = deque() - self.lock = RLock() + self._variable_pool = variable_pool + self._graph = graph + self._active_session: ResponseSession | None = None + self._waiting_sessions: deque[ResponseSession] = deque() + self._lock = RLock() # Internal stream management (replacing OutputRegistry) self._stream_buffers: dict[tuple[str, ...], list[NodeRunStreamChunkEvent]] = {} @@ -68,7 +68,7 @@ class ResponseStreamCoordinator: self._response_sessions: dict[NodeID, ResponseSession] = {} # node_id -> session def register(self, response_node_id: NodeID) -> None: - with self.lock: + with self._lock: 
self._response_nodes.add(response_node_id) # Build and save paths map for this response node @@ -76,7 +76,7 @@ class ResponseStreamCoordinator: self._paths_maps[response_node_id] = paths_map # Create and store response session for this node - response_node = self.graph.nodes[response_node_id] + response_node = self._graph.nodes[response_node_id] session = ResponseSession.from_node(response_node) self._response_sessions[response_node_id] = session @@ -87,7 +87,7 @@ class ResponseStreamCoordinator: node_id: The ID of the node execution_id: The execution ID from NodeRunStartedEvent """ - with self.lock: + with self._lock: self._node_execution_ids[node_id] = execution_id def _get_or_create_execution_id(self, node_id: NodeID) -> str: @@ -99,7 +99,7 @@ class ResponseStreamCoordinator: Returns: The execution ID for the node """ - with self.lock: + with self._lock: if node_id not in self._node_execution_ids: self._node_execution_ids[node_id] = str(uuid4()) return self._node_execution_ids[node_id] @@ -116,14 +116,14 @@ class ResponseStreamCoordinator: List of Path objects, where each path contains branch edge IDs """ # Get root node ID - root_node_id = self.graph.root_node.id + root_node_id = self._graph.root_node.id # If root is the response node, return empty path if root_node_id == response_node_id: return [Path()] # Extract variable selectors from the response node's template - response_node = self.graph.nodes[response_node_id] + response_node = self._graph.nodes[response_node_id] response_session = ResponseSession.from_node(response_node) template = response_session.template @@ -149,7 +149,7 @@ class ResponseStreamCoordinator: visited.add(current_node_id) # Explore outgoing edges - outgoing_edges = self.graph.get_outgoing_edges(current_node_id) + outgoing_edges = self._graph.get_outgoing_edges(current_node_id) for edge in outgoing_edges: edge_id = edge.id next_node_id = edge.head @@ -168,8 +168,8 @@ class ResponseStreamCoordinator: for path in all_complete_paths: blocking_edges: list[str] = [] for edge_id in path: - edge = self.graph.edges[edge_id] - source_node = self.graph.nodes[edge.tail] + edge = self._graph.edges[edge_id] + source_node = self._graph.nodes[edge.tail] # Check if node is a branch/container (original behavior) if source_node.execution_type in { @@ -199,7 +199,7 @@ class ResponseStreamCoordinator: """ events: list[NodeRunStreamChunkEvent] = [] - with self.lock: + with self._lock: # Check each response node in order for response_node_id in self._response_nodes: if response_node_id not in self._paths_maps: @@ -245,21 +245,21 @@ class ResponseStreamCoordinator: # Remove from map to ensure it won't be activated again del self._response_sessions[node_id] - if self.active_session is None: - self.active_session = session + if self._active_session is None: + self._active_session = session # Try to flush immediately events.extend(self.try_flush()) else: # Queue the session if another is active - self.waiting_sessions.append(session) + self._waiting_sessions.append(session) return events def intercept_event( self, event: NodeRunStreamChunkEvent | NodeRunSucceededEvent ) -> Sequence[NodeRunStreamChunkEvent]: - with self.lock: + with self._lock: if isinstance(event, NodeRunStreamChunkEvent): self._append_stream_chunk(event.selector, event) if event.is_final: @@ -269,9 +269,8 @@ class ResponseStreamCoordinator: # Skip cause we share the same variable pool. 
# # for variable_name, variable_value in event.node_run_result.outputs.items(): - # self.variable_pool.add((event.node_id, variable_name), variable_value) + # self._variable_pool.add((event.node_id, variable_name), variable_value) return self.try_flush() - return [] def _create_stream_chunk_event( self, @@ -287,9 +286,9 @@ class ResponseStreamCoordinator: active response node's information since these are not actual node IDs. """ # Check if this is a special selector that doesn't correspond to a node - if selector and selector[0] not in self.graph.nodes and self.active_session: + if selector and selector[0] not in self._graph.nodes and self._active_session: # Use the active response node for special selectors - response_node = self.graph.nodes[self.active_session.node_id] + response_node = self._graph.nodes[self._active_session.node_id] return NodeRunStreamChunkEvent( id=execution_id, node_id=response_node.id, @@ -300,7 +299,7 @@ class ResponseStreamCoordinator: ) # Standard case: selector refers to an actual node - node = self.graph.nodes[node_id] + node = self._graph.nodes[node_id] return NodeRunStreamChunkEvent( id=execution_id, node_id=node.id, @@ -323,9 +322,9 @@ class ResponseStreamCoordinator: # Determine which node to attribute the output to # For special selectors (sys, env, conversation), use the active response node # For regular selectors, use the source node - if self.active_session and source_selector_prefix not in self.graph.nodes: + if self._active_session and source_selector_prefix not in self._graph.nodes: # Special selector - use active response node - output_node_id = self.active_session.node_id + output_node_id = self._active_session.node_id else: # Regular node selector output_node_id = source_selector_prefix @@ -336,8 +335,8 @@ class ResponseStreamCoordinator: if event := self._pop_stream_chunk(segment.selector): # For special selectors, we need to update the event to use # the active response node's information - if self.active_session and source_selector_prefix not in self.graph.nodes: - response_node = self.graph.nodes[self.active_session.node_id] + if self._active_session and source_selector_prefix not in self._graph.nodes: + response_node = self._graph.nodes[self._active_session.node_id] # Create a new event with the response node's information # but keep the original selector updated_event = NodeRunStreamChunkEvent( @@ -359,10 +358,10 @@ class ResponseStreamCoordinator: if stream_closed: is_complete = True - elif value := self.variable_pool.get(segment.selector): + elif value := self._variable_pool.get(segment.selector): # Process scalar value is_last_segment = bool( - self.active_session and self.active_session.index == len(self.active_session.template.segments) - 1 + self._active_session and self._active_session.index == len(self._active_session.template.segments) - 1 ) events.append( self._create_stream_chunk_event( @@ -379,13 +378,13 @@ class ResponseStreamCoordinator: def _process_text_segment(self, segment: TextSegment) -> Sequence[NodeRunStreamChunkEvent]: """Process a text segment. 
Returns (events, is_complete).""" - assert self.active_session is not None - current_response_node = self.graph.nodes[self.active_session.node_id] + assert self._active_session is not None + current_response_node = self._graph.nodes[self._active_session.node_id] # Use get_or_create_execution_id to ensure we have a consistent ID execution_id = self._get_or_create_execution_id(current_response_node.id) - is_last_segment = self.active_session.index == len(self.active_session.template.segments) - 1 + is_last_segment = self._active_session.index == len(self._active_session.template.segments) - 1 event = self._create_stream_chunk_event( node_id=current_response_node.id, execution_id=execution_id, @@ -396,29 +395,29 @@ class ResponseStreamCoordinator: return [event] def try_flush(self) -> list[NodeRunStreamChunkEvent]: - with self.lock: - if not self.active_session: + with self._lock: + if not self._active_session: return [] - template = self.active_session.template - response_node_id = self.active_session.node_id + template = self._active_session.template + response_node_id = self._active_session.node_id events: list[NodeRunStreamChunkEvent] = [] # Process segments sequentially from current index - while self.active_session.index < len(template.segments): - segment = template.segments[self.active_session.index] + while self._active_session.index < len(template.segments): + segment = template.segments[self._active_session.index] if isinstance(segment, VariableSegment): # Check if the source node for this variable is skipped # Only check for actual nodes, not special selectors (sys, env, conversation) source_selector_prefix = segment.selector[0] if segment.selector else "" - if source_selector_prefix in self.graph.nodes: - source_node = self.graph.nodes[source_selector_prefix] + if source_selector_prefix in self._graph.nodes: + source_node = self._graph.nodes[source_selector_prefix] if source_node.state == NodeState.SKIPPED: # Skip this variable segment if the source node is skipped - self.active_session.index += 1 + self._active_session.index += 1 continue segment_events, is_complete = self._process_variable_segment(segment) @@ -426,7 +425,7 @@ class ResponseStreamCoordinator: # Only advance index if this variable segment is complete if is_complete: - self.active_session.index += 1 + self._active_session.index += 1 else: # Wait for more data break @@ -434,9 +433,9 @@ class ResponseStreamCoordinator: else: segment_events = self._process_text_segment(segment) events.extend(segment_events) - self.active_session.index += 1 + self._active_session.index += 1 - if self.active_session.is_complete(): + if self._active_session.is_complete(): # End current session and get events from starting next session next_session_events = self.end_session(response_node_id) events.extend(next_session_events) @@ -454,16 +453,16 @@ class ResponseStreamCoordinator: Returns: List of events from starting the next session """ - with self.lock: + with self._lock: events: list[NodeRunStreamChunkEvent] = [] - if self.active_session and self.active_session.node_id == node_id: - self.active_session = None + if self._active_session and self._active_session.node_id == node_id: + self._active_session = None # Try to start next waiting session - if self.waiting_sessions: - next_session = self.waiting_sessions.popleft() - self.active_session = next_session + if self._waiting_sessions: + next_session = self._waiting_sessions.popleft() + self._active_session = next_session # Immediately try to flush any available segments events = 
self.try_flush() diff --git a/api/core/workflow/graph_engine/state_management/unified_state_manager.py b/api/core/workflow/graph_engine/state_management/unified_state_manager.py index 0d4e5a4d43..258b84c341 100644 --- a/api/core/workflow/graph_engine/state_management/unified_state_manager.py +++ b/api/core/workflow/graph_engine/state_management/unified_state_manager.py @@ -46,8 +46,8 @@ class UnifiedStateManager: graph: The workflow graph ready_queue: Queue for nodes ready to execute """ - self.graph = graph - self.ready_queue = ready_queue + self._graph = graph + self._ready_queue = ready_queue self._lock = threading.RLock() # Execution tracking state @@ -66,8 +66,8 @@ class UnifiedStateManager: node_id: The ID of the node to enqueue """ with self._lock: - self.graph.nodes[node_id].state = NodeState.TAKEN - self.ready_queue.put(node_id) + self._graph.nodes[node_id].state = NodeState.TAKEN + self._ready_queue.put(node_id) def mark_node_skipped(self, node_id: str) -> None: """ @@ -77,7 +77,7 @@ class UnifiedStateManager: node_id: The ID of the node to skip """ with self._lock: - self.graph.nodes[node_id].state = NodeState.SKIPPED + self._graph.nodes[node_id].state = NodeState.SKIPPED def is_node_ready(self, node_id: str) -> bool: """ @@ -94,7 +94,7 @@ class UnifiedStateManager: """ with self._lock: # Get all incoming edges to this node - incoming_edges = self.graph.get_incoming_edges(node_id) + incoming_edges = self._graph.get_incoming_edges(node_id) # If no incoming edges, node is always ready if not incoming_edges: @@ -118,7 +118,7 @@ class UnifiedStateManager: The current node state """ with self._lock: - return self.graph.nodes[node_id].state + return self._graph.nodes[node_id].state # ============= Edge State Operations ============= @@ -130,7 +130,7 @@ class UnifiedStateManager: edge_id: The ID of the edge to mark """ with self._lock: - self.graph.edges[edge_id].state = NodeState.TAKEN + self._graph.edges[edge_id].state = NodeState.TAKEN def mark_edge_skipped(self, edge_id: str) -> None: """ @@ -140,7 +140,7 @@ class UnifiedStateManager: edge_id: The ID of the edge to mark """ with self._lock: - self.graph.edges[edge_id].state = NodeState.SKIPPED + self._graph.edges[edge_id].state = NodeState.SKIPPED def analyze_edge_states(self, edges: list[Edge]) -> EdgeStateAnalysis: """ @@ -172,7 +172,7 @@ class UnifiedStateManager: The current edge state """ with self._lock: - return self.graph.edges[edge_id].state + return self._graph.edges[edge_id].state def categorize_branch_edges(self, node_id: str, selected_handle: str) -> tuple[Sequence[Edge], Sequence[Edge]]: """ @@ -186,7 +186,7 @@ class UnifiedStateManager: A tuple of (selected_edges, unselected_edges) """ with self._lock: - outgoing_edges = self.graph.get_outgoing_edges(node_id) + outgoing_edges = self._graph.get_outgoing_edges(node_id) selected_edges: list[Edge] = [] unselected_edges: list[Edge] = [] @@ -272,7 +272,7 @@ class UnifiedStateManager: True if execution is complete """ with self._lock: - return self.ready_queue.empty() and len(self._executing_nodes) == 0 + return self._ready_queue.empty() and len(self._executing_nodes) == 0 def get_queue_depth(self) -> int: """ @@ -281,7 +281,7 @@ class UnifiedStateManager: Returns: Number of nodes in the ready queue """ - return self.ready_queue.qsize() + return self._ready_queue.qsize() def get_execution_stats(self) -> dict[str, int]: """ @@ -291,12 +291,12 @@ class UnifiedStateManager: Dictionary with execution statistics """ with self._lock: - taken_nodes = sum(1 for node in 
self.graph.nodes.values() if node.state == NodeState.TAKEN) - skipped_nodes = sum(1 for node in self.graph.nodes.values() if node.state == NodeState.SKIPPED) - unknown_nodes = sum(1 for node in self.graph.nodes.values() if node.state == NodeState.UNKNOWN) + taken_nodes = sum(1 for node in self._graph.nodes.values() if node.state == NodeState.TAKEN) + skipped_nodes = sum(1 for node in self._graph.nodes.values() if node.state == NodeState.SKIPPED) + unknown_nodes = sum(1 for node in self._graph.nodes.values() if node.state == NodeState.UNKNOWN) return { - "queue_depth": self.ready_queue.qsize(), + "queue_depth": self._ready_queue.qsize(), "executing": len(self._executing_nodes), "taken_nodes": taken_nodes, "skipped_nodes": skipped_nodes, diff --git a/api/core/workflow/graph_engine/worker.py b/api/core/workflow/graph_engine/worker.py index 1fb0824e63..df2fbf486e 100644 --- a/api/core/workflow/graph_engine/worker.py +++ b/api/core/workflow/graph_engine/worker.py @@ -59,16 +59,16 @@ class Worker(threading.Thread): on_active_callback: Optional callback when worker becomes active """ super().__init__(name=f"GraphWorker-{worker_id}", daemon=True) - self.ready_queue = ready_queue - self.event_queue = event_queue - self.graph = graph - self.worker_id = worker_id - self.flask_app = flask_app - self.context_vars = context_vars + self._ready_queue = ready_queue + self._event_queue = event_queue + self._graph = graph + self._worker_id = worker_id + self._flask_app = flask_app + self._context_vars = context_vars self._stop_event = threading.Event() - self.on_idle_callback = on_idle_callback - self.on_active_callback = on_active_callback - self.last_task_time = time.time() + self._on_idle_callback = on_idle_callback + self._on_active_callback = on_active_callback + self._last_task_time = time.time() def stop(self) -> None: """Signal the worker to stop processing.""" @@ -85,22 +85,22 @@ class Worker(threading.Thread): while not self._stop_event.is_set(): # Try to get a node ID from the ready queue (with timeout) try: - node_id = self.ready_queue.get(timeout=0.1) + node_id = self._ready_queue.get(timeout=0.1) except queue.Empty: # Notify that worker is idle - if self.on_idle_callback: - self.on_idle_callback(self.worker_id) + if self._on_idle_callback: + self._on_idle_callback(self._worker_id) continue # Notify that worker is active - if self.on_active_callback: - self.on_active_callback(self.worker_id) + if self._on_active_callback: + self._on_active_callback(self._worker_id) - self.last_task_time = time.time() - node = self.graph.nodes[node_id] + self._last_task_time = time.time() + node = self._graph.nodes[node_id] try: self._execute_node(node) - self.ready_queue.task_done() + self._ready_queue.task_done() except Exception as e: error_event = NodeRunFailedEvent( id=str(uuid4()), @@ -110,7 +110,7 @@ class Worker(threading.Thread): error=str(e), start_at=datetime.now(), ) - self.event_queue.put(error_event) + self._event_queue.put(error_event) def _execute_node(self, node: Node) -> None: """ @@ -120,19 +120,19 @@ class Worker(threading.Thread): node: The node instance to execute """ # Execute the node with preserved context if Flask app is provided - if self.flask_app and self.context_vars: + if self._flask_app and self._context_vars: with preserve_flask_contexts( - flask_app=self.flask_app, - context_vars=self.context_vars, + flask_app=self._flask_app, + context_vars=self._context_vars, ): # Execute the node node_events = node.run() for event in node_events: # Forward event to dispatcher immediately for 
streaming - self.event_queue.put(event) + self._event_queue.put(event) else: # Execute without context preservation node_events = node.run() for event in node_events: # Forward event to dispatcher immediately for streaming - self.event_queue.put(event) + self._event_queue.put(event) diff --git a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py b/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py index c07ea1e4dd..94b8ff3ca2 100644 --- a/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py +++ b/api/core/workflow/graph_engine/worker_management/simple_worker_pool.py @@ -56,20 +56,20 @@ class SimpleWorkerPool: scale_up_threshold: Queue depth to trigger scale up scale_down_idle_time: Seconds before scaling down idle workers """ - self.ready_queue = ready_queue - self.event_queue = event_queue - self.graph = graph - self.flask_app = flask_app - self.context_vars = context_vars + self._ready_queue = ready_queue + self._event_queue = event_queue + self._graph = graph + self._flask_app = flask_app + self._context_vars = context_vars # Scaling parameters with defaults - self.min_workers = min_workers or dify_config.GRAPH_ENGINE_MIN_WORKERS - self.max_workers = max_workers or dify_config.GRAPH_ENGINE_MAX_WORKERS - self.scale_up_threshold = scale_up_threshold or dify_config.GRAPH_ENGINE_SCALE_UP_THRESHOLD - self.scale_down_idle_time = scale_down_idle_time or dify_config.GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME + self._min_workers = min_workers or dify_config.GRAPH_ENGINE_MIN_WORKERS + self._max_workers = max_workers or dify_config.GRAPH_ENGINE_MAX_WORKERS + self._scale_up_threshold = scale_up_threshold or dify_config.GRAPH_ENGINE_SCALE_UP_THRESHOLD + self._scale_down_idle_time = scale_down_idle_time or dify_config.GRAPH_ENGINE_SCALE_DOWN_IDLE_TIME # Worker management - self.workers: list[Worker] = [] + self._workers: list[Worker] = [] self._worker_counter = 0 self._lock = threading.RLock() self._running = False @@ -89,13 +89,13 @@ class SimpleWorkerPool: # Calculate initial worker count if initial_count is None: - node_count = len(self.graph.nodes) + node_count = len(self._graph.nodes) if node_count < 10: - initial_count = self.min_workers + initial_count = self._min_workers elif node_count < 50: - initial_count = min(self.min_workers + 1, self.max_workers) + initial_count = min(self._min_workers + 1, self._max_workers) else: - initial_count = min(self.min_workers + 2, self.max_workers) + initial_count = min(self._min_workers + 2, self._max_workers) # Create initial workers for _ in range(initial_count): @@ -107,15 +107,15 @@ class SimpleWorkerPool: self._running = False # Stop all workers - for worker in self.workers: + for worker in self._workers: worker.stop() # Wait for workers to finish - for worker in self.workers: + for worker in self._workers: if worker.is_alive(): worker.join(timeout=10.0) - self.workers.clear() + self._workers.clear() def _create_worker(self) -> None: """Create and start a new worker.""" @@ -123,16 +123,16 @@ class SimpleWorkerPool: self._worker_counter += 1 worker = Worker( - ready_queue=self.ready_queue, - event_queue=self.event_queue, - graph=self.graph, + ready_queue=self._ready_queue, + event_queue=self._event_queue, + graph=self._graph, worker_id=worker_id, - flask_app=self.flask_app, - context_vars=self.context_vars, + flask_app=self._flask_app, + context_vars=self._context_vars, ) worker.start() - self.workers.append(worker) + self._workers.append(worker) def check_and_scale(self) -> None: """Check and perform 
scaling if needed.""" @@ -140,17 +140,17 @@ class SimpleWorkerPool: if not self._running: return - current_count = len(self.workers) - queue_depth = self.ready_queue.qsize() + current_count = len(self._workers) + queue_depth = self._ready_queue.qsize() # Simple scaling logic - if queue_depth > self.scale_up_threshold and current_count < self.max_workers: + if queue_depth > self._scale_up_threshold and current_count < self._max_workers: self._create_worker() def get_worker_count(self) -> int: """Get current number of workers.""" with self._lock: - return len(self.workers) + return len(self._workers) def get_status(self) -> dict[str, int]: """ @@ -161,8 +161,8 @@ class SimpleWorkerPool: """ with self._lock: return { - "total_workers": len(self.workers), - "queue_depth": self.ready_queue.qsize(), - "min_workers": self.min_workers, - "max_workers": self.max_workers, + "total_workers": len(self._workers), + "queue_depth": self._ready_queue.qsize(), + "min_workers": self._min_workers, + "max_workers": self._max_workers, } From 3d57a9ccdc4bc42e025e3515ebc35380c25ef5ed Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Mon, 1 Sep 2025 09:45:07 +0800 Subject: [PATCH 23/96] Fix never hit `(!code || code.length === 0)` (#24860) --- web/service/use-share.ts | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/web/service/use-share.ts b/web/service/use-share.ts index 6845a2f3c7..267975fd38 100644 --- a/web/service/use-share.ts +++ b/web/service/use-share.ts @@ -6,12 +6,7 @@ const NAME_SPACE = 'webapp' export const useGetWebAppAccessModeByCode = (code: string | null) => { return useQuery({ queryKey: [NAME_SPACE, 'appAccessMode', code], - queryFn: () => { - if (!code || code.length === 0) - return Promise.reject(new Error('App code is required to get access mode')) - - return getAppAccessModeByAppCode(code) - }, + queryFn: () => getAppAccessModeByAppCode(code!), enabled: !!code, }) } From c45c22b1b2ca3eeca1ec70e39201ba3db5156a72 Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 1 Sep 2025 10:04:05 +0800 Subject: [PATCH 24/96] fix translation of all oauth.ts (#24855) --- web/i18n/de-DE/oauth.ts | 6 +++--- web/i18n/en-US/oauth.ts | 6 +++--- web/i18n/es-ES/oauth.ts | 4 ++-- web/i18n/fa-IR/oauth.ts | 4 ++-- web/i18n/fr-FR/oauth.ts | 4 ++-- web/i18n/hi-IN/oauth.ts | 6 +++--- web/i18n/it-IT/oauth.ts | 4 ++-- web/i18n/ja-JP/oauth.ts | 8 ++++---- web/i18n/ko-KR/oauth.ts | 8 ++++---- web/i18n/pl-PL/oauth.ts | 4 ++-- web/i18n/pt-BR/oauth.ts | 4 ++-- web/i18n/ro-RO/oauth.ts | 4 ++-- web/i18n/ru-RU/oauth.ts | 6 +++--- web/i18n/sl-SI/oauth.ts | 4 ++-- web/i18n/th-TH/oauth.ts | 4 ++-- web/i18n/tr-TR/oauth.ts | 4 ++-- web/i18n/uk-UA/oauth.ts | 4 ++-- web/i18n/vi-VN/oauth.ts | 4 ++-- 18 files changed, 44 insertions(+), 44 deletions(-) diff --git a/web/i18n/de-DE/oauth.ts b/web/i18n/de-DE/oauth.ts index 6eb684fa3c..7478a4afd5 100644 --- a/web/i18n/de-DE/oauth.ts +++ b/web/i18n/de-DE/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { common: 'Wir respektieren Ihre Privatsphäre und werden diese Informationen nur verwenden, um Ihre Erfahrung mit unseren Entwickler-Tools zu verbessern.', - notLoggedIn: 'möchte auf Ihr Dify Cloud-Konto zugreifen', - loggedIn: 'möchte auf die folgenden Informationen aus Ihrem Dify Cloud-Konto zugreifen.', + notLoggedIn: 'Diese App möchte auf Ihr Dify Cloud-Konto zugreifen', + loggedIn: 'Diese App möchte auf die folgenden Informationen aus Ihrem Dify Cloud-Konto zugreifen.', needLogin: 'Bitte melden Sie sich an, um 
zu autorisieren.', }, scopes: { @@ -21,7 +21,7 @@ const translation = { login: 'Anmelden', unknownApp: 'Unbekannte App', continue: 'Fortsetzen', - connect: 'Verbinde zu', + connect: 'Verbinden mit', } export default translation diff --git a/web/i18n/en-US/oauth.ts b/web/i18n/en-US/oauth.ts index ff71487fcd..5215330587 100644 --- a/web/i18n/en-US/oauth.ts +++ b/web/i18n/en-US/oauth.ts @@ -1,7 +1,7 @@ const translation = { tips: { - loggedIn: 'wants to access the following information from your Dify Cloud account.', - notLoggedIn: 'wants to access your Dify Cloud account', + loggedIn: 'This app wants to access the following information from your Dify Cloud account.', + notLoggedIn: 'This app wants to access your Dify Cloud account', needLogin: 'Please log in to authorize', common: 'We respect your privacy and will only use this information to enhance your experience with our developer tools.', }, @@ -18,7 +18,7 @@ const translation = { }, error: { invalidParams: 'Invalid parameters', - authorizeFailed: 'Authorize failed', + authorizeFailed: 'Authorization failed', authAppInfoFetchFailed: 'Failed to fetch app info for authorization', }, unknownApp: 'Unknown App', diff --git a/web/i18n/es-ES/oauth.ts b/web/i18n/es-ES/oauth.ts index fe6093ebf7..23d7eaa895 100644 --- a/web/i18n/es-ES/oauth.ts +++ b/web/i18n/es-ES/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: 'Por favor inicie sesión para autorizar', - notLoggedIn: 'quiere acceder a su cuenta de Dify Cloud', - loggedIn: 'quiere acceder a la siguiente información de su cuenta de Dify Cloud.', + notLoggedIn: 'Esta aplicación quiere acceder a su cuenta de Dify Cloud', + loggedIn: 'Esta aplicación quiere acceder a la siguiente información de su cuenta de Dify Cloud.', common: 'Respetamos su privacidad y solo utilizaremos esta información para mejorar su experiencia con nuestras herramientas para desarrolladores.', }, scopes: { diff --git a/web/i18n/fa-IR/oauth.ts b/web/i18n/fa-IR/oauth.ts index cb8ea498fa..380b4f78b5 100644 --- a/web/i18n/fa-IR/oauth.ts +++ b/web/i18n/fa-IR/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: 'لطفاً برای تأیید وارد شوید', - notLoggedIn: 'می‌خواهد به حساب Dify Cloud شما دسترسی پیدا کند', - loggedIn: 'می‌خواهد به اطلاعات زیر از حساب ابر دیفی شما دسترسی پیدا کند.', + notLoggedIn: 'این برنامه می‌خواهد به حساب Dify Cloud شما دسترسی پیدا کند', + loggedIn: 'این برنامه می‌خواهد به اطلاعات زیر از حساب ابر دیفی شما دسترسی پیدا کند.', common: 'ما به حریم خصوصی شما احترام می‌گذاریم و تنها از این اطلاعات برای بهبود تجربه شما با ابزارهای توسعه‌دهنده‌مان استفاده خواهیم کرد.', }, scopes: { diff --git a/web/i18n/fr-FR/oauth.ts b/web/i18n/fr-FR/oauth.ts index b2fa71e143..7ce46b9d5e 100644 --- a/web/i18n/fr-FR/oauth.ts +++ b/web/i18n/fr-FR/oauth.ts @@ -1,9 +1,9 @@ const translation = { tips: { needLogin: 'Veuillez vous connecter pour autoriser', - notLoggedIn: 'veut accéder à votre compte Dify Cloud', + notLoggedIn: 'Cette application veut accéder à votre compte Dify Cloud', common: 'Nous respectons votre vie privée et n\'utiliserons ces informations que pour améliorer votre expérience avec nos outils de développement.', - loggedIn: 'veut accéder aux informations suivantes de votre compte Dify Cloud.', + loggedIn: 'Cette application veut accéder aux informations suivantes de votre compte Dify Cloud.', }, scopes: { email: 'E-mail', diff --git a/web/i18n/hi-IN/oauth.ts b/web/i18n/hi-IN/oauth.ts index 7cdba1fe5b..a2e7bb9e36 100644 --- a/web/i18n/hi-IN/oauth.ts +++ b/web/i18n/hi-IN/oauth.ts @@ -1,9 +1,9 @@ 
const translation = { tips: { needLogin: 'कृपया प्राधिकरण के लिए लॉग इन करें', - notLoggedIn: 'आप आपके Dify Cloud खाते तक पहुंचना चाहते हैं', + notLoggedIn: 'यह ऐप आपके Dify Cloud खाते तक पहुंचना चाहता है', common: 'हम आपकी गोपनीयता का सम्मान करते हैं और इस जानकारी का उपयोग केवल आपके हमारे विकास उपकरणों के साथ अनुभव को बेहतर बनाने के लिए करेंगे।', - loggedIn: 'आप आपके Dify Cloud खाते से निम्नलिखित जानकारी तक पहुंचना चाहते हैं।', + loggedIn: 'यह ऐप आपके Dify Cloud खाते से निम्नलिखित जानकारी तक पहुंचना चाहता है।', }, scopes: { name: 'नाम', @@ -13,7 +13,7 @@ const translation = { timezone: 'समय क्षेत्र', }, error: { - authorizeFailed: 'अनु autorización विफल', + authorizeFailed: 'प्राधिकरण विफल', invalidParams: 'अमान्य पैरामीटर', authAppInfoFetchFailed: 'प्राधिकरण के लिए ऐप जानकारी प्राप्त करने में असफल हुआ', }, diff --git a/web/i18n/it-IT/oauth.ts b/web/i18n/it-IT/oauth.ts index 3955a3997e..4220666a9a 100644 --- a/web/i18n/it-IT/oauth.ts +++ b/web/i18n/it-IT/oauth.ts @@ -1,7 +1,7 @@ const translation = { tips: { - notLoggedIn: 'vuole accedere al tuo account Dify Cloud', - loggedIn: 'vuole accedere alle seguenti informazioni dal tuo account Dify Cloud.', + notLoggedIn: 'Questa app vuole accedere al tuo account Dify Cloud', + loggedIn: 'Questa app vuole accedere alle seguenti informazioni dal tuo account Dify Cloud.', common: 'Rispettiamo la tua privacy e utilizzeremo queste informazioni solo per migliorare la tua esperienza con i nostri strumenti per sviluppatori.', needLogin: 'Per favore, accedi per autorizzare', }, diff --git a/web/i18n/ja-JP/oauth.ts b/web/i18n/ja-JP/oauth.ts index 239892c03e..54322e1a48 100644 --- a/web/i18n/ja-JP/oauth.ts +++ b/web/i18n/ja-JP/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { - notLoggedIn: 'あなたのDify Cloudアカウントにアクセスしたいです', + notLoggedIn: 'このアプリはあなたのDify Cloudアカウントにアクセスしたいです', needLogin: 'ログインして認証してください', - loggedIn: 'あなたのDify Cloudアカウントから以下の情報にアクセスしたいと思っています。', + loggedIn: 'このアプリはあなたのDify Cloudアカウントから以下の情報にアクセスしたいと思っています。', common: '私たちはあなたのプライバシーを尊重し、この情報を私たちの開発者ツールによる体験を向上させるためにのみ使用します。', }, scopes: { @@ -17,10 +17,10 @@ const translation = { invalidParams: '無効なパラメータ', authAppInfoFetchFailed: '認証のためのアプリ情報の取得に失敗しました', }, - unknownApp: '未知のアプリ', + unknownApp: '不明なアプリ', login: 'ログイン', switchAccount: 'アカウントを切り替える', - continue: '続けてください', + continue: '続行', connect: '接続する', } diff --git a/web/i18n/ko-KR/oauth.ts b/web/i18n/ko-KR/oauth.ts index 7f86a20ce0..5c13240823 100644 --- a/web/i18n/ko-KR/oauth.ts +++ b/web/i18n/ko-KR/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: '로그인하여 인증해 주세요.', - notLoggedIn: 'Dify Cloud 계정에 접근하고 싶어합니다.', - loggedIn: '다음 정보를 귀하의 Dify Cloud 계정에서 액세스하려고 합니다.', + notLoggedIn: '이 앱은 Dify Cloud 계정에 접근하고 싶어합니다.', + loggedIn: '이 앱은 다음 정보를 귀하의 Dify Cloud 계정에서 액세스하려고 합니다.', common: '우리는 귀하의 개인 정보를 존중하며, 이 정보를 개발자 도구를 통한 귀하의 경험 향상에만 사용할 것입니다.', }, scopes: { @@ -17,11 +17,11 @@ const translation = { authorizeFailed: '권한 부여 실패', authAppInfoFetchFailed: '인증을 위한 앱 정보를 가져오지 못했습니다.', }, - continue: '계속하다', + continue: '계속', unknownApp: '알 수 없는 앱', switchAccount: '계정 전환', login: '로그인', - connect: '연결하다', + connect: '연결', } export default translation diff --git a/web/i18n/pl-PL/oauth.ts b/web/i18n/pl-PL/oauth.ts index e8cf0a5f62..2136b29c90 100644 --- a/web/i18n/pl-PL/oauth.ts +++ b/web/i18n/pl-PL/oauth.ts @@ -1,9 +1,9 @@ const translation = { tips: { needLogin: 'Proszę się zalogować, aby autoryzować', - notLoggedIn: 'chce uzyskać dostęp do twojego konta Dify Cloud', + notLoggedIn: 'Ta aplikacja chce uzyskać dostęp do twojego 
konta Dify Cloud', common: 'Szanujemy Twoją prywatność i będziemy wykorzystywać te informacje tylko w celu ulepszenia Twojego doświadczenia z naszymi narzędziami deweloperskimi.', - loggedIn: 'chce uzyskać dostęp do następujących informacji z twojego konta Dify Cloud.', + loggedIn: 'Ta aplikacja chce uzyskać dostęp do następujących informacji z twojego konta Dify Cloud.', }, scopes: { timezone: 'Strefa czasowa', diff --git a/web/i18n/pt-BR/oauth.ts b/web/i18n/pt-BR/oauth.ts index 2e45480f29..eba5d4e738 100644 --- a/web/i18n/pt-BR/oauth.ts +++ b/web/i18n/pt-BR/oauth.ts @@ -1,7 +1,7 @@ const translation = { tips: { - notLoggedIn: 'quer acessar sua conta do Dify Cloud', - loggedIn: 'quer acessar as seguintes informações da sua conta Dify Cloud.', + notLoggedIn: 'Este aplicativo quer acessar sua conta do Dify Cloud', + loggedIn: 'Este aplicativo quer acessar as seguintes informações da sua conta Dify Cloud.', common: 'Respeitamos sua privacidade e usaremos essas informações apenas para melhorar sua experiência com nossas ferramentas de desenvolvedor.', needLogin: 'Por favor, faça login para autorizar', }, diff --git a/web/i18n/ro-RO/oauth.ts b/web/i18n/ro-RO/oauth.ts index 0eb9222093..c21322d2f2 100644 --- a/web/i18n/ro-RO/oauth.ts +++ b/web/i18n/ro-RO/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: 'Vă rugăm să vă conectați pentru a autoriza', - loggedIn: 'vrea să acceseze următoarele informații din contul tău Dify Cloud.', - notLoggedIn: 'vrea să acceseze contul tău Dify Cloud', + loggedIn: 'Această aplicație vrea să acceseze următoarele informații din contul tău Dify Cloud.', + notLoggedIn: 'Această aplicație vrea să acceseze contul tău Dify Cloud', common: 'Respectăm confidențialitatea dvs. și vom folosi aceste informații doar pentru a îmbunătăți experiența dvs. 
cu instrumentele noastre pentru dezvoltatori.', }, scopes: { diff --git a/web/i18n/ru-RU/oauth.ts b/web/i18n/ru-RU/oauth.ts index 26a84100d5..d530b8c780 100644 --- a/web/i18n/ru-RU/oauth.ts +++ b/web/i18n/ru-RU/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: 'Пожалуйста, войдите, чтобы авторизоваться', - notLoggedIn: 'хочет получить доступ к вашей учетной записи Dify Cloud', - loggedIn: 'хочет получить следующую информацию из вашего аккаунта Dify Cloud.', + notLoggedIn: 'Это приложение хочет получить доступ к вашей учетной записи Dify Cloud', + loggedIn: 'Это приложение хочет получить следующую информацию из вашего аккаунта Dify Cloud.', common: 'Мы уважаем вашу конфиденциальность и будем использовать эту информацию только для улучшения вашего опыта с нашими инструментами разработчика.', }, scopes: { @@ -17,7 +17,7 @@ const translation = { authorizeFailed: 'Авторизация не удалась', authAppInfoFetchFailed: 'Не удалось получить информацию об приложении для авторизации', }, - continue: 'Продолжайте', + continue: 'Продолжить', connect: 'Подключиться к', switchAccount: 'Сменить аккаунт', unknownApp: 'Неизвестное приложение', diff --git a/web/i18n/sl-SI/oauth.ts b/web/i18n/sl-SI/oauth.ts index 2a99e1a6e3..f03bfadd50 100644 --- a/web/i18n/sl-SI/oauth.ts +++ b/web/i18n/sl-SI/oauth.ts @@ -1,7 +1,7 @@ const translation = { tips: { - notLoggedIn: 'želi dostopati do vašega Dify Cloud računa', - loggedIn: 'želi dostopati do naslednjih informacij iz vašega računa Dify Cloud.', + notLoggedIn: 'Ta aplikacija želi dostopati do vašega Dify Cloud računa', + loggedIn: 'Ta aplikacija želi dostopati do naslednjih informacij iz vašega računa Dify Cloud.', common: 'Soočamo se z vašo zasebnostjo in te informacije bomo uporabili le za izboljšanje vaših izkušenj z našimi orodji za razvijalce.', needLogin: 'Prosimo, prijavite se za avtorizacijo', }, diff --git a/web/i18n/th-TH/oauth.ts b/web/i18n/th-TH/oauth.ts index 74b5d123f1..626510b684 100644 --- a/web/i18n/th-TH/oauth.ts +++ b/web/i18n/th-TH/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: 'โปรดเข้าสู่ระบบเพื่ออนุญาต', - notLoggedIn: 'ต้องการเข้าถึงบัญชี Dify Cloud ของคุณ', - loggedIn: 'ต้องการเข้าถึงข้อมูลต่อไปนี้จากบัญชี Dify Cloud ของคุณ.', + notLoggedIn: 'แอปพลิเคชันนี้ต้องการเข้าถึงบัญชี Dify Cloud ของคุณ', + loggedIn: 'แอปพลิเคชันนี้ต้องการเข้าถึงข้อมูลต่อไปนี้จากบัญชี Dify Cloud ของคุณ.', common: 'เรามีความเคารพต่อความเป็นส่วนตัวของคุณและจะใช้ข้อมูลนี้เพื่อปรับปรุงประสบการณ์ของคุณกับเครื่องมือนักพัฒนาของเราเท่านั้น.', }, scopes: { diff --git a/web/i18n/tr-TR/oauth.ts b/web/i18n/tr-TR/oauth.ts index 65196bcfe3..3f71cdf483 100644 --- a/web/i18n/tr-TR/oauth.ts +++ b/web/i18n/tr-TR/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { - notLoggedIn: 'Dify Cloud hesabınıza erişmek istiyor', + notLoggedIn: 'Bu uygulama Dify Cloud hesabınıza erişmek istiyor', common: 'Gizliliğinize saygı gösteriyoruz ve bu bilgiyi yalnızca geliştirici araçlarımızla deneyiminizi geliştirmek için kullanacağız.', - loggedIn: 'Dify Cloud hesabınızdaki aşağıdaki bilgilere erişmek istiyor.', + loggedIn: 'Bu uygulama Dify Cloud hesabınızdaki aşağıdaki bilgilere erişmek istiyor.', needLogin: 'Lütfen yetkilendirmek için giriş yapın', }, scopes: { diff --git a/web/i18n/uk-UA/oauth.ts b/web/i18n/uk-UA/oauth.ts index 0fc6018059..a2510c6efe 100644 --- a/web/i18n/uk-UA/oauth.ts +++ b/web/i18n/uk-UA/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { - notLoggedIn: 'хоче отримати доступ до вашого облікового запису Dify Cloud', + notLoggedIn: 'Цей додаток 
хоче отримати доступ до вашого облікового запису Dify Cloud', needLogin: 'Будь ласка, увійдіть, щоб авторизуватися.', - loggedIn: 'хоче отримати доступ до наступної інформації з вашого облікового запису Dify Cloud.', + loggedIn: 'Цей додаток хоче отримати доступ до наступної інформації з вашого облікового запису Dify Cloud.', common: 'Ми поважаємо вашу конфіденційність і використовуватимемо цю інформацію лише для покращення вашого досвіду з нашими інструментами для розробників.', }, scopes: { diff --git a/web/i18n/vi-VN/oauth.ts b/web/i18n/vi-VN/oauth.ts index 2c1c9ba37d..dc4c1b39bc 100644 --- a/web/i18n/vi-VN/oauth.ts +++ b/web/i18n/vi-VN/oauth.ts @@ -1,8 +1,8 @@ const translation = { tips: { needLogin: 'Vui lòng đăng nhập để xác thực', - notLoggedIn: 'muốn truy cập vào tài khoản Dify Cloud của bạn', - loggedIn: 'muốn truy cập thông tin sau từ tài khoản Dify Cloud của bạn.', + notLoggedIn: 'Ứng dụng này muốn truy cập vào tài khoản Dify Cloud của bạn', + loggedIn: 'Ứng dụng này muốn truy cập thông tin sau từ tài khoản Dify Cloud của bạn.', common: 'Chúng tôi tôn trọng quyền riêng tư của bạn và sẽ chỉ sử dụng thông tin này để cải thiện trải nghiệm của bạn với các công cụ phát triển của chúng tôi.', }, scopes: { From b8d8dddd5a06efd1816cd496d4d92b1476aafd00 Mon Sep 17 00:00:00 2001 From: Asuka Minato Date: Mon, 1 Sep 2025 11:04:24 +0900 Subject: [PATCH 25/96] example of decorator typing (#24857) Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> --- api/controllers/inner_api/wraps.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/api/controllers/inner_api/wraps.py b/api/controllers/inner_api/wraps.py index c5aa318f58..de4f1da801 100644 --- a/api/controllers/inner_api/wraps.py +++ b/api/controllers/inner_api/wraps.py @@ -1,8 +1,12 @@ from base64 import b64encode +from collections.abc import Callable from functools import wraps from hashlib import sha1 from hmac import new as hmac_new +from typing import ParamSpec, TypeVar +P = ParamSpec("P") +R = TypeVar("R") from flask import abort, request from configs import dify_config @@ -10,9 +14,9 @@ from extensions.ext_database import db from models.model import EndUser -def billing_inner_api_only(view): +def billing_inner_api_only(view: Callable[P, R]): @wraps(view) - def decorated(*args, **kwargs): + def decorated(*args: P.args, **kwargs: P.kwargs): if not dify_config.INNER_API: abort(404) @@ -26,9 +30,9 @@ def billing_inner_api_only(view): return decorated -def enterprise_inner_api_only(view): +def enterprise_inner_api_only(view: Callable[P, R]): @wraps(view) - def decorated(*args, **kwargs): + def decorated(*args: P.args, **kwargs: P.kwargs): if not dify_config.INNER_API: abort(404) @@ -78,9 +82,9 @@ def enterprise_inner_api_user_auth(view): return decorated -def plugin_inner_api_only(view): +def plugin_inner_api_only(view: Callable[P, R]): @wraps(view) - def decorated(*args, **kwargs): + def decorated(*args: P.args, **kwargs: P.kwargs): if not dify_config.PLUGIN_DAEMON_KEY: abort(404) From c45d676477c16807f2a71df94b7646fdc38f378e Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 1 Sep 2025 10:05:19 +0800 Subject: [PATCH 26/96] remove duplicated authorization header handling and bearer should be case-insensitive (#24852) --- api/controllers/console/auth/oauth_server.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/api/controllers/console/auth/oauth_server.py 
b/api/controllers/console/auth/oauth_server.py index 19ca464a79..0e6e746a8b 100644 --- a/api/controllers/console/auth/oauth_server.py +++ b/api/controllers/console/auth/oauth_server.py @@ -44,22 +44,19 @@ def oauth_server_access_token_required(view): if not oauth_provider_app or not isinstance(oauth_provider_app, OAuthProviderApp): raise BadRequest("Invalid oauth_provider_app") - if not request.headers.get("Authorization"): - raise BadRequest("Authorization is required") - authorization_header = request.headers.get("Authorization") if not authorization_header: raise BadRequest("Authorization header is required") - parts = authorization_header.split(" ") + parts = authorization_header.strip().split(" ") if len(parts) != 2: raise BadRequest("Invalid Authorization header format") - token_type = parts[0] - if token_type != "Bearer": + token_type = parts[0].strip() + if token_type.lower() != "bearer": raise BadRequest("token_type is invalid") - access_token = parts[1] + access_token = parts[1].strip() if not access_token: raise BadRequest("access_token is required") From 2e6e414a9ed36c5b23af26755780fd59234fbc5d Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Mon, 1 Sep 2025 10:05:54 +0800 Subject: [PATCH 27/96] the conversion OAuthGrantType(parsed_args["grant_type"]) can raise ValueError for invalid values which is not caught and will produce a 500 (#24854) --- api/controllers/console/auth/oauth_server.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/api/controllers/console/auth/oauth_server.py b/api/controllers/console/auth/oauth_server.py index 0e6e746a8b..f730cfa3fe 100644 --- a/api/controllers/console/auth/oauth_server.py +++ b/api/controllers/console/auth/oauth_server.py @@ -122,7 +122,10 @@ class OAuthServerUserTokenApi(Resource): parser.add_argument("refresh_token", type=str, required=False, location="json") parsed_args = parser.parse_args() - grant_type = OAuthGrantType(parsed_args["grant_type"]) + try: + grant_type = OAuthGrantType(parsed_args["grant_type"]) + except ValueError: + raise BadRequest("invalid grant_type") if grant_type == OAuthGrantType.AUTHORIZATION_CODE: if not parsed_args["code"]: @@ -160,8 +163,6 @@ class OAuthServerUserTokenApi(Resource): "refresh_token": refresh_token, } ) - else: - raise BadRequest("invalid grant_type") class OAuthServerUserAccountApi(Resource): From 88622f70fbc002a1e8fc4e5a5eaf8b64c4538f6c Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 12:08:03 +0800 Subject: [PATCH 28/96] refactor(graph_engine): Move setup methods into `__init__` Signed-off-by: -LAN- --- api/core/workflow/graph_engine/README.md | 187 ------------------ .../workflow/graph_engine/graph_engine.py | 125 ++++++------ ...ditional_streaming_vs_template_workflow.py | 4 +- 3 files changed, 68 insertions(+), 248 deletions(-) delete mode 100644 api/core/workflow/graph_engine/README.md diff --git a/api/core/workflow/graph_engine/README.md b/api/core/workflow/graph_engine/README.md deleted file mode 100644 index 7e5c919513..0000000000 --- a/api/core/workflow/graph_engine/README.md +++ /dev/null @@ -1,187 +0,0 @@ -# Graph Engine - -Queue-based workflow execution engine for parallel graph processing. 
- -## Architecture - -The engine uses a modular architecture with specialized packages: - -### Core Components - -- **Domain** (`domain/`) - Core models: ExecutionContext, GraphExecution, NodeExecution -- **Event Management** (`event_management/`) - Event handling, collection, and emission -- **State Management** (`state_management/`) - Thread-safe state tracking for nodes and edges -- **Error Handling** (`error_handling/`) - Strategy-based error recovery (retry, abort, fail-branch, default-value) -- **Graph Traversal** (`graph_traversal/`) - Node readiness, edge processing, branch handling -- **Command Processing** (`command_processing/`) - External command handling (abort, pause, resume) -- **Worker Management** (`worker_management/`) - Dynamic worker pool with auto-scaling -- **Orchestration** (`orchestration/`) - Main event loop and execution coordination - -### Supporting Components - -- **Output Registry** (`output_registry/`) - Thread-safe storage for node outputs -- **Response Coordinator** (`response_coordinator/`) - Ordered streaming of response nodes -- **Command Channels** (`command_channels/`) - Command transport (InMemory/Redis) -- **Layers** (`layers/`) - Pluggable middleware for extensions - -## Architecture Diagram - -```mermaid -classDiagram - class GraphEngine { - +run() - +add_layer() - } - - class Domain { - ExecutionContext - GraphExecution - NodeExecution - } - - class EventManagement { - EventHandlerRegistry - EventCollector - EventEmitter - } - - class StateManagement { - NodeStateManager - EdgeStateManager - ExecutionTracker - } - - class WorkerManagement { - WorkerPool - WorkerFactory - DynamicScaler - ActivityTracker - } - - class GraphTraversal { - NodeReadinessChecker - EdgeProcessor - BranchHandler - SkipPropagator - } - - class Orchestration { - Dispatcher - ExecutionCoordinator - } - - class ErrorHandling { - ErrorHandler - RetryStrategy - AbortStrategy - FailBranchStrategy - } - - class CommandProcessing { - CommandProcessor - AbortCommandHandler - } - - class CommandChannels { - InMemoryChannel - RedisChannel - } - - class OutputRegistry { - <> - Scalar Values - Streaming Data - } - - class ResponseCoordinator { - Session Management - Path Analysis - } - - class Layers { - <> - DebugLoggingLayer - } - - GraphEngine --> Orchestration : coordinates - GraphEngine --> Layers : extends - - Orchestration --> EventManagement : processes events - Orchestration --> WorkerManagement : manages scaling - Orchestration --> CommandProcessing : checks commands - Orchestration --> StateManagement : monitors state - - WorkerManagement --> StateManagement : consumes ready queue - WorkerManagement --> EventManagement : produces events - WorkerManagement --> Domain : executes nodes - - EventManagement --> ErrorHandling : failed events - EventManagement --> GraphTraversal : success events - EventManagement --> ResponseCoordinator : stream events - EventManagement --> Layers : notifies - - GraphTraversal --> StateManagement : updates states - GraphTraversal --> Domain : checks graph - - CommandProcessing --> CommandChannels : fetches commands - CommandProcessing --> Domain : modifies execution - - ErrorHandling --> Domain : handles failures - - StateManagement --> Domain : tracks entities - - ResponseCoordinator --> OutputRegistry : reads outputs - - Domain --> OutputRegistry : writes outputs -``` - -## Package Relationships - -### Core Dependencies - -- **Orchestration** acts as the central coordinator, managing all subsystems -- **Domain** provides the core business entities 
used by all packages -- **EventManagement** serves as the communication backbone between components -- **StateManagement** maintains thread-safe state for the entire system - -### Data Flow - -1. **Commands** flow from CommandChannels → CommandProcessing → Domain -1. **Events** flow from Workers → EventHandlerRegistry → State updates -1. **Node outputs** flow from Workers → OutputRegistry → ResponseCoordinator -1. **Ready nodes** flow from GraphTraversal → StateManagement → WorkerManagement - -### Extension Points - -- **Layers** observe all events for monitoring, logging, and custom logic -- **ErrorHandling** strategies can be extended for custom failure recovery -- **CommandChannels** can be implemented for different transport mechanisms - -## Execution Flow - -1. **Initialization**: GraphEngine creates all subsystems with the workflow graph -1. **Node Discovery**: Traversal components identify ready nodes -1. **Worker Execution**: Workers pull from ready queue and execute nodes -1. **Event Processing**: Dispatcher routes events to appropriate handlers -1. **State Updates**: Managers track node/edge states for next steps -1. **Completion**: Coordinator detects when all nodes are done - -## Usage - -```python -from core.workflow.graph_engine import GraphEngine -from core.workflow.graph_engine.command_channels import InMemoryChannel - -# Create and run engine -engine = GraphEngine( - tenant_id="tenant_1", - app_id="app_1", - workflow_id="workflow_1", - graph=graph, - command_channel=InMemoryChannel(), -) - -# Stream execution events -for event in engine.run(): - handle_event(event) -``` diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 43ab486fe1..11a2f458fb 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -72,9 +72,10 @@ class GraphEngine: scale_up_threshold: int | None = None, scale_down_idle_time: float | None = None, ) -> None: - """Initialize the graph engine with separated concerns.""" + """Initialize the graph engine with all subsystems and dependencies.""" - # Create domain models + # === Domain Models === + # Execution context encapsulates workflow execution metadata self._execution_context = ExecutionContext( tenant_id=tenant_id, app_id=app_id, @@ -87,62 +88,67 @@ class GraphEngine: max_execution_time=max_execution_time, ) + # Graph execution tracks the overall execution state self._graph_execution = GraphExecution(workflow_id=workflow_id) - # Store core dependencies + # === Core Dependencies === + # Graph structure and configuration self._graph = graph self._graph_config = graph_config self._graph_runtime_state = graph_runtime_state self._command_channel = command_channel - # Store worker management parameters + # === Worker Management Parameters === + # Parameters for dynamic worker pool scaling self._min_workers = min_workers self._max_workers = max_workers self._scale_up_threshold = scale_up_threshold self._scale_down_idle_time = scale_down_idle_time - # Initialize queues + # === Execution Queues === + # Queue for nodes ready to execute self._ready_queue: queue.Queue[str] = queue.Queue() + # Queue for events generated during execution self._event_queue: queue.Queue[GraphNodeEventBase] = queue.Queue() - # Initialize subsystems - self._initialize_subsystems() - - # Layers for extensibility - self._layers: list[Layer] = [] - - # Validate graph state consistency - self._validate_graph_state_consistency() - - def _initialize_subsystems(self) -> None: 
- """Initialize all subsystems with proper dependency injection.""" - - # Unified state management - single instance handles all state operations + # === State Management === + # Unified state manager handles all node state transitions and queue operations self._state_manager = UnifiedStateManager(self._graph, self._ready_queue) - # Response coordination + # === Response Coordination === + # Coordinates response streaming from response nodes self._response_coordinator = ResponseStreamCoordinator( variable_pool=self._graph_runtime_state.variable_pool, graph=self._graph ) - # Event management + # === Event Management === + # Event collector aggregates events from all subsystems self._event_collector = EventCollector() + # Event emitter streams collected events to consumers self._event_emitter = EventEmitter(self._event_collector) - # Error handling + # === Error Handling === + # Centralized error handler for graph execution errors self._error_handler = ErrorHandler(self._graph, self._graph_execution) - # Graph traversal + # === Graph Traversal Components === + # Checks if nodes are ready to execute based on their dependencies self._node_readiness_checker = NodeReadinessChecker(self._graph) + + # Processes edges to determine next nodes after execution self._edge_processor = EdgeProcessor( graph=self._graph, state_manager=self._state_manager, response_coordinator=self._response_coordinator, ) + + # Propagates skip status through the graph when conditions aren't met self._skip_propagator = SkipPropagator( graph=self._graph, state_manager=self._state_manager, ) + + # Handles conditional branching and route selection self._branch_handler = BranchHandler( graph=self._graph, edge_processor=self._edge_processor, @@ -150,7 +156,8 @@ class GraphEngine: state_manager=self._state_manager, ) - # Event handler registry with all dependencies + # === Event Handler Registry === + # Central registry for handling all node execution events self._event_handler_registry = EventHandlerRegistry( graph=self._graph, graph_runtime_state=self._graph_runtime_state, @@ -163,47 +170,22 @@ class GraphEngine: error_handler=self._error_handler, ) - # Command processing + # === Command Processing === + # Processes external commands (e.g., abort requests) self._command_processor = CommandProcessor( command_channel=self._command_channel, graph_execution=self._graph_execution, ) - self._setup_command_handlers() - # Worker management - self._setup_worker_management() - - # Orchestration - self._execution_coordinator = ExecutionCoordinator( - graph_execution=self._graph_execution, - state_manager=self._state_manager, - event_handler=self._event_handler_registry, - event_collector=self._event_collector, - command_processor=self._command_processor, - worker_pool=self._worker_pool, - ) - - self._dispatcher = Dispatcher( - event_queue=self._event_queue, - event_handler=self._event_handler_registry, - event_collector=self._event_collector, - execution_coordinator=self._execution_coordinator, - max_execution_time=self._execution_context.max_execution_time, - event_emitter=self._event_emitter, - ) - - def _setup_command_handlers(self) -> None: - """Configure command handlers.""" - # Create handler instance that follows the protocol + # Register abort command handler abort_handler = AbortCommandHandler() self._command_processor.register_handler( AbortCommand, abort_handler, ) - def _setup_worker_management(self) -> None: - """Initialize worker management subsystem.""" - # Capture context for workers + # === Worker Pool Setup === + # 
Capture Flask app context for worker threads flask_app: Flask | None = None try: app = current_app._get_current_object() # type: ignore @@ -212,9 +194,10 @@ class GraphEngine: except RuntimeError: pass + # Capture context variables for worker threads context_vars = contextvars.copy_context() - # Create simple worker pool + # Create worker pool for parallel node execution self._worker_pool = SimpleWorkerPool( ready_queue=self._ready_queue, event_queue=self._event_queue, @@ -227,6 +210,35 @@ class GraphEngine: scale_down_idle_time=self._scale_down_idle_time, ) + # === Orchestration === + # Coordinates the overall execution lifecycle + self._execution_coordinator = ExecutionCoordinator( + graph_execution=self._graph_execution, + state_manager=self._state_manager, + event_handler=self._event_handler_registry, + event_collector=self._event_collector, + command_processor=self._command_processor, + worker_pool=self._worker_pool, + ) + + # Dispatches events and manages execution flow + self._dispatcher = Dispatcher( + event_queue=self._event_queue, + event_handler=self._event_handler_registry, + event_collector=self._event_collector, + execution_coordinator=self._execution_coordinator, + max_execution_time=self._execution_context.max_execution_time, + event_emitter=self._event_emitter, + ) + + # === Extensibility === + # Layers allow plugins to extend engine functionality + self._layers: list[Layer] = [] + + # === Validation === + # Ensure all nodes share the same GraphRuntimeState instance + self._validate_graph_state_consistency() + def _validate_graph_state_consistency(self) -> None: """Validate that all nodes share the same GraphRuntimeState.""" expected_state_id = id(self._graph_runtime_state) @@ -337,8 +349,3 @@ class GraphEngine: def graph_runtime_state(self) -> GraphRuntimeState: """Get the graph runtime state.""" return self._graph_runtime_state - - @property - def graph(self) -> Graph: - """Get the graph.""" - return self._graph diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_conditional_streaming_vs_template_workflow.py b/api/tests/unit_tests/core/workflow/graph_engine/test_conditional_streaming_vs_template_workflow.py index f7da5e65d9..7ea789af51 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_conditional_streaming_vs_template_workflow.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_conditional_streaming_vs_template_workflow.py @@ -100,7 +100,7 @@ def test_streaming_output_with_blocking_equals_one(): ) # Check that NodeRunStreamChunkEvent contains 'query' should has same id with Start NodeRunStartedEvent - start_node_id = engine.graph.root_node.id + start_node_id = graph.root_node.id start_events = [e for e in events if isinstance(e, NodeRunStartedEvent) and e.node_id == start_node_id] assert len(start_events) == 1, f"Expected 1 start event for node {start_node_id}, but got {len(start_events)}" start_event = start_events[0] @@ -210,7 +210,7 @@ def test_streaming_output_with_blocking_not_equals_one(): assert isinstance(chunk_event.chunk, str), f"Expected chunk to be string, but got {type(chunk_event.chunk)}" # Check that NodeRunStreamChunkEvent contains 'query' should has same id with Start NodeRunStartedEvent - start_node_id = engine.graph.root_node.id + start_node_id = graph.root_node.id start_events = [e for e in events if isinstance(e, NodeRunStartedEvent) and e.node_id == start_node_id] assert len(start_events) == 1, f"Expected 1 start event for node {start_node_id}, but got {len(start_events)}" start_event = start_events[0] From 
bb5d52539cdad4b00c2d0b8ef12a161a66293a9f Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 12:53:06 +0800 Subject: [PATCH 29/96] refactor(graph_engine): Merge branch_handler into edge_processor Signed-off-by: -LAN- --- api/.importlinter | 10 ++- .../event_management/event_handlers.py | 7 +- .../workflow/graph_engine/graph_engine.py | 22 ++--- .../graph_engine/graph_traversal/__init__.py | 4 - .../graph_traversal/branch_handler.py | 87 ------------------- .../graph_traversal/edge_processor.py | 54 +++++++++++- .../graph_traversal/node_readiness.py | 86 ------------------ .../graph_engine/worker_management/README.md | 81 ----------------- 8 files changed, 66 insertions(+), 285 deletions(-) delete mode 100644 api/core/workflow/graph_engine/graph_traversal/branch_handler.py delete mode 100644 api/core/workflow/graph_engine/graph_traversal/node_readiness.py delete mode 100644 api/core/workflow/graph_engine/worker_management/README.md diff --git a/api/.importlinter b/api/.importlinter index 14a66f2ff9..4380e8c18e 100644 --- a/api/.importlinter +++ b/api/.importlinter @@ -97,10 +97,12 @@ modules = [importlinter:contract:graph-traversal-components] name = Graph Traversal Components -type = independence -modules = - core.workflow.graph_engine.graph_traversal.node_readiness - core.workflow.graph_engine.graph_traversal.skip_propagator +type = layers +layers = + edge_processor + skip_propagator +containers = + core.workflow.graph_engine.graph_traversal [importlinter:contract:command-channels] name = Command Channels Independence diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py index 3ec8e8b028..c3c38ee3eb 100644 --- a/api/core/workflow/graph_engine/event_management/event_handlers.py +++ b/api/core/workflow/graph_engine/event_management/event_handlers.py @@ -31,7 +31,7 @@ from ..response_coordinator import ResponseStreamCoordinator if TYPE_CHECKING: from ..error_handling import ErrorHandler - from ..graph_traversal import BranchHandler, EdgeProcessor + from ..graph_traversal import EdgeProcessor from ..state_management import UnifiedStateManager from .event_collector import EventCollector @@ -54,7 +54,6 @@ class EventHandlerRegistry: graph_execution: GraphExecution, response_coordinator: ResponseStreamCoordinator, event_collector: "EventCollector", - branch_handler: "BranchHandler", edge_processor: "EdgeProcessor", state_manager: "UnifiedStateManager", error_handler: "ErrorHandler", @@ -68,7 +67,6 @@ class EventHandlerRegistry: graph_execution: Graph execution aggregate response_coordinator: Response stream coordinator event_collector: Event collector for collecting events - branch_handler: Branch handler for branch node processing edge_processor: Edge processor for edge traversal state_manager: Unified state manager error_handler: Error handler @@ -78,7 +76,6 @@ class EventHandlerRegistry: self._graph_execution = graph_execution self._response_coordinator = response_coordinator self._event_collector = event_collector - self._branch_handler = branch_handler self._edge_processor = edge_processor self._state_manager = state_manager self._error_handler = error_handler @@ -184,7 +181,7 @@ class EventHandlerRegistry: # Process edges and get ready nodes node = self._graph.nodes[event.node_id] if node.execution_type == NodeExecutionType.BRANCH: - ready_nodes, edge_streaming_events = self._branch_handler.handle_branch_completion( + ready_nodes, edge_streaming_events = 
self._edge_processor.handle_branch_completion( event.node_id, event.node_run_result.edge_source_handle ) else: diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 11a2f458fb..be6fd4f63f 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -32,7 +32,7 @@ from .domain import ExecutionContext, GraphExecution from .entities.commands import AbortCommand from .error_handling import ErrorHandler from .event_management import EventCollector, EventEmitter, EventHandlerRegistry -from .graph_traversal import BranchHandler, EdgeProcessor, NodeReadinessChecker, SkipPropagator +from .graph_traversal import EdgeProcessor, SkipPropagator from .layers.base import Layer from .orchestration import Dispatcher, ExecutionCoordinator from .protocols.command_channel import CommandChannel @@ -132,28 +132,19 @@ class GraphEngine: self._error_handler = ErrorHandler(self._graph, self._graph_execution) # === Graph Traversal Components === - # Checks if nodes are ready to execute based on their dependencies - self._node_readiness_checker = NodeReadinessChecker(self._graph) - - # Processes edges to determine next nodes after execution - self._edge_processor = EdgeProcessor( - graph=self._graph, - state_manager=self._state_manager, - response_coordinator=self._response_coordinator, - ) - # Propagates skip status through the graph when conditions aren't met self._skip_propagator = SkipPropagator( graph=self._graph, state_manager=self._state_manager, ) - # Handles conditional branching and route selection - self._branch_handler = BranchHandler( + # Processes edges to determine next nodes after execution + # Also handles conditional branching and route selection + self._edge_processor = EdgeProcessor( graph=self._graph, - edge_processor=self._edge_processor, - skip_propagator=self._skip_propagator, state_manager=self._state_manager, + response_coordinator=self._response_coordinator, + skip_propagator=self._skip_propagator, ) # === Event Handler Registry === @@ -164,7 +155,6 @@ class GraphEngine: graph_execution=self._graph_execution, response_coordinator=self._response_coordinator, event_collector=self._event_collector, - branch_handler=self._branch_handler, edge_processor=self._edge_processor, state_manager=self._state_manager, error_handler=self._error_handler, diff --git a/api/core/workflow/graph_engine/graph_traversal/__init__.py b/api/core/workflow/graph_engine/graph_traversal/__init__.py index 16f09bd7f1..d629140d06 100644 --- a/api/core/workflow/graph_engine/graph_traversal/__init__.py +++ b/api/core/workflow/graph_engine/graph_traversal/__init__.py @@ -5,14 +5,10 @@ This package handles graph navigation, edge processing, and skip propagation logic. """ -from .branch_handler import BranchHandler from .edge_processor import EdgeProcessor -from .node_readiness import NodeReadinessChecker from .skip_propagator import SkipPropagator __all__ = [ - "BranchHandler", "EdgeProcessor", - "NodeReadinessChecker", "SkipPropagator", ] diff --git a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py b/api/core/workflow/graph_engine/graph_traversal/branch_handler.py deleted file mode 100644 index 3d646a8719..0000000000 --- a/api/core/workflow/graph_engine/graph_traversal/branch_handler.py +++ /dev/null @@ -1,87 +0,0 @@ -""" -Branch node handling for graph traversal. 
-""" - -from collections.abc import Sequence -from typing import final - -from core.workflow.graph import Graph -from core.workflow.graph_events.node import NodeRunStreamChunkEvent - -from ..state_management import UnifiedStateManager -from .edge_processor import EdgeProcessor -from .skip_propagator import SkipPropagator - - -@final -class BranchHandler: - """ - Handles branch node logic during graph traversal. - - Branch nodes select one of multiple paths based on conditions, - requiring special handling for edge selection and skip propagation. - """ - - def __init__( - self, - graph: Graph, - edge_processor: EdgeProcessor, - skip_propagator: SkipPropagator, - state_manager: UnifiedStateManager, - ) -> None: - """ - Initialize the branch handler. - - Args: - graph: The workflow graph - edge_processor: Processor for edges - skip_propagator: Propagator for skip states - state_manager: Unified state manager - """ - self._graph = graph - self._edge_processor = edge_processor - self._skip_propagator = skip_propagator - self._state_manager = state_manager - - def handle_branch_completion( - self, node_id: str, selected_handle: str | None - ) -> tuple[Sequence[str], Sequence[NodeRunStreamChunkEvent]]: - """ - Handle completion of a branch node. - - Args: - node_id: The ID of the branch node - selected_handle: The handle of the selected branch - - Returns: - Tuple of (list of downstream nodes ready for execution, list of streaming events) - - Raises: - ValueError: If no branch was selected - """ - if not selected_handle: - raise ValueError(f"Branch node {node_id} completed without selecting a branch") - - # Categorize edges into selected and unselected - _, unselected_edges = self._state_manager.categorize_branch_edges(node_id, selected_handle) - - # Skip all unselected paths - self._skip_propagator.skip_branch_paths(unselected_edges) - - # Process selected edges and get ready nodes and streaming events - return self._edge_processor.process_node_success(node_id, selected_handle) - - def validate_branch_selection(self, node_id: str, selected_handle: str) -> bool: - """ - Validate that a branch selection is valid. - - Args: - node_id: The ID of the branch node - selected_handle: The handle to validate - - Returns: - True if the selection is valid - """ - outgoing_edges = self._graph.get_outgoing_edges(node_id) - valid_handles = {edge.source_handle for edge in outgoing_edges} - return selected_handle in valid_handles diff --git a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py index ebfcd7d9ee..c5634ed984 100644 --- a/api/core/workflow/graph_engine/graph_traversal/edge_processor.py +++ b/api/core/workflow/graph_engine/graph_traversal/edge_processor.py @@ -3,7 +3,7 @@ Edge processing logic for graph traversal. """ from collections.abc import Sequence -from typing import final +from typing import TYPE_CHECKING, final from core.workflow.enums import NodeExecutionType from core.workflow.graph import Edge, Graph @@ -12,6 +12,9 @@ from core.workflow.graph_events import NodeRunStreamChunkEvent from ..response_coordinator import ResponseStreamCoordinator from ..state_management import UnifiedStateManager +if TYPE_CHECKING: + from .skip_propagator import SkipPropagator + @final class EdgeProcessor: @@ -19,7 +22,8 @@ class EdgeProcessor: Processes edges during graph execution. This handles marking edges as taken or skipped, notifying - the response coordinator, and triggering downstream node execution. 
+ the response coordinator, triggering downstream node execution, + and managing branch node logic. """ def __init__( @@ -27,6 +31,7 @@ class EdgeProcessor: graph: Graph, state_manager: UnifiedStateManager, response_coordinator: ResponseStreamCoordinator, + skip_propagator: "SkipPropagator", ) -> None: """ Initialize the edge processor. @@ -35,10 +40,12 @@ class EdgeProcessor: graph: The workflow graph state_manager: Unified state manager response_coordinator: Response stream coordinator + skip_propagator: Propagator for skip states """ self._graph = graph self._state_manager = state_manager self._response_coordinator = response_coordinator + self._skip_propagator = skip_propagator def process_node_success( self, node_id: str, selected_handle: str | None = None @@ -149,3 +156,46 @@ class EdgeProcessor: edge: The edge to skip """ self._state_manager.mark_edge_skipped(edge.id) + + def handle_branch_completion( + self, node_id: str, selected_handle: str | None + ) -> tuple[Sequence[str], Sequence[NodeRunStreamChunkEvent]]: + """ + Handle completion of a branch node. + + Args: + node_id: The ID of the branch node + selected_handle: The handle of the selected branch + + Returns: + Tuple of (list of downstream nodes ready for execution, list of streaming events) + + Raises: + ValueError: If no branch was selected + """ + if not selected_handle: + raise ValueError(f"Branch node {node_id} completed without selecting a branch") + + # Categorize edges into selected and unselected + _, unselected_edges = self._state_manager.categorize_branch_edges(node_id, selected_handle) + + # Skip all unselected paths + self._skip_propagator.skip_branch_paths(unselected_edges) + + # Process selected edges and get ready nodes and streaming events + return self.process_node_success(node_id, selected_handle) + + def validate_branch_selection(self, node_id: str, selected_handle: str) -> bool: + """ + Validate that a branch selection is valid. + + Args: + node_id: The ID of the branch node + selected_handle: The handle to validate + + Returns: + True if the selection is valid + """ + outgoing_edges = self._graph.get_outgoing_edges(node_id) + valid_handles = {edge.source_handle for edge in outgoing_edges} + return selected_handle in valid_handles diff --git a/api/core/workflow/graph_engine/graph_traversal/node_readiness.py b/api/core/workflow/graph_engine/graph_traversal/node_readiness.py deleted file mode 100644 index d1e3e434a6..0000000000 --- a/api/core/workflow/graph_engine/graph_traversal/node_readiness.py +++ /dev/null @@ -1,86 +0,0 @@ -""" -Node readiness checking for execution. -""" - -from typing import final - -from core.workflow.enums import NodeState -from core.workflow.graph import Graph - - -@final -class NodeReadinessChecker: - """ - Checks if nodes are ready for execution based on their dependencies. - - A node is ready when its dependencies (incoming edges) have been - satisfied according to the graph's execution rules. - """ - - def __init__(self, graph: Graph) -> None: - """ - Initialize the readiness checker. - - Args: - graph: The workflow graph - """ - self._graph = graph - - def is_node_ready(self, node_id: str) -> bool: - """ - Check if a node is ready to be executed. 
- - A node is ready when: - - It has no incoming edges (root or isolated node), OR - - At least one incoming edge is TAKEN and none are UNKNOWN - - Args: - node_id: The ID of the node to check - - Returns: - True if the node is ready for execution - """ - incoming_edges = self._graph.get_incoming_edges(node_id) - - # No dependencies means always ready - if not incoming_edges: - return True - - # Check edge states - has_unknown = False - has_taken = False - - for edge in incoming_edges: - if edge.state == NodeState.UNKNOWN: - has_unknown = True - break - elif edge.state == NodeState.TAKEN: - has_taken = True - - # Not ready if any dependency is still unknown - if has_unknown: - return False - - # Ready if at least one path is taken - return has_taken - - def get_ready_downstream_nodes(self, from_node_id: str) -> list[str]: - """ - Get all downstream nodes that are ready after a node completes. - - Args: - from_node_id: The ID of the completed node - - Returns: - List of node IDs that are now ready - """ - ready_nodes: list[str] = [] - outgoing_edges = self._graph.get_outgoing_edges(from_node_id) - - for edge in outgoing_edges: - if edge.state == NodeState.TAKEN: - downstream_node_id = edge.head - if self.is_node_ready(downstream_node_id): - ready_nodes.append(downstream_node_id) - - return ready_nodes diff --git a/api/core/workflow/graph_engine/worker_management/README.md b/api/core/workflow/graph_engine/worker_management/README.md deleted file mode 100644 index 1e67e1144d..0000000000 --- a/api/core/workflow/graph_engine/worker_management/README.md +++ /dev/null @@ -1,81 +0,0 @@ -# Worker Management - -Dynamic worker pool for node execution. - -## Components - -### WorkerPool - -Manages worker thread lifecycle. - -- `start/stop/wait()` - Control workers -- `scale_up/down()` - Adjust pool size -- `get_worker_count()` - Current count - -### WorkerFactory - -Creates workers with Flask context. - -- `create_worker()` - Build with dependencies -- Preserves request context - -### DynamicScaler - -Determines scaling decisions. - -- `min/max_workers` - Pool bounds -- `scale_up_threshold` - Queue trigger -- `should_scale_up/down()` - Check conditions - -### ActivityTracker - -Tracks worker activity. 
- -- `track_activity(worker_id)` - Record activity -- `get_idle_workers(threshold)` - Find idle -- `get_active_count()` - Active count - -## Usage - -```python -scaler = DynamicScaler( - min_workers=2, - max_workers=10, - scale_up_threshold=5 -) - -pool = WorkerPool( - ready_queue=ready_queue, - worker_factory=factory, - dynamic_scaler=scaler -) - -pool.start() - -# Scale based on load -if scaler.should_scale_up(queue_size, active): - pool.scale_up() - -pool.stop() -``` - -## Scaling Strategy - -**Scale Up**: Queue size > threshold AND workers < max -**Scale Down**: Idle workers exist AND workers > min - -## Parameters - -- `min_workers` - Minimum pool size -- `max_workers` - Maximum pool size -- `scale_up_threshold` - Queue trigger -- `scale_down_threshold` - Idle seconds - -## Flask Context - -WorkerFactory preserves request context across threads: - -```python -context_vars = {"request_id": request.id} -# Workers receive same context -``` From 8433cf44379037bc06f6eeafabd4bdd23e303273 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Mon, 1 Sep 2025 13:15:58 +0800 Subject: [PATCH 30/96] refactor(graph_engine): Merge event_collector and event_emitter into event_manager Signed-off-by: -LAN- --- .../graph_engine/event_management/__init__.py | 10 ++-- .../event_management/event_emitter.py | 58 ------------------ .../event_management/event_handlers.py | 8 +-- .../{event_collector.py => event_manager.py} | 59 ++++++++++++------- .../workflow/graph_engine/graph_engine.py | 22 ++++--- .../graph_engine/orchestration/dispatcher.py | 14 ++--- .../orchestration/execution_coordinator.py | 10 ++-- 7 files changed, 67 insertions(+), 114 deletions(-) delete mode 100644 api/core/workflow/graph_engine/event_management/event_emitter.py rename api/core/workflow/graph_engine/event_management/{event_collector.py => event_manager.py} (73%) diff --git a/api/core/workflow/graph_engine/event_management/__init__.py b/api/core/workflow/graph_engine/event_management/__init__.py index 90c37aa195..f6c3c0f753 100644 --- a/api/core/workflow/graph_engine/event_management/__init__.py +++ b/api/core/workflow/graph_engine/event_management/__init__.py @@ -5,12 +5,10 @@ This package handles event routing, collection, and emission for workflow graph execution events. """ -from .event_collector import EventCollector -from .event_emitter import EventEmitter -from .event_handlers import EventHandlerRegistry +from .event_handlers import EventHandler +from .event_manager import EventManager __all__ = [ - "EventCollector", - "EventEmitter", - "EventHandlerRegistry", + "EventHandler", + "EventManager", ] diff --git a/api/core/workflow/graph_engine/event_management/event_emitter.py b/api/core/workflow/graph_engine/event_management/event_emitter.py deleted file mode 100644 index 660ab2d1ce..0000000000 --- a/api/core/workflow/graph_engine/event_management/event_emitter.py +++ /dev/null @@ -1,58 +0,0 @@ -""" -Event emitter for yielding events to external consumers. -""" - -import threading -import time -from collections.abc import Generator -from typing import final - -from core.workflow.graph_events import GraphEngineEvent - -from .event_collector import EventCollector - - -@final -class EventEmitter: - """ - Emits collected events as a generator for external consumption. - - This provides a generator interface for yielding events as they're - collected, with proper synchronization for multi-threaded access. - """ - - def __init__(self, event_collector: EventCollector) -> None: - """ - Initialize the event emitter. 
- - Args: - event_collector: The collector to emit events from - """ - self._event_collector = event_collector - self._execution_complete = threading.Event() - - def mark_complete(self) -> None: - """Mark execution as complete to stop the generator.""" - self._execution_complete.set() - - def emit_events(self) -> Generator[GraphEngineEvent, None, None]: - """ - Generator that yields events as they're collected. - - Yields: - GraphEngineEvent instances as they're processed - """ - yielded_count = 0 - - while not self._execution_complete.is_set() or yielded_count < self._event_collector.event_count(): - # Get new events since last yield - new_events = self._event_collector.get_new_events(yielded_count) - - # Yield any new events - for event in new_events: - yield event - yielded_count += 1 - - # Small sleep to avoid busy waiting - if not self._execution_complete.is_set() and not new_events: - time.sleep(0.001) diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py index c3c38ee3eb..50b415d026 100644 --- a/api/core/workflow/graph_engine/event_management/event_handlers.py +++ b/api/core/workflow/graph_engine/event_management/event_handlers.py @@ -33,13 +33,13 @@ if TYPE_CHECKING: from ..error_handling import ErrorHandler from ..graph_traversal import EdgeProcessor from ..state_management import UnifiedStateManager - from .event_collector import EventCollector + from .event_manager import EventManager logger = logging.getLogger(__name__) @final -class EventHandlerRegistry: +class EventHandler: """ Registry of event handlers for different event types. @@ -53,7 +53,7 @@ class EventHandlerRegistry: graph_runtime_state: GraphRuntimeState, graph_execution: GraphExecution, response_coordinator: ResponseStreamCoordinator, - event_collector: "EventCollector", + event_collector: "EventManager", edge_processor: "EdgeProcessor", state_manager: "UnifiedStateManager", error_handler: "ErrorHandler", @@ -66,7 +66,7 @@ class EventHandlerRegistry: graph_runtime_state: Runtime state with variable pool graph_execution: Graph execution aggregate response_coordinator: Response stream coordinator - event_collector: Event collector for collecting events + event_collector: Event manager for collecting events edge_processor: Edge processor for edge traversal state_manager: Unified state manager error_handler: Error handler diff --git a/api/core/workflow/graph_engine/event_management/event_collector.py b/api/core/workflow/graph_engine/event_management/event_manager.py similarity index 73% rename from api/core/workflow/graph_engine/event_management/event_collector.py rename to api/core/workflow/graph_engine/event_management/event_manager.py index 683a23c928..d34f4e032b 100644 --- a/api/core/workflow/graph_engine/event_management/event_collector.py +++ b/api/core/workflow/graph_engine/event_management/event_manager.py @@ -1,8 +1,10 @@ """ -Event collector for buffering and managing events. +Unified event manager for collecting and emitting events. """ import threading +import time +from collections.abc import Generator from typing import final from core.workflow.graph_events import GraphEngineEvent @@ -89,19 +91,21 @@ class WriteLockContext: @final -class EventCollector: +class EventManager: """ - Collects and buffers events for later retrieval. + Unified event manager that collects, buffers, and emits events. - This provides thread-safe event collection with support for - notifying layers about events as they're collected. 
+ This class combines event collection with event emission, providing + thread-safe event management with support for notifying layers and + streaming events to external consumers. """ def __init__(self) -> None: - """Initialize the event collector.""" + """Initialize the event manager.""" self._events: list[GraphEngineEvent] = [] self._lock = ReadWriteLock() self._layers: list[Layer] = [] + self._execution_complete = threading.Event() def set_layers(self, layers: list[Layer]) -> None: """ @@ -123,17 +127,7 @@ class EventCollector: self._events.append(event) self._notify_layers(event) - def get_events(self) -> list[GraphEngineEvent]: - """ - Get all collected events. - - Returns: - List of collected events - """ - with self._lock.read_lock(): - return list(self._events) - - def get_new_events(self, start_index: int) -> list[GraphEngineEvent]: + def _get_new_events(self, start_index: int) -> list[GraphEngineEvent]: """ Get new events starting from a specific index. @@ -146,7 +140,7 @@ class EventCollector: with self._lock.read_lock(): return list(self._events[start_index:]) - def event_count(self) -> int: + def _event_count(self) -> int: """ Get the current count of collected events. @@ -156,10 +150,31 @@ class EventCollector: with self._lock.read_lock(): return len(self._events) - def clear(self) -> None: - """Clear all collected events.""" - with self._lock.write_lock(): - self._events.clear() + def mark_complete(self) -> None: + """Mark execution as complete to stop the event emission generator.""" + self._execution_complete.set() + + def emit_events(self) -> Generator[GraphEngineEvent, None, None]: + """ + Generator that yields events as they're collected. + + Yields: + GraphEngineEvent instances as they're processed + """ + yielded_count = 0 + + while not self._execution_complete.is_set() or yielded_count < self._event_count(): + # Get new events since last yield + new_events = self._get_new_events(yielded_count) + + # Yield any new events + for event in new_events: + yield event + yielded_count += 1 + + # Small sleep to avoid busy waiting + if not self._execution_complete.is_set() and not new_events: + time.sleep(0.001) def _notify_layers(self, event: GraphEngineEvent) -> None: """ diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index be6fd4f63f..833cee0ffe 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -31,7 +31,7 @@ from .command_processing import AbortCommandHandler, CommandProcessor from .domain import ExecutionContext, GraphExecution from .entities.commands import AbortCommand from .error_handling import ErrorHandler -from .event_management import EventCollector, EventEmitter, EventHandlerRegistry +from .event_management import EventHandler, EventManager from .graph_traversal import EdgeProcessor, SkipPropagator from .layers.base import Layer from .orchestration import Dispatcher, ExecutionCoordinator @@ -122,10 +122,8 @@ class GraphEngine: ) # === Event Management === - # Event collector aggregates events from all subsystems - self._event_collector = EventCollector() - # Event emitter streams collected events to consumers - self._event_emitter = EventEmitter(self._event_collector) + # Event manager handles both collection and emission of events + self._event_manager = EventManager() # === Error Handling === # Centralized error handler for graph execution errors @@ -149,12 +147,12 @@ class GraphEngine: # === Event Handler Registry === # Central 
registry for handling all node execution events - self._event_handler_registry = EventHandlerRegistry( + self._event_handler_registry = EventHandler( graph=self._graph, graph_runtime_state=self._graph_runtime_state, graph_execution=self._graph_execution, response_coordinator=self._response_coordinator, - event_collector=self._event_collector, + event_collector=self._event_manager, edge_processor=self._edge_processor, state_manager=self._state_manager, error_handler=self._error_handler, @@ -206,7 +204,7 @@ class GraphEngine: graph_execution=self._graph_execution, state_manager=self._state_manager, event_handler=self._event_handler_registry, - event_collector=self._event_collector, + event_collector=self._event_manager, command_processor=self._command_processor, worker_pool=self._worker_pool, ) @@ -215,10 +213,10 @@ class GraphEngine: self._dispatcher = Dispatcher( event_queue=self._event_queue, event_handler=self._event_handler_registry, - event_collector=self._event_collector, + event_collector=self._event_manager, execution_coordinator=self._execution_coordinator, max_execution_time=self._execution_context.max_execution_time, - event_emitter=self._event_emitter, + event_emitter=self._event_manager, ) # === Extensibility === @@ -261,7 +259,7 @@ class GraphEngine: self._start_execution() # Yield events as they occur - yield from self._event_emitter.emit_events() + yield from self._event_manager.emit_events() # Handle completion if self._graph_execution.aborted: @@ -289,7 +287,7 @@ class GraphEngine: def _initialize_layers(self) -> None: """Initialize layers with context.""" - self._event_collector.set_layers(self._layers) + self._event_manager.set_layers(self._layers) for layer in self._layers: try: layer.initialize(self._graph_runtime_state, self._command_channel) diff --git a/api/core/workflow/graph_engine/orchestration/dispatcher.py b/api/core/workflow/graph_engine/orchestration/dispatcher.py index 5ae1c3bbbe..80f744c941 100644 --- a/api/core/workflow/graph_engine/orchestration/dispatcher.py +++ b/api/core/workflow/graph_engine/orchestration/dispatcher.py @@ -10,11 +10,11 @@ from typing import TYPE_CHECKING, final from core.workflow.graph_events.base import GraphNodeEventBase -from ..event_management import EventCollector, EventEmitter +from ..event_management import EventManager from .execution_coordinator import ExecutionCoordinator if TYPE_CHECKING: - from ..event_management import EventHandlerRegistry + from ..event_management import EventHandler logger = logging.getLogger(__name__) @@ -31,11 +31,11 @@ class Dispatcher: def __init__( self, event_queue: queue.Queue[GraphNodeEventBase], - event_handler: "EventHandlerRegistry", - event_collector: EventCollector, + event_handler: "EventHandler", + event_collector: EventManager, execution_coordinator: ExecutionCoordinator, max_execution_time: int, - event_emitter: EventEmitter | None = None, + event_emitter: EventManager | None = None, ) -> None: """ Initialize the dispatcher. 
@@ -43,10 +43,10 @@ class Dispatcher: Args: event_queue: Queue of events from workers event_handler: Event handler registry for processing events - event_collector: Event collector for collecting unhandled events + event_collector: Event manager for collecting unhandled events execution_coordinator: Coordinator for execution flow max_execution_time: Maximum execution time in seconds - event_emitter: Optional event emitter to signal completion + event_emitter: Optional event manager to signal completion """ self._event_queue = event_queue self._event_handler = event_handler diff --git a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py index 63e512f7b3..3dd443ddb3 100644 --- a/api/core/workflow/graph_engine/orchestration/execution_coordinator.py +++ b/api/core/workflow/graph_engine/orchestration/execution_coordinator.py @@ -6,12 +6,12 @@ from typing import TYPE_CHECKING, final from ..command_processing import CommandProcessor from ..domain import GraphExecution -from ..event_management import EventCollector +from ..event_management import EventManager from ..state_management import UnifiedStateManager from ..worker_management import SimpleWorkerPool if TYPE_CHECKING: - from ..event_management import EventHandlerRegistry + from ..event_management import EventHandler @final @@ -27,8 +27,8 @@ class ExecutionCoordinator: self, graph_execution: GraphExecution, state_manager: UnifiedStateManager, - event_handler: "EventHandlerRegistry", - event_collector: EventCollector, + event_handler: "EventHandler", + event_collector: EventManager, command_processor: CommandProcessor, worker_pool: SimpleWorkerPool, ) -> None: @@ -39,7 +39,7 @@ class ExecutionCoordinator: graph_execution: Graph execution aggregate state_manager: Unified state manager event_handler: Event handler registry for processing events - event_collector: Event collector for collecting events + event_collector: Event manager for collecting events command_processor: Processor for commands worker_pool: Pool of workers """ From f11131f8b502b635c0ad59d95ec3c0d12149957f Mon Sep 17 00:00:00 2001 From: 17hz <0x149527@gmail.com> Date: Mon, 1 Sep 2025 13:50:33 +0800 Subject: [PATCH 31/96] fix: basepath did not read from the environment variable (#24870) --- web/next.config.js | 4 +--- web/utils/var-basePath.js | 6 ------ web/utils/var.ts | 2 +- 3 files changed, 2 insertions(+), 10 deletions(-) delete mode 100644 web/utils/var-basePath.js diff --git a/web/next.config.js b/web/next.config.js index 6920a47fbf..e039ba9284 100644 --- a/web/next.config.js +++ b/web/next.config.js @@ -1,4 +1,3 @@ -const { basePath, assetPrefix } = require('./utils/var-basePath') const { codeInspectorPlugin } = require('code-inspector-plugin') const withMDX = require('@next/mdx')({ extension: /\.mdx?$/, @@ -24,8 +23,7 @@ const remoteImageURLs = [hasSetWebPrefix ? 
new URL(`${process.env.NEXT_PUBLIC_WE /** @type {import('next').NextConfig} */ const nextConfig = { - basePath, - assetPrefix, + basePath: process.env.NEXT_PUBLIC_BASE_PATH || '', webpack: (config, { dev, isServer }) => { if (dev) { config.plugins.push(codeInspectorPlugin({ bundler: 'webpack' })) diff --git a/web/utils/var-basePath.js b/web/utils/var-basePath.js deleted file mode 100644 index ff6dd505ea..0000000000 --- a/web/utils/var-basePath.js +++ /dev/null @@ -1,6 +0,0 @@ -// export basePath to next.config.js -// same as the one exported from var.ts -module.exports = { - basePath: process.env.NEXT_PUBLIC_BASE_PATH || '', - assetPrefix: '', -} diff --git a/web/utils/var.ts b/web/utils/var.ts index 4bbb7ca631..e3320a099d 100644 --- a/web/utils/var.ts +++ b/web/utils/var.ts @@ -118,7 +118,7 @@ export const getVars = (value: string) => { // Set the value of basePath // example: /dify -export const basePath = '' +export const basePath = process.env.NEXT_PUBLIC_BASE_PATH || '' export function getMarketplaceUrl(path: string, params?: Record) { const searchParams = new URLSearchParams({ source: encodeURIComponent(window.location.origin) }) From ffba341258b6ec96301c10279754481eff0db5bb Mon Sep 17 00:00:00 2001 From: willzhao Date: Mon, 1 Sep 2025 14:05:32 +0800 Subject: [PATCH 32/96] [CHORE]: remove redundant-cast (#24807) --- api/core/app/apps/advanced_chat/app_runner.py | 2 +- api/core/helper/encrypter.py | 2 +- api/core/model_manager.py | 18 ---------------- api/core/prompt/utils/prompt_message_util.py | 1 - api/core/provider_manager.py | 6 +++--- .../datasource/vdb/qdrant/qdrant_vector.py | 3 +-- api/core/rag/extractor/markdown_extractor.py | 4 ++-- api/core/rag/extractor/notion_extractor.py | 2 +- api/core/rag/extractor/pdf_extractor.py | 4 ++-- api/core/tools/tool_manager.py | 21 ++++++++----------- api/core/tools/utils/message_transformer.py | 5 ++--- .../tools/utils/model_invocation_utils.py | 19 +++++++---------- api/core/tools/workflow_as_tool/tool.py | 6 +++--- api/core/variables/variables.py | 4 ++-- .../workflow/graph_engine/graph_engine.py | 2 +- api/core/workflow/nodes/agent/agent_node.py | 5 ++--- .../workflow/nodes/document_extractor/node.py | 4 ++-- .../parameter_extractor_node.py | 2 +- .../question_classifier_node.py | 4 ++-- api/core/workflow/nodes/tool/tool_node.py | 4 ++-- api/core/workflow/workflow_entry.py | 3 +-- api/factories/file_factory.py | 3 +-- api/models/tools.py | 2 +- api/services/account_service.py | 6 +++--- api/services/annotation_service.py | 6 +++--- .../workflow/nodes/test_code.py | 6 ------ 26 files changed, 54 insertions(+), 90 deletions(-) diff --git a/api/core/app/apps/advanced_chat/app_runner.py b/api/core/app/apps/advanced_chat/app_runner.py index 3de2f5ca9e..8d256da9cb 100644 --- a/api/core/app/apps/advanced_chat/app_runner.py +++ b/api/core/app/apps/advanced_chat/app_runner.py @@ -140,7 +140,7 @@ class AdvancedChatAppRunner(WorkflowBasedAppRunner): environment_variables=self._workflow.environment_variables, # Based on the definition of `VariableUnion`, # `list[Variable]` can be safely used as `list[VariableUnion]` since they are compatible. 
- conversation_variables=cast(list[VariableUnion], conversation_variables), + conversation_variables=conversation_variables, ) # init graph diff --git a/api/core/helper/encrypter.py b/api/core/helper/encrypter.py index cac7e8e6e0..383a2dd57e 100644 --- a/api/core/helper/encrypter.py +++ b/api/core/helper/encrypter.py @@ -3,7 +3,7 @@ import base64 from libs import rsa -def obfuscated_token(token: str): +def obfuscated_token(token: str) -> str: if not token: return token if len(token) <= 8: diff --git a/api/core/model_manager.py b/api/core/model_manager.py index 51af3d1877..e567565548 100644 --- a/api/core/model_manager.py +++ b/api/core/model_manager.py @@ -158,8 +158,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, LargeLanguageModel): raise Exception("Model type instance is not LargeLanguageModel") - - self.model_type_instance = cast(LargeLanguageModel, self.model_type_instance) return cast( Union[LLMResult, Generator], self._round_robin_invoke( @@ -188,8 +186,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, LargeLanguageModel): raise Exception("Model type instance is not LargeLanguageModel") - - self.model_type_instance = cast(LargeLanguageModel, self.model_type_instance) return cast( int, self._round_robin_invoke( @@ -214,8 +210,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, TextEmbeddingModel): raise Exception("Model type instance is not TextEmbeddingModel") - - self.model_type_instance = cast(TextEmbeddingModel, self.model_type_instance) return cast( TextEmbeddingResult, self._round_robin_invoke( @@ -237,8 +231,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, TextEmbeddingModel): raise Exception("Model type instance is not TextEmbeddingModel") - - self.model_type_instance = cast(TextEmbeddingModel, self.model_type_instance) return cast( list[int], self._round_robin_invoke( @@ -269,8 +261,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, RerankModel): raise Exception("Model type instance is not RerankModel") - - self.model_type_instance = cast(RerankModel, self.model_type_instance) return cast( RerankResult, self._round_robin_invoke( @@ -295,8 +285,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, ModerationModel): raise Exception("Model type instance is not ModerationModel") - - self.model_type_instance = cast(ModerationModel, self.model_type_instance) return cast( bool, self._round_robin_invoke( @@ -318,8 +306,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, Speech2TextModel): raise Exception("Model type instance is not Speech2TextModel") - - self.model_type_instance = cast(Speech2TextModel, self.model_type_instance) return cast( str, self._round_robin_invoke( @@ -343,8 +329,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, TTSModel): raise Exception("Model type instance is not TTSModel") - - self.model_type_instance = cast(TTSModel, self.model_type_instance) return cast( Iterable[bytes], self._round_robin_invoke( @@ -404,8 +388,6 @@ class ModelInstance: """ if not isinstance(self.model_type_instance, TTSModel): raise Exception("Model type instance is not TTSModel") - - self.model_type_instance = cast(TTSModel, self.model_type_instance) return self.model_type_instance.get_tts_model_voices( model=self.model, credentials=self.credentials, language=language ) diff --git a/api/core/prompt/utils/prompt_message_util.py b/api/core/prompt/utils/prompt_message_util.py index 2f4e651461..cdc6ccc821 
100644 --- a/api/core/prompt/utils/prompt_message_util.py +++ b/api/core/prompt/utils/prompt_message_util.py @@ -87,7 +87,6 @@ class PromptMessageUtil: if isinstance(prompt_message.content, list): for content in prompt_message.content: if content.type == PromptMessageContentType.TEXT: - content = cast(TextPromptMessageContent, content) text += content.data else: content = cast(ImagePromptMessageContent, content) diff --git a/api/core/provider_manager.py b/api/core/provider_manager.py index 28a4ce0778..cad0de6478 100644 --- a/api/core/provider_manager.py +++ b/api/core/provider_manager.py @@ -2,7 +2,7 @@ import contextlib import json from collections import defaultdict from json import JSONDecodeError -from typing import Any, Optional, cast +from typing import Any, Optional from sqlalchemy import select from sqlalchemy.exc import IntegrityError @@ -154,8 +154,8 @@ class ProviderManager: for provider_entity in provider_entities: # handle include, exclude if is_filtered( - include_set=cast(set[str], dify_config.POSITION_PROVIDER_INCLUDES_SET), - exclude_set=cast(set[str], dify_config.POSITION_PROVIDER_EXCLUDES_SET), + include_set=dify_config.POSITION_PROVIDER_INCLUDES_SET, + exclude_set=dify_config.POSITION_PROVIDER_EXCLUDES_SET, data=provider_entity, name_func=lambda x: x.provider, ): diff --git a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py index fcf3a6d126..41ad5e57e6 100644 --- a/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py +++ b/api/core/rag/datasource/vdb/qdrant/qdrant_vector.py @@ -3,7 +3,7 @@ import os import uuid from collections.abc import Generator, Iterable, Sequence from itertools import islice -from typing import TYPE_CHECKING, Any, Optional, Union, cast +from typing import TYPE_CHECKING, Any, Optional, Union import qdrant_client from flask import current_app @@ -426,7 +426,6 @@ class QdrantVector(BaseVector): def _reload_if_needed(self): if isinstance(self._client, QdrantLocal): - self._client = cast(QdrantLocal, self._client) self._client._load() @classmethod diff --git a/api/core/rag/extractor/markdown_extractor.py b/api/core/rag/extractor/markdown_extractor.py index c97765b1dc..3845392c8d 100644 --- a/api/core/rag/extractor/markdown_extractor.py +++ b/api/core/rag/extractor/markdown_extractor.py @@ -2,7 +2,7 @@ import re from pathlib import Path -from typing import Optional, cast +from typing import Optional from core.rag.extractor.extractor_base import BaseExtractor from core.rag.extractor.helpers import detect_file_encodings @@ -76,7 +76,7 @@ class MarkdownExtractor(BaseExtractor): markdown_tups.append((current_header, current_text)) markdown_tups = [ - (re.sub(r"#", "", cast(str, key)).strip() if key else None, re.sub(r"<.*?>", "", value)) + (re.sub(r"#", "", key).strip() if key else None, re.sub(r"<.*?>", "", value)) for key, value in markdown_tups ] diff --git a/api/core/rag/extractor/notion_extractor.py b/api/core/rag/extractor/notion_extractor.py index 17f4d1af2d..3d4b898c93 100644 --- a/api/core/rag/extractor/notion_extractor.py +++ b/api/core/rag/extractor/notion_extractor.py @@ -385,4 +385,4 @@ class NotionExtractor(BaseExtractor): f"No notion data source binding found for tenant {tenant_id} and notion workspace {notion_workspace_id}" ) - return cast(str, data_source_binding.access_token) + return data_source_binding.access_token diff --git a/api/core/rag/extractor/pdf_extractor.py b/api/core/rag/extractor/pdf_extractor.py index 7dfe2e357c..3c43f34104 100644 --- 
a/api/core/rag/extractor/pdf_extractor.py +++ b/api/core/rag/extractor/pdf_extractor.py @@ -2,7 +2,7 @@ import contextlib from collections.abc import Iterator -from typing import Optional, cast +from typing import Optional from core.rag.extractor.blob.blob import Blob from core.rag.extractor.extractor_base import BaseExtractor @@ -27,7 +27,7 @@ class PdfExtractor(BaseExtractor): plaintext_file_exists = False if self._file_cache_key: with contextlib.suppress(FileNotFoundError): - text = cast(bytes, storage.load(self._file_cache_key)).decode("utf-8") + text = storage.load(self._file_cache_key).decode("utf-8") plaintext_file_exists = True return [Document(page_content=text)] documents = list(self.load()) diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 3454ec3489..b338a779ac 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -331,16 +331,13 @@ class ToolManager: if controller_tools is None or len(controller_tools) == 0: raise ToolProviderNotFoundError(f"workflow provider {provider_id} not found") - return cast( - WorkflowTool, - controller.get_tools(tenant_id=workflow_provider.tenant_id)[0].fork_tool_runtime( - runtime=ToolRuntime( - tenant_id=tenant_id, - credentials={}, - invoke_from=invoke_from, - tool_invoke_from=tool_invoke_from, - ) - ), + return controller.get_tools(tenant_id=workflow_provider.tenant_id)[0].fork_tool_runtime( + runtime=ToolRuntime( + tenant_id=tenant_id, + credentials={}, + invoke_from=invoke_from, + tool_invoke_from=tool_invoke_from, + ) ) elif provider_type == ToolProviderType.APP: raise NotImplementedError("app provider not implemented") @@ -648,8 +645,8 @@ class ToolManager: for provider in builtin_providers: # handle include, exclude if is_filtered( - include_set=cast(set[str], dify_config.POSITION_TOOL_INCLUDES_SET), - exclude_set=cast(set[str], dify_config.POSITION_TOOL_EXCLUDES_SET), + include_set=dify_config.POSITION_TOOL_INCLUDES_SET, + exclude_set=dify_config.POSITION_TOOL_EXCLUDES_SET, data=provider, name_func=lambda x: x.identity.name, ): diff --git a/api/core/tools/utils/message_transformer.py b/api/core/tools/utils/message_transformer.py index 8357dac0d7..bf075bd730 100644 --- a/api/core/tools/utils/message_transformer.py +++ b/api/core/tools/utils/message_transformer.py @@ -3,7 +3,7 @@ from collections.abc import Generator from datetime import date, datetime from decimal import Decimal from mimetypes import guess_extension -from typing import Optional, cast +from typing import Optional from uuid import UUID import numpy as np @@ -159,8 +159,7 @@ class ToolFileMessageTransformer: elif message.type == ToolInvokeMessage.MessageType.JSON: if isinstance(message.message, ToolInvokeMessage.JsonMessage): - json_msg = cast(ToolInvokeMessage.JsonMessage, message.message) - json_msg.json_object = safe_json_value(json_msg.json_object) + message.message.json_object = safe_json_value(message.message.json_object) yield message else: yield message diff --git a/api/core/tools/utils/model_invocation_utils.py b/api/core/tools/utils/model_invocation_utils.py index 3f59b3f472..251d914800 100644 --- a/api/core/tools/utils/model_invocation_utils.py +++ b/api/core/tools/utils/model_invocation_utils.py @@ -129,17 +129,14 @@ class ModelInvocationUtils: db.session.commit() try: - response: LLMResult = cast( - LLMResult, - model_instance.invoke_llm( - prompt_messages=prompt_messages, - model_parameters=model_parameters, - tools=[], - stop=[], - stream=False, - user=user_id, - callbacks=[], - ), + response: 
LLMResult = model_instance.invoke_llm( + prompt_messages=prompt_messages, + model_parameters=model_parameters, + tools=[], + stop=[], + stream=False, + user=user_id, + callbacks=[], ) except InvokeRateLimitError as e: raise InvokeModelError(f"Invoke rate limit error: {e}") diff --git a/api/core/tools/workflow_as_tool/tool.py b/api/core/tools/workflow_as_tool/tool.py index 1387df5973..ea219af684 100644 --- a/api/core/tools/workflow_as_tool/tool.py +++ b/api/core/tools/workflow_as_tool/tool.py @@ -1,7 +1,7 @@ import json import logging from collections.abc import Generator -from typing import Any, Optional, cast +from typing import Any, Optional from core.file import FILE_MODEL_IDENTITY, File, FileTransferMethod from core.tools.__base.tool import Tool @@ -204,14 +204,14 @@ class WorkflowTool(Tool): item = self._update_file_mapping(item) file = build_from_mapping( mapping=item, - tenant_id=str(cast(ToolRuntime, self.runtime).tenant_id), + tenant_id=str(self.runtime.tenant_id), ) files.append(file) elif isinstance(value, dict) and value.get("dify_model_identity") == FILE_MODEL_IDENTITY: value = self._update_file_mapping(value) file = build_from_mapping( mapping=value, - tenant_id=str(cast(ToolRuntime, self.runtime).tenant_id), + tenant_id=str(self.runtime.tenant_id), ) files.append(file) diff --git a/api/core/variables/variables.py b/api/core/variables/variables.py index 16c8116ac1..a994730cd5 100644 --- a/api/core/variables/variables.py +++ b/api/core/variables/variables.py @@ -1,5 +1,5 @@ from collections.abc import Sequence -from typing import Annotated, TypeAlias, cast +from typing import Annotated, TypeAlias from uuid import uuid4 from pydantic import Discriminator, Field, Tag @@ -86,7 +86,7 @@ class SecretVariable(StringVariable): @property def log(self) -> str: - return cast(str, encrypter.obfuscated_token(self.value)) + return encrypter.obfuscated_token(self.value) class NoneVariable(NoneSegment, Variable): diff --git a/api/core/workflow/graph_engine/graph_engine.py b/api/core/workflow/graph_engine/graph_engine.py index 03b920ccbb..188d0c475f 100644 --- a/api/core/workflow/graph_engine/graph_engine.py +++ b/api/core/workflow/graph_engine/graph_engine.py @@ -374,7 +374,7 @@ class GraphEngine: if len(sub_edge_mappings) == 0: continue - edge = cast(GraphEdge, sub_edge_mappings[0]) + edge = sub_edge_mappings[0] if edge.run_condition is None: logger.warning("Edge %s run condition is None", edge.target_node_id) continue diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 144f036aa4..9e5d5e62b4 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -153,7 +153,7 @@ class AgentNode(BaseNode): messages=message_stream, tool_info={ "icon": self.agent_strategy_icon, - "agent_strategy": cast(AgentNodeData, self._node_data).agent_strategy_name, + "agent_strategy": self._node_data.agent_strategy_name, }, parameters_for_log=parameters_for_log, user_id=self.user_id, @@ -394,8 +394,7 @@ class AgentNode(BaseNode): current_plugin = next( plugin for plugin in plugins - if f"{plugin.plugin_id}/{plugin.name}" - == cast(AgentNodeData, self._node_data).agent_strategy_provider_name + if f"{plugin.plugin_id}/{plugin.name}" == self._node_data.agent_strategy_provider_name ) icon = current_plugin.declaration.icon except StopIteration: diff --git a/api/core/workflow/nodes/document_extractor/node.py b/api/core/workflow/nodes/document_extractor/node.py index b820999c3a..bb09b1a5dd 100644 --- 
a/api/core/workflow/nodes/document_extractor/node.py +++ b/api/core/workflow/nodes/document_extractor/node.py @@ -302,12 +302,12 @@ def _extract_text_from_yaml(file_content: bytes) -> str: encoding = "utf-8" yaml_data = yaml.safe_load_all(file_content.decode(encoding, errors="ignore")) - return cast(str, yaml.dump_all(yaml_data, allow_unicode=True, sort_keys=False)) + return yaml.dump_all(yaml_data, allow_unicode=True, sort_keys=False) except (UnicodeDecodeError, LookupError, yaml.YAMLError) as e: # If decoding fails, try with utf-8 as last resort try: yaml_data = yaml.safe_load_all(file_content.decode("utf-8", errors="ignore")) - return cast(str, yaml.dump_all(yaml_data, allow_unicode=True, sort_keys=False)) + return yaml.dump_all(yaml_data, allow_unicode=True, sort_keys=False) except (UnicodeDecodeError, yaml.YAMLError): raise TextExtractionError(f"Failed to decode or parse YAML file: {e}") from e diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 3dcde5ad81..43edf7eac6 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -139,7 +139,7 @@ class ParameterExtractorNode(BaseNode): """ Run the node. """ - node_data = cast(ParameterExtractorNodeData, self._node_data) + node_data = self._node_data variable = self.graph_runtime_state.variable_pool.get(node_data.query) query = variable.text if variable else "" diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index 3e4984ecd5..ba4e55bb89 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -1,6 +1,6 @@ import json from collections.abc import Mapping, Sequence -from typing import TYPE_CHECKING, Any, Optional, cast +from typing import TYPE_CHECKING, Any, Optional from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.memory.token_buffer_memory import TokenBufferMemory @@ -109,7 +109,7 @@ class QuestionClassifierNode(BaseNode): return "1" def _run(self): - node_data = cast(QuestionClassifierNodeData, self._node_data) + node_data = self._node_data variable_pool = self.graph_runtime_state.variable_pool # extract variables diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 4c8e13de70..1a85c08b5b 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -1,5 +1,5 @@ from collections.abc import Generator, Mapping, Sequence -from typing import Any, Optional, cast +from typing import Any, Optional from sqlalchemy import select from sqlalchemy.orm import Session @@ -57,7 +57,7 @@ class ToolNode(BaseNode): Run the tool node """ - node_data = cast(ToolNodeData, self._node_data) + node_data = self._node_data # fetch tool icon tool_info = { diff --git a/api/core/workflow/workflow_entry.py b/api/core/workflow/workflow_entry.py index 801e36e272..e9b73df0f3 100644 --- a/api/core/workflow/workflow_entry.py +++ b/api/core/workflow/workflow_entry.py @@ -2,7 +2,7 @@ import logging import time import uuid from collections.abc import Generator, Mapping, Sequence -from typing import Any, Optional, cast +from typing import Any, Optional from configs import dify_config from core.app.apps.exc 
import GenerateTaskStoppedError @@ -261,7 +261,6 @@ class WorkflowEntry: environment_variables=[], ) - node_cls = cast(type[BaseNode], node_cls) # init workflow run state node: BaseNode = node_cls( id=str(uuid.uuid4()), diff --git a/api/factories/file_factory.py b/api/factories/file_factory.py index 0ea7d3ae1e..62e3bfa3ba 100644 --- a/api/factories/file_factory.py +++ b/api/factories/file_factory.py @@ -3,7 +3,7 @@ import os import urllib.parse import uuid from collections.abc import Callable, Mapping, Sequence -from typing import Any, cast +from typing import Any import httpx from sqlalchemy import select @@ -258,7 +258,6 @@ def _get_remote_file_info(url: str): mime_type = "" resp = ssrf_proxy.head(url, follow_redirects=True) - resp = cast(httpx.Response, resp) if resp.status_code == httpx.codes.OK: if content_disposition := resp.headers.get("Content-Disposition"): filename = str(content_disposition.split("filename=")[-1].strip('"')) diff --git a/api/models/tools.py b/api/models/tools.py index e0c9fa6ffc..d88d817374 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -308,7 +308,7 @@ class MCPToolProvider(Base): @property def decrypted_server_url(self) -> str: - return cast(str, encrypter.decrypt_token(self.tenant_id, self.server_url)) + return encrypter.decrypt_token(self.tenant_id, self.server_url) @property def masked_server_url(self) -> str: diff --git a/api/services/account_service.py b/api/services/account_service.py index 089e667166..50ce171ded 100644 --- a/api/services/account_service.py +++ b/api/services/account_service.py @@ -146,7 +146,7 @@ class AccountService: account.last_active_at = naive_utc_now() db.session.commit() - return cast(Account, account) + return account @staticmethod def get_account_jwt_token(account: Account) -> str: @@ -191,7 +191,7 @@ class AccountService: db.session.commit() - return cast(Account, account) + return account @staticmethod def update_account_password(account, password, new_password): @@ -1127,7 +1127,7 @@ class TenantService: def get_custom_config(tenant_id: str) -> dict: tenant = db.get_or_404(Tenant, tenant_id) - return cast(dict, tenant.custom_config_dict) + return tenant.custom_config_dict @staticmethod def is_owner(account: Account, tenant: Tenant) -> bool: diff --git a/api/services/annotation_service.py b/api/services/annotation_service.py index 6603063c22..9ee92bc2dc 100644 --- a/api/services/annotation_service.py +++ b/api/services/annotation_service.py @@ -1,5 +1,5 @@ import uuid -from typing import cast +from typing import Optional import pandas as pd from flask_login import current_user @@ -40,7 +40,7 @@ class AppAnnotationService: if not message: raise NotFound("Message Not Exists.") - annotation = message.annotation + annotation: Optional[MessageAnnotation] = message.annotation # save the message annotation if annotation: annotation.content = args["answer"] @@ -70,7 +70,7 @@ class AppAnnotationService: app_id, annotation_setting.collection_binding_id, ) - return cast(MessageAnnotation, annotation) + return annotation @classmethod def enable_app_annotation(cls, args: dict, app_id: str) -> dict: diff --git a/api/tests/integration_tests/workflow/nodes/test_code.py b/api/tests/integration_tests/workflow/nodes/test_code.py index 4f659c5e13..eb85d6118e 100644 --- a/api/tests/integration_tests/workflow/nodes/test_code.py +++ b/api/tests/integration_tests/workflow/nodes/test_code.py @@ -1,7 +1,6 @@ import time import uuid from os import getenv -from typing import cast import pytest @@ -13,7 +12,6 @@ from 
core.workflow.graph_engine.entities.graph import Graph
 from core.workflow.graph_engine.entities.graph_init_params import GraphInitParams
 from core.workflow.graph_engine.entities.graph_runtime_state import GraphRuntimeState
 from core.workflow.nodes.code.code_node import CodeNode
-from core.workflow.nodes.code.entities import CodeNodeData
 from core.workflow.system_variable import SystemVariable
 from models.enums import UserFrom
 from models.workflow import WorkflowType
@@ -238,8 +236,6 @@ def test_execute_code_output_validator_depth():
         "object_validator": {"result": 1, "depth": {"depth": {"depth": 1}}},
     }

-    node._node_data = cast(CodeNodeData, node._node_data)
-
     # validate
     node._transform_result(result, node._node_data.outputs)

@@ -334,8 +330,6 @@ def test_execute_code_output_object_list():
         ]
     }

-    node._node_data = cast(CodeNodeData, node._node_data)
-
     # validate
     node._transform_result(result, node._node_data.outputs)

From 60d9d0584a6073ea4d0cc2925f74284be674748c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E6=9C=A8=E4=B9=8B=E6=9C=AC=E6=BE=AA?=
Date: Mon, 1 Sep 2025 14:28:21 +0800
Subject: [PATCH 33/96] refactor: migrate marketplace.py from requests to httpx (#24015)

---
 api/core/helper/marketplace.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/api/core/helper/marketplace.py b/api/core/helper/marketplace.py
index fe3078923d..e837f2fd38 100644
--- a/api/core/helper/marketplace.py
+++ b/api/core/helper/marketplace.py
@@ -1,6 +1,6 @@
 from collections.abc import Sequence

-import requests
+import httpx
 from yarl import URL

 from configs import dify_config
@@ -23,7 +23,7 @@ def batch_fetch_plugin_manifests(plugin_ids: list[str]) -> Sequence[MarketplaceP
         return []

     url = str(marketplace_api_url / "api/v1/plugins/batch")
-    response = requests.post(url, json={"plugin_ids": plugin_ids})
+    response = httpx.post(url, json={"plugin_ids": plugin_ids})
     response.raise_for_status()
     return [MarketplacePluginDeclaration(**plugin) for plugin in response.json()["data"]["plugins"]]

@@ -36,7 +36,7 @@ def batch_fetch_plugin_manifests_ignore_deserialization_error(
         return []

     url = str(marketplace_api_url / "api/v1/plugins/batch")
-    response = requests.post(url, json={"plugin_ids": plugin_ids})
+    response = httpx.post(url, json={"plugin_ids": plugin_ids})
     response.raise_for_status()
     result: list[MarketplacePluginDeclaration] = []
     for plugin in response.json()["data"]["plugins"]:
@@ -50,5 +50,5 @@ def record_install_plugin_event(plugin_unique_identifier: str):
     url = str(marketplace_api_url / "api/v1/stats/plugins/install_count")
-    response = requests.post(url, json={"unique_identifier": plugin_unique_identifier})
+    response = httpx.post(url, json={"unique_identifier": plugin_unique_identifier})
     response.raise_for_status()

From 1b401063e8d9bb44e5f0d4f9fc23fc99ddbee854 Mon Sep 17 00:00:00 2001
From: 17hz <0x149527@gmail.com>
Date: Mon, 1 Sep 2025 14:45:44 +0800
Subject: [PATCH 34/96] chore: pnpx deprecation (#24868)

---
 web/package.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/web/package.json b/web/package.json
index a422c7fd6c..528f5e468f 100644
--- a/web/package.json
+++ b/web/package.json
@@ -23,8 +23,8 @@
     "build": "next build",
     "build:docker": "next build && node scripts/optimize-standalone.js",
     "start": "cp -r .next/static .next/standalone/.next/static && cp -r public .next/standalone/public && cross-env PORT=$npm_config_port HOSTNAME=$npm_config_host node .next/standalone/server.js",
-    "lint": "pnpx oxlint && pnpm eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache",
-    "lint-only-show-error": "pnpx oxlint && pnpm eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --quiet",
+    "lint": "npx oxlint && pnpm eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache",
+    "lint-only-show-error": "npm oxlint && pnpm eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --quiet",
     "fix": "eslint --fix .",
     "eslint-fix": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --fix",
     "eslint-fix-only-show-error": "eslint --cache --cache-location node_modules/.cache/eslint/.eslint-cache --fix --quiet",

From d5a521eef2b436f5a98aa21edb6844ee1c67b003 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E8=80=90=E5=B0=8F=E5=BF=83?=
Date: Mon, 1 Sep 2025 14:48:56 +0800
Subject: [PATCH 35/96] fix: Fix database connection leak in EasyUIBasedGenerateTaskPipeline (#24815)

---
 .../task_pipeline/easy_ui_based_generate_task_pipeline.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
index 471118c8cb..e3b917067f 100644
--- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
+++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py
@@ -472,9 +472,10 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline):
         :param event: agent thought event
         :return:
         """
-        agent_thought: Optional[MessageAgentThought] = (
-            db.session.query(MessageAgentThought).where(MessageAgentThought.id == event.agent_thought_id).first()
-        )
+        with Session(db.engine, expire_on_commit=False) as session:
+            agent_thought: Optional[MessageAgentThought] = (
+                session.query(MessageAgentThought).where(MessageAgentThought.id == event.agent_thought_id).first()
+            )

         if agent_thought:
             return AgentThoughtStreamResponse(

From 414ee5197518adbd82c325eb151cc48667bcf0a5 Mon Sep 17 00:00:00 2001
From: Tianyi Jing
Date: Mon, 1 Sep 2025 15:21:36 +0800
Subject: [PATCH 36/96] fix: add missing form for boolean types (#24812)

Signed-off-by: jingfelix

---
 .../base/form/components/base/base-field.tsx | 19 +++++++++++++++++++
 web/app/components/base/form/types.ts        |  1 +
 2 files changed, 20 insertions(+)

diff --git a/web/app/components/base/form/components/base/base-field.tsx b/web/app/components/base/form/components/base/base-field.tsx
index 4005bab6bc..35ca251a5b 100644
--- a/web/app/components/base/form/components/base/base-field.tsx
+++ b/web/app/components/base/form/components/base/base-field.tsx
@@ -12,6 +12,7 @@ import PureSelect from '@/app/components/base/select/pure'
 import type { FormSchema } from '@/app/components/base/form/types'
 import { FormTypeEnum } from '@/app/components/base/form/types'
 import { useRenderI18nObject } from '@/hooks/use-i18n'
+import Radio from '@/app/components/base/radio'
 import RadioE from '@/app/components/base/radio/ui'

 export type BaseFieldProps = {
@@ -102,6 +103,12 @@ const BaseField = ({
     })
   }, [values, show_on])

+  const booleanRadioValue = useMemo(() => {
+    if (value === null || value === undefined)
+      return undefined
+    return value ?
1 : 0 + }, [value]) + if (!show) return null @@ -204,6 +211,18 @@ const BaseField = ({ ) } + { + formSchema.type === FormTypeEnum.boolean && ( + field.handleChange(val === 1)} + > + True + False + + ) + } { formSchema.url && ( Date: Mon, 1 Sep 2025 15:31:59 +0800 Subject: [PATCH 37/96] CI: add TS indentation check via esLint (#24810) --- .github/workflows/style.yml | 4 +- web/__tests__/check-i18n.test.ts | 2 +- web/__tests__/description-validation.test.tsx | 4 +- web/__tests__/document-list-sorting.test.tsx | 2 +- .../plugin-tool-workflow-error.test.tsx | 2 +- web/__tests__/real-browser-flicker.test.tsx | 2 +- .../workflow-parallel-limit.test.tsx | 4 +- .../svg-attribute-error-reproduction.spec.tsx | 4 +- .../account-page/AvatarWithEdit.tsx | 2 +- web/app/components/app-sidebar/basic.tsx | 4 +- web/app/components/app-sidebar/index.tsx | 4 +- .../sidebar-animation-issues.spec.tsx | 2 +- web/app/components/app/annotation/index.tsx | 2 +- .../config-var/config-modal/type-select.tsx | 10 +- .../params-config/config-content.tsx | 1 - .../configuration/debug/chat-user-input.tsx | 8 +- web/app/components/app/log/list.tsx | 124 +++++++++--------- web/app/components/app/overview/app-card.tsx | 2 +- .../app/overview/embedded/index.tsx | 8 +- .../app/overview/settings/index.tsx | 2 +- web/app/components/apps/list.tsx | 2 +- .../embedded-chatbot/inputs-form/content.tsx | 12 +- web/app/components/base/checkbox/index.tsx | 12 +- .../base/date-and-time-picker/utils/dayjs.ts | 2 +- .../base/form/form-scenarios/demo/index.tsx | 2 +- web/app/components/base/form/types.ts | 10 +- web/app/components/base/mermaid/index.tsx | 6 +- .../plugins/current-block/component.tsx | 6 +- .../plugins/error-message-block/component.tsx | 6 +- .../plugins/last-run-block/component.tsx | 6 +- web/app/components/base/select/index.tsx | 48 +++---- .../base/tag-management/selector.tsx | 2 +- web/app/components/base/toast/index.tsx | 2 +- .../common/retrieval-param-config/index.tsx | 1 - .../create/website/base/options-wrap.tsx | 1 - .../datasets/create/website/index.tsx | 3 +- .../website/jina-reader/base/options-wrap.tsx | 1 - .../detail/batch-modal/csv-uploader.tsx | 2 +- .../create/InfoPanel.tsx | 6 +- .../components/chunk-detail-modal.tsx | 4 +- .../hooks/use-edit-dataset-metadata.ts | 1 - .../actions/commands/registry.ts | 4 +- .../components/goto-anything/actions/index.ts | 4 +- web/app/components/goto-anything/index.tsx | 2 +- .../data-source-website/index.tsx | 1 - .../add-credential-in-load-balancing.tsx | 6 +- .../model-auth/authorized/index.tsx | 10 +- .../model-load-balancing-modal.tsx | 4 +- .../install-bundle/item/github-item.tsx | 3 - .../install-bundle/steps/install-multi.tsx | 6 - .../install-from-github/steps/loaded.tsx | 1 - .../steps/uploading.tsx | 1 - .../plugins/marketplace/context.tsx | 1 - .../plugins/plugin-auth/authorized/index.tsx | 28 ++-- .../hooks/use-plugin-auth-action.ts | 6 +- .../app-selector/index.tsx | 2 +- .../plugin-detail-panel/detail-header.tsx | 2 +- .../plugin-detail-panel/endpoint-modal.tsx | 4 +- .../multiple-tool-selector/index.tsx | 2 +- .../tool-selector/reasoning-config-form.tsx | 12 +- .../components/plugins/plugin-item/action.tsx | 1 - .../auto-update-setting/index.tsx | 24 ++-- .../auto-update-setting/utils.ts | 12 +- .../update-plugin/downgrade-warning.tsx | 2 +- .../update-plugin/from-market-place.tsx | 76 +++++------ .../components/tools/mcp/detail/content.tsx | 1 - .../components/tools/mcp/mcp-service-card.tsx | 6 +- .../components/tools/utils/to-form-schema.ts | 18 +-- 
.../workflow-app/hooks/use-workflow-init.ts | 1 - .../workflow/block-selector/all-tools.tsx | 1 - .../market-place-plugin/action.tsx | 1 - .../market-place-plugin/list.tsx | 1 - .../workflow/block-selector/tool/tool.tsx | 1 - .../datasets-detail-store/provider.tsx | 1 - .../workflow/header/header-in-restoring.tsx | 40 +++--- .../header/version-history-button.tsx | 20 +-- .../workflow/hooks-store/provider.tsx | 1 - .../hooks/use-inspect-vars-crud-common.ts | 58 ++++---- .../hooks/use-nodes-available-var-list.ts | 6 +- .../use-workflow-node-started.ts | 2 +- .../components/agent-strategy-selector.tsx | 2 +- .../nodes/_base/components/agent-strategy.tsx | 2 +- .../components/before-run-form/form-item.tsx | 8 +- .../components/input-support-select-var.tsx | 1 - .../mcp-tool-not-support-tooltip.tsx | 2 +- .../nodes/_base/components/variable/utils.ts | 4 +- .../_base/components/variable/var-list.tsx | 10 +- .../variable/var-reference-picker.tsx | 2 +- .../_base/components/workflow-panel/index.tsx | 4 +- .../workflow-panel/last-run/index.tsx | 10 +- .../nodes/_base/hooks/use-output-var-list.ts | 8 +- .../components/workflow/nodes/agent/panel.tsx | 8 +- .../nodes/agent/use-single-run-form-params.ts | 2 +- .../assigner/components/var-list/index.tsx | 2 +- .../nodes/http/hooks/use-key-value-list.ts | 2 - .../workflow/nodes/http/use-config.ts | 1 - .../components/metadata/metadata-trigger.tsx | 1 - .../nodes/knowledge-retrieval/use-config.ts | 3 - .../json-importer.tsx | 1 - .../nodes/parameter-extractor/use-config.ts | 1 - .../components/class-list.tsx | 8 +- .../nodes/question-classifier/use-config.ts | 2 - .../workflow/nodes/tool/use-config.ts | 1 - .../nodes/tool/use-single-run-form-params.ts | 2 +- .../workflow/operator/export-image.tsx | 4 +- .../workflow/panel/inputs-panel.tsx | 2 +- .../workflow/panel/workflow-preview.tsx | 2 +- .../workflow/selection-contextmenu.tsx | 2 +- .../workflow/variable-inspect/empty.tsx | 2 +- .../workflow/variable-inspect/index.tsx | 10 +- web/app/education-apply/hooks.ts | 44 +++---- web/app/install/installForm.tsx | 2 +- web/eslint.config.mjs | 8 +- web/i18n/en-US/workflow.ts | 2 +- web/package.json | 1 + web/service/base.ts | 46 +++---- web/service/use-plugins-auth.ts | 26 ++-- web/utils/navigation.ts | 8 +- 118 files changed, 457 insertions(+), 489 deletions(-) diff --git a/.github/workflows/style.yml b/.github/workflows/style.yml index b6c9131c08..9c79dbc57e 100644 --- a/.github/workflows/style.yml +++ b/.github/workflows/style.yml @@ -89,7 +89,9 @@ jobs: - name: Web style check if: steps.changed-files.outputs.any_changed == 'true' working-directory: ./web - run: pnpm run lint + run: | + pnpm run lint + pnpm run eslint docker-compose-template: name: Docker Compose Template diff --git a/web/__tests__/check-i18n.test.ts b/web/__tests__/check-i18n.test.ts index b4c4f1540d..b579f22d4b 100644 --- a/web/__tests__/check-i18n.test.ts +++ b/web/__tests__/check-i18n.test.ts @@ -621,7 +621,7 @@ export default translation && !trimmed.startsWith('//')) break } - else { + else { break } diff --git a/web/__tests__/description-validation.test.tsx b/web/__tests__/description-validation.test.tsx index 85263b035f..a78a4e632e 100644 --- a/web/__tests__/description-validation.test.tsx +++ b/web/__tests__/description-validation.test.tsx @@ -60,7 +60,7 @@ describe('Description Validation Logic', () => { try { validateDescriptionLength(invalidDescription) } - catch (error) { + catch (error) { expect((error as Error).message).toBe(expectedErrorMessage) } }) @@ -86,7 +86,7 @@ 
describe('Description Validation Logic', () => { expect(() => validateDescriptionLength(testDescription)).not.toThrow() expect(validateDescriptionLength(testDescription)).toBe(testDescription) } - else { + else { expect(() => validateDescriptionLength(testDescription)).toThrow( 'Description cannot exceed 400 characters.', ) diff --git a/web/__tests__/document-list-sorting.test.tsx b/web/__tests__/document-list-sorting.test.tsx index 1510dbec23..77c0bb60cf 100644 --- a/web/__tests__/document-list-sorting.test.tsx +++ b/web/__tests__/document-list-sorting.test.tsx @@ -39,7 +39,7 @@ describe('Document List Sorting', () => { const result = aValue.localeCompare(bValue) return order === 'asc' ? result : -result } - else { + else { const result = aValue - bValue return order === 'asc' ? result : -result } diff --git a/web/__tests__/plugin-tool-workflow-error.test.tsx b/web/__tests__/plugin-tool-workflow-error.test.tsx index 370052bc80..87bda8fa13 100644 --- a/web/__tests__/plugin-tool-workflow-error.test.tsx +++ b/web/__tests__/plugin-tool-workflow-error.test.tsx @@ -196,7 +196,7 @@ describe('Plugin Tool Workflow Integration', () => { const _pluginId = (tool.uniqueIdentifier as any).split(':')[0] }).toThrow() } - else { + else { // Valid tools should work fine expect(() => { const _pluginId = tool.uniqueIdentifier.split(':')[0] diff --git a/web/__tests__/real-browser-flicker.test.tsx b/web/__tests__/real-browser-flicker.test.tsx index cf3abd5f80..52bdf4777f 100644 --- a/web/__tests__/real-browser-flicker.test.tsx +++ b/web/__tests__/real-browser-flicker.test.tsx @@ -252,7 +252,7 @@ describe('Real Browser Environment Dark Mode Flicker Test', () => { if (hasStyleChange) console.log('⚠️ Style changes detected - this causes visible flicker') - else + else console.log('✅ No style changes detected') expect(timingData.length).toBeGreaterThan(1) diff --git a/web/__tests__/workflow-parallel-limit.test.tsx b/web/__tests__/workflow-parallel-limit.test.tsx index 0843122ab4..64e9d328f0 100644 --- a/web/__tests__/workflow-parallel-limit.test.tsx +++ b/web/__tests__/workflow-parallel-limit.test.tsx @@ -15,7 +15,7 @@ const originalEnv = process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT function setupEnvironment(value?: string) { if (value) process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = value - else + else delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT // Clear module cache to force re-evaluation @@ -25,7 +25,7 @@ function setupEnvironment(value?: string) { function restoreEnvironment() { if (originalEnv) process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT = originalEnv - else + else delete process.env.NEXT_PUBLIC_MAX_PARALLEL_LIMIT jest.resetModules() diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx index a3281be8eb..b1e915b2bf 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx @@ -47,7 +47,7 @@ describe('SVG Attribute Error Reproduction', () => { console.log(` ${index + 1}. 
${error.substring(0, 100)}...`) }) } - else { + else { console.log('No inkscape errors found in this render') } @@ -150,7 +150,7 @@ describe('SVG Attribute Error Reproduction', () => { if (problematicKeys.length > 0) console.log(`🚨 PROBLEM: Still found problematic attributes: ${problematicKeys.join(', ')}`) - else + else console.log('✅ No problematic attributes found after normalization') }) }) diff --git a/web/app/account/(commonLayout)/account-page/AvatarWithEdit.tsx b/web/app/account/(commonLayout)/account-page/AvatarWithEdit.tsx index 0408d2ee34..5890c2ea92 100644 --- a/web/app/account/(commonLayout)/account-page/AvatarWithEdit.tsx +++ b/web/app/account/(commonLayout)/account-page/AvatarWithEdit.tsx @@ -106,7 +106,7 @@ const AvatarWithEdit = ({ onSave, ...props }: AvatarWithEditProps) => { onClick={() => { if (hoverArea === 'right' && !onAvatarError) setIsShowDeleteConfirm(true) - else + else setIsShowAvatarPicker(true) }} onMouseMove={(e) => { diff --git a/web/app/components/app-sidebar/basic.tsx b/web/app/components/app-sidebar/basic.tsx index 00357d6c27..77a965c03e 100644 --- a/web/app/components/app-sidebar/basic.tsx +++ b/web/app/components/app-sidebar/basic.tsx @@ -45,8 +45,8 @@ const ICON_MAP = { , dataset: , webapp:
    - -
    , + + , notion: , } diff --git a/web/app/components/app-sidebar/index.tsx b/web/app/components/app-sidebar/index.tsx index c3ff45d6a6..c60aa26f5d 100644 --- a/web/app/components/app-sidebar/index.tsx +++ b/web/app/components/app-sidebar/index.tsx @@ -62,12 +62,12 @@ const AppDetailNav = ({ title, desc, isExternal, icon, icon_background, navigati }, [appSidebarExpand, setAppSiderbarExpand]) if (inWorkflowCanvas && hideHeader) { - return ( + return (
    ) -} + } return (
    { })) }) - describe('Issue #1: Toggle Button Position Movement - FIXED', () => { + describe('Issue #1: Toggle Button Position Movement - FIXED', () => { it('should verify consistent padding prevents button position shift', () => { let expanded = false const handleToggle = () => { diff --git a/web/app/components/app/annotation/index.tsx b/web/app/components/app/annotation/index.tsx index bb2a95b0b5..afa8732701 100644 --- a/web/app/components/app/annotation/index.tsx +++ b/web/app/components/app/annotation/index.tsx @@ -84,7 +84,7 @@ const Annotation: FC = (props) => { setList(data as AnnotationItem[]) setTotal(total) } - finally { + finally { setIsLoading(false) } } diff --git a/web/app/components/app/configuration/config-var/config-modal/type-select.tsx b/web/app/components/app/configuration/config-var/config-modal/type-select.tsx index 3f6a01ed7c..beb7b03e37 100644 --- a/web/app/components/app/configuration/config-var/config-modal/type-select.tsx +++ b/web/app/components/app/configuration/config-var/config-modal/type-select.tsx @@ -52,13 +52,13 @@ const TypeSelector: FC = ({ >
    - - {selectedItem?.name} - + > + {selectedItem?.name} +
    {inputVarTypeToVarType(selectedItem?.value as InputVarType)} diff --git a/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx b/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx index 86025f68fa..cb61b927bc 100644 --- a/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx +++ b/web/app/components/app/configuration/dataset-config/params-config/config-content.tsx @@ -175,7 +175,6 @@ const ConfigContent: FC = ({ ...datasetConfigs, reranking_enable: enable, }) - // eslint-disable-next-line react-hooks/exhaustive-deps }, [currentRerankModel, datasetConfigs, onChange]) return ( diff --git a/web/app/components/app/configuration/debug/chat-user-input.tsx b/web/app/components/app/configuration/debug/chat-user-input.tsx index ac07691ce4..b1161de075 100644 --- a/web/app/components/app/configuration/debug/chat-user-input.tsx +++ b/web/app/components/app/configuration/debug/chat-user-input.tsx @@ -57,10 +57,10 @@ const ChatUserInput = ({ >
    {type !== 'checkbox' && ( -
    -
    {name || key}
    - {!required && {t('workflow.panel.optional')}} -
    +
    +
    {name || key}
    + {!required && {t('workflow.panel.optional')}} +
    )}
    {type === 'string' && ( diff --git a/web/app/components/app/log/list.tsx b/web/app/components/app/log/list.tsx index 67b8065745..b73d1f19de 100644 --- a/web/app/components/app/log/list.tsx +++ b/web/app/components/app/log/list.tsx @@ -112,72 +112,72 @@ const getFormattedChatList = (messages: ChatMessage[], conversationId: string, t const newChatList: IChatItem[] = [] try { messages.forEach((item: ChatMessage) => { - const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || [] - newChatList.push({ - id: `question-${item.id}`, - content: item.inputs.query || item.inputs.default_input || item.query, // text generation: item.inputs.query; chat: item.query - isAnswer: false, - message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))), - parentMessageId: item.parent_message_id || undefined, - }) + const questionFiles = item.message_files?.filter((file: any) => file.belongs_to === 'user') || [] + newChatList.push({ + id: `question-${item.id}`, + content: item.inputs.query || item.inputs.default_input || item.query, // text generation: item.inputs.query; chat: item.query + isAnswer: false, + message_files: getProcessedFilesFromResponse(questionFiles.map((item: any) => ({ ...item, related_id: item.id }))), + parentMessageId: item.parent_message_id || undefined, + }) - const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [] - newChatList.push({ - id: item.id, - content: item.answer, - agent_thoughts: addFileInfos(item.agent_thoughts ? sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), - feedback: item.feedbacks.find(item => item.from_source === 'user'), // user feedback - adminFeedback: item.feedbacks.find(item => item.from_source === 'admin'), // admin feedback - feedbackDisabled: false, - isAnswer: true, - message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))), - log: [ - ...item.message, - ...(item.message[item.message.length - 1]?.role !== 'assistant' - ? [ - { - role: 'assistant', - text: item.answer, - files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], - }, - ] - : []), - ] as IChatItem['log'], - workflow_run_id: item.workflow_run_id, - conversationId, - input: { - inputs: item.inputs, - query: item.query, - }, - more: { - time: dayjs.unix(item.created_at).tz(timezone).format(format), - tokens: item.answer_tokens + item.message_tokens, - latency: item.provider_response_latency.toFixed(2), - }, - citation: item.metadata?.retriever_resources, - annotation: (() => { - if (item.annotation_hit_history) { - return { - id: item.annotation_hit_history.annotation_id, - authorName: item.annotation_hit_history.annotation_create_account?.name || 'N/A', - created_at: item.annotation_hit_history.created_at, + const answerFiles = item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [] + newChatList.push({ + id: item.id, + content: item.answer, + agent_thoughts: addFileInfos(item.agent_thoughts ? 
sortAgentSorts(item.agent_thoughts) : item.agent_thoughts, item.message_files), + feedback: item.feedbacks.find(item => item.from_source === 'user'), // user feedback + adminFeedback: item.feedbacks.find(item => item.from_source === 'admin'), // admin feedback + feedbackDisabled: false, + isAnswer: true, + message_files: getProcessedFilesFromResponse(answerFiles.map((item: any) => ({ ...item, related_id: item.id }))), + log: [ + ...item.message, + ...(item.message[item.message.length - 1]?.role !== 'assistant' + ? [ + { + role: 'assistant', + text: item.answer, + files: item.message_files?.filter((file: any) => file.belongs_to === 'assistant') || [], + }, + ] + : []), + ] as IChatItem['log'], + workflow_run_id: item.workflow_run_id, + conversationId, + input: { + inputs: item.inputs, + query: item.query, + }, + more: { + time: dayjs.unix(item.created_at).tz(timezone).format(format), + tokens: item.answer_tokens + item.message_tokens, + latency: item.provider_response_latency.toFixed(2), + }, + citation: item.metadata?.retriever_resources, + annotation: (() => { + if (item.annotation_hit_history) { + return { + id: item.annotation_hit_history.annotation_id, + authorName: item.annotation_hit_history.annotation_create_account?.name || 'N/A', + created_at: item.annotation_hit_history.created_at, + } } - } - if (item.annotation) { - return { - id: item.annotation.id, - authorName: item.annotation.account.name, - logAnnotation: item.annotation, - created_at: 0, + if (item.annotation) { + return { + id: item.annotation.id, + authorName: item.annotation.account.name, + logAnnotation: item.annotation, + created_at: 0, + } } - } - return undefined - })(), - parentMessageId: `question-${item.id}`, + return undefined + })(), + parentMessageId: `question-${item.id}`, + }) }) - }) return newChatList } @@ -503,7 +503,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) { setThreadChatItems(getThreadMessages(tree, newAllChatItems.at(-1)?.id)) } - catch (error) { + catch (error) { console.error(error) setHasMore(false) } @@ -522,7 +522,7 @@ function DetailPanel({ detail, onFeedback }: IDetailPanel) { if (outerDiv && outerDiv.scrollHeight > outerDiv.clientHeight) { scrollContainer = outerDiv } - else if (scrollableDiv && scrollableDiv.scrollHeight > scrollableDiv.clientHeight) { + else if (scrollableDiv && scrollableDiv.scrollHeight > scrollableDiv.clientHeight) { scrollContainer = scrollableDiv } else if (chatContainer && chatContainer.scrollHeight > chatContainer.clientHeight) { diff --git a/web/app/components/app/overview/app-card.tsx b/web/app/components/app/overview/app-card.tsx index 8713c8ef7b..c6df0ebfd9 100644 --- a/web/app/components/app/overview/app-card.tsx +++ b/web/app/components/app/overview/app-card.tsx @@ -167,7 +167,7 @@ function AppCard({ setAppDetail(res) setShowAccessControl(false) } - catch (error) { + catch (error) { console.error('Failed to fetch app detail:', error) } }, [appDetail, setAppDetail]) diff --git a/web/app/components/app/overview/embedded/index.tsx b/web/app/components/app/overview/embedded/index.tsx index cd25c4ca65..6eba993e1d 100644 --- a/web/app/components/app/overview/embedded/index.tsx +++ b/web/app/components/app/overview/embedded/index.tsx @@ -40,12 +40,12 @@ const OPTION_MAP = { `