dify/api/services/workflow/nested_node_graph_service.py
Yansong Zhang 44491e427c feat(api): enable all sandbox/skill controller routes and resolve dependencies (P0)
Resolve the full dependency chain to enable all previously disabled controllers:

Enabled routes:
- sandbox_files: sandbox file browser API
- sandbox_providers: sandbox provider management API
- app_asset: app asset management API
- skills: skill extraction API
- CLI API blueprint: DifyCli callback endpoints (/cli/api/*)

Dependencies extracted (64 files, ~8000 lines):
- models/sandbox.py, models/app_asset.py: DB models
- core/zip_sandbox/: zip-based sandbox execution
- core/session/: CLI API session management
- core/memory/: base memory + node token buffer
- core/helper/creators.py: helper utilities
- core/llm_generator/: context models, output models, utils
- core/workflow/nodes/command/: command node type
- core/workflow/nodes/file_upload/: file upload node type
- core/app/entities/: app_asset_entities, app_bundle_entities, llm_generation_entities
- services/: asset_content, skill, workflow_collaboration, workflow_comment
- controllers/console/app/error.py: AppAsset error classes
- core/tools/utils/system_encryption.py

Import fixes:
- dify_graph.enums -> graphon.enums in skill_service.py
- get_signed_file_url_for_plugin -> get_signed_file_url in cli_api.py

All 5 controllers verified: import OK, Flask starts successfully.
46 existing tests still pass.

Made-with: Cursor
2026-04-09 09:36:16 +08:00


"""
Service for generating Nested Node LLM graph structures.
This service creates graph structures containing LLM nodes configured for
extracting values from list[PromptMessage] variables.
"""
from typing import Any
from sqlalchemy.orm import Session
from graphon.enums import BuiltinNodeTypes
from graphon.model_runtime.entities import LLMMode
from services.model_provider_service import ModelProviderService
from services.workflow.entities import NestedNodeGraphRequest, NestedNodeGraphResponse, NestedNodeParameterSchema
class NestedNodeGraphService:
"""Service for generating Nested Node LLM graph structures."""
def __init__(self, session: Session):
self._session = session
def generate_nested_node_id(self, node_id: str, parameter_name: str) -> str:
"""Generate nested node ID following the naming convention.
Format: {node_id}_ext_{parameter_name}
"""
return f"{node_id}_ext_{parameter_name}"
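
    # Illustrative: generate_nested_node_id("llm_1", "city") returns "llm_1_ext_city".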

    def generate_nested_node_graph(self, tenant_id: str, request: NestedNodeGraphRequest) -> NestedNodeGraphResponse:
        """Generate a complete graph structure containing a Nested Node LLM node.

        Args:
            tenant_id: The tenant ID used to fetch the default model config.
            request: The nested node graph generation request.

        Returns:
            The complete graph structure with nodes, edges, and viewport.
        """
        node_id = self.generate_nested_node_id(request.parent_node_id, request.parameter_key)
        model_config = self._get_default_model_config(tenant_id)
        node = self._build_nested_node_llm_node(
            node_id=node_id,
            parent_node_id=request.parent_node_id,
            context_source=request.context_source,
            parameter_schema=request.parameter_schema,
            model_config=model_config,
        )
        graph = {
            "nodes": [node],
            "edges": [],
            "viewport": {},
        }
        return NestedNodeGraphResponse(graph=graph)

    def _get_default_model_config(self, tenant_id: str) -> dict[str, Any]:
        """Get the default LLM model configuration for the tenant."""
        model_provider_service = ModelProviderService()
        default_model = model_provider_service.get_default_model_of_model_type(
            tenant_id=tenant_id,
            model_type="llm",
        )
        if default_model:
            return {
                "provider": default_model.provider.provider,
                "name": default_model.model,
                "mode": LLMMode.CHAT.value,
                "completion_params": {},
            }
        # Fall back to an empty config if no default model is configured.
        return {
            "provider": "",
            "name": "",
            "mode": LLMMode.CHAT.value,
            "completion_params": {},
        }

    def _build_nested_node_llm_node(
        self,
        *,
        node_id: str,
        parent_node_id: str,
        context_source: list[str],
        parameter_schema: NestedNodeParameterSchema,
        model_config: dict[str, Any],
    ) -> dict[str, Any]:
        """Build the Nested Node LLM node structure.

        The node uses:
        - $context in prompt_template to reference the PromptMessage list
        - structured_output for extracting the specific parameter
        - parent_node_id to associate with the parent node
        """
        prompt_template = [
            {
                "role": "system",
                "text": "Extract the required parameter value from the conversation context above.",
                "skill": False,
            },
            {"$context": context_source},
            {"role": "user", "text": "", "skill": False},
        ]
        structured_output = {
            "schema": {
                "type": "object",
                "properties": {
                    parameter_schema.name: {
                        "type": parameter_schema.type,
                        "description": parameter_schema.description,
                    }
                },
                "required": [parameter_schema.name],
                "additionalProperties": False,
            }
        }
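        # Illustrative: for a parameter named "city" of type "string", this schema
        # accepts only {"city": "<string>"}; the parameter is required and extra
        # properties are rejected (additionalProperties is False).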
        return {
            "id": node_id,
            "position": {"x": 0, "y": 0},
            "data": {
                "type": BuiltinNodeTypes.LLM,
                # BaseNodeData fields
                "title": f"NestedNode: {parameter_schema.name}",
                "desc": f"Extract {parameter_schema.name} from conversation context",
                "version": "1",
                "error_strategy": None,
                "default_value": None,
                "retry_config": {"max_retries": 0},
                "parent_node_id": parent_node_id,
                # LLMNodeData fields
                "model": model_config,
                "prompt_template": prompt_template,
                "prompt_config": {"jinja2_variables": []},
                "memory": None,
                "context": {
                    "enabled": False,
                    "variable_selector": None,
                },
                "vision": {
                    "enabled": False,
                    "configs": {
                        "variable_selector": ["sys", "files"],
                        "detail": "high",
                    },
                },
                "structured_output_enabled": True,
                "structured_output": structured_output,
                "computer_use": False,
                "tool_settings": [],
            },
        }
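

# Usage sketch (illustrative, not part of the service). Only the attributes the
# service actually reads are taken from this file: request.parent_node_id,
# request.parameter_key, request.context_source, request.parameter_schema, and
# the schema's name/type/description. The keyword-argument constructors, the
# engine, and the tenant_id value are assumptions.
#
#     with Session(engine) as session:
#         service = NestedNodeGraphService(session)
#         request = NestedNodeGraphRequest(
#             parent_node_id="tool_node_1",
#             parameter_key="city",
#             context_source=["tool_node_1", "messages"],
#             parameter_schema=NestedNodeParameterSchema(
#                 name="city",
#                 type="string",
#                 description="City to extract from the conversation",
#             ),
#         )
#         response = service.generate_nested_node_graph(tenant_id, request)
#         # response.graph contains one LLM node ("tool_node_1_ext_city"),
#         # no edges, and an empty viewport.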