mirror of https://github.com/langgenius/dify.git
Merge branch 'feat/rag-2' into feat/merge-migrations

# Conflicts:
#	api/services/workflow_service.py
commit 055f7644fb
@@ -43,6 +43,10 @@ jobs:
         if: steps.changed-files.outputs.any_changed == 'true'
         run: uv sync --project api --dev

+      - name: Run Import Linter
+        if: steps.changed-files.outputs.any_changed == 'true'
+        run: uv run --directory api --dev lint-imports
+
       - name: Run Basedpyright Checks
         if: steps.changed-files.outputs.any_changed == 'true'
         run: dev/basedpyright-check
@@ -22,14 +22,15 @@ containers =
 ignore_imports =
     core.workflow.nodes.base.node -> core.workflow.graph_events
     core.workflow.nodes.iteration.iteration_node -> core.workflow.graph_events
     core.workflow.nodes.loop.loop_node -> core.workflow.graph_events

     core.workflow.nodes.node_factory -> core.workflow.graph
     core.workflow.nodes.iteration.iteration_node -> core.workflow.graph_engine
     core.workflow.nodes.iteration.iteration_node -> core.workflow.graph
     core.workflow.nodes.iteration.iteration_node -> core.workflow.graph_engine.command_channels
     core.workflow.nodes.loop.loop_node -> core.workflow.graph_events
     core.workflow.nodes.loop.loop_node -> core.workflow.graph_engine
     core.workflow.nodes.loop.loop_node -> core.workflow.graph
     core.workflow.nodes.loop.loop_node -> core.workflow.graph_engine.command_channels
     core.workflow.nodes.node_factory -> core.workflow.graph

 [importlinter:contract:rsc]
 name = RSC

@@ -57,9 +58,9 @@ layers =
     orchestration
     command_processing
     event_management
-    error_handling
+    error_handler
     graph_traversal
-    state_management
+    graph_state_manager
     worker_management
     domain
 containers =

@@ -86,14 +87,6 @@ forbidden_modules =
     core.workflow.graph_engine.command_processing
     core.workflow.graph_engine.event_management

-[importlinter:contract:error-handling-strategies]
-name = Error Handling Strategies
-type = independence
-modules =
-    core.workflow.graph_engine.error_handling.abort_strategy
-    core.workflow.graph_engine.error_handling.retry_strategy
-    core.workflow.graph_engine.error_handling.fail_branch_strategy
-    core.workflow.graph_engine.error_handling.default_value_strategy

 [importlinter:contract:graph-traversal-components]
 name = Graph Traversal Components
@@ -29,7 +29,7 @@ def no_key_cache_key(namespace: str, key: str) -> str:


 # Returns whether the obtained value is obtained, and None if it does not
-def get_value_from_dict(namespace_cache: dict[str, Any] | None, key: str) -> Any | None:
+def get_value_from_dict(namespace_cache: dict[str, Any] | None, key: str) -> Any:
     if namespace_cache:
         kv_data = namespace_cache.get(CONFIGURATIONS)
         if kv_data is None:
@@ -420,7 +420,13 @@ class PluginUploadFileRequestApi(Resource):
     )
     def post(self, user_model: Account | EndUser, tenant_model: Tenant, payload: RequestRequestUploadFile):
         # generate signed url
-        url = get_signed_file_url_for_plugin(payload.filename, payload.mimetype, tenant_model.id, user_model.id)
+        url = get_signed_file_url_for_plugin(
+            payload.filename,
+            payload.mimetype,
+            tenant_model.id,
+            user_model.id,
+            user_model.session_id if isinstance(user_model, EndUser) else None,
+        )
         return BaseBackwardsInvocationResponse(data={"url": url}).model_dump()
@@ -355,7 +355,7 @@ class WorkflowResponseConverter:
     else WorkflowNodeExecutionStatus.FAILED,
     error=None,
     elapsed_time=(naive_utc_now() - event.start_at).total_seconds(),
-    total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0,
+    total_tokens=(lambda x: x if isinstance(x, int) else 0)(event.metadata.get("total_tokens", 0)),
     execution_metadata=event.metadata,
     finished_at=int(time.time()),
     steps=event.steps,

@@ -442,7 +442,7 @@ class WorkflowResponseConverter:
     else WorkflowNodeExecutionStatus.FAILED,
     error=None,
     elapsed_time=(naive_utc_now() - event.start_at).total_seconds(),
-    total_tokens=event.metadata.get("total_tokens", 0) if event.metadata else 0,
+    total_tokens=(lambda x: x if isinstance(x, int) else 0)(event.metadata.get("total_tokens", 0)),
     execution_metadata=event.metadata,
     finished_at=int(time.time()),
     steps=event.steps,
@@ -384,7 +384,6 @@ class WorkflowBasedAppRunner:
     predecessor_node_id=event.predecessor_node_id,
     in_iteration_id=event.in_iteration_id,
     in_loop_id=event.in_loop_id,
     parallel_mode_run_id=event.parallel_mode_run_id,
     inputs=inputs,
     process_data=process_data,
     outputs=outputs,

@@ -406,7 +405,6 @@ class WorkflowBasedAppRunner:
     predecessor_node_id=event.predecessor_node_id,
     in_iteration_id=event.in_iteration_id,
     in_loop_id=event.in_loop_id,
     parallel_mode_run_id=event.parallel_mode_run_id,
     agent_strategy=event.agent_strategy,
     provider_type=event.provider_type,
     provider_id=event.provider_id,
@@ -1,9 +1,9 @@
 from collections.abc import Mapping, Sequence
 from datetime import datetime
 from enum import StrEnum, auto
-from typing import Any, Optional
+from typing import Any

-from pydantic import BaseModel
+from pydantic import BaseModel, Field

 from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk
 from core.rag.entities.citation_metadata import RetrievalSourceMetadata

@@ -79,9 +79,9 @@ class QueueIterationStartEvent(AppQueueEvent):
     start_at: datetime

     node_run_index: int
-    inputs: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
     predecessor_node_id: str | None = None
-    metadata: Mapping[str, Any] | None = None
+    metadata: Mapping[str, object] = Field(default_factory=dict)


 class QueueIterationNextEvent(AppQueueEvent):

@@ -97,7 +97,7 @@ class QueueIterationNextEvent(AppQueueEvent):
     node_type: NodeType
     node_title: str
     node_run_index: int
-    output: Optional[Any] = None  # output for the current iteration
+    output: Any = None  # output for the current iteration


 class QueueIterationCompletedEvent(AppQueueEvent):

@@ -114,9 +114,9 @@ class QueueIterationCompletedEvent(AppQueueEvent):
     start_at: datetime

     node_run_index: int
-    inputs: Mapping[str, Any] | None = None
-    outputs: Mapping[str, Any] | None = None
-    metadata: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     steps: int = 0

     error: str | None = None

@@ -143,9 +143,9 @@ class QueueLoopStartEvent(AppQueueEvent):
     start_at: datetime

     node_run_index: int
-    inputs: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
     predecessor_node_id: str | None = None
-    metadata: Mapping[str, Any] | None = None
+    metadata: Mapping[str, object] = Field(default_factory=dict)


 class QueueLoopNextEvent(AppQueueEvent):

@@ -171,7 +171,7 @@ class QueueLoopNextEvent(AppQueueEvent):
     parallel_mode_run_id: str | None = None
     """iteration run in parallel mode run id"""
     node_run_index: int
-    output: Optional[Any] = None  # output for the current loop
+    output: Any = None  # output for the current loop


 class QueueLoopCompletedEvent(AppQueueEvent):

@@ -185,7 +185,7 @@ class QueueLoopCompletedEvent(AppQueueEvent):
     node_id: str
     node_type: NodeType
     node_title: str
-    parallel_id: Optional[str] = None
+    parallel_id: str | None = None
     """parallel id if node is in parallel"""
     parallel_start_node_id: str | None = None
     """parallel start node id if node is in parallel"""

@@ -196,9 +196,9 @@ class QueueLoopCompletedEvent(AppQueueEvent):
     start_at: datetime

     node_run_index: int
-    inputs: Mapping[str, Any] | None = None
-    outputs: Mapping[str, Any] | None = None
-    metadata: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     steps: int = 0

     error: str | None = None

@@ -299,7 +299,7 @@ class QueueWorkflowSucceededEvent(AppQueueEvent):
     """

     event: QueueEvent = QueueEvent.WORKFLOW_SUCCEEDED
-    outputs: dict[str, Any] | None = None
+    outputs: Mapping[str, object] = Field(default_factory=dict)


 class QueueWorkflowFailedEvent(AppQueueEvent):

@@ -319,7 +319,7 @@ class QueueWorkflowPartialSuccessEvent(AppQueueEvent):

     event: QueueEvent = QueueEvent.WORKFLOW_PARTIAL_SUCCEEDED
     exceptions_count: int
-    outputs: dict[str, Any] | None = None
+    outputs: Mapping[str, object] = Field(default_factory=dict)


 class QueueNodeStartedEvent(AppQueueEvent):

@@ -334,16 +334,16 @@ class QueueNodeStartedEvent(AppQueueEvent):
     node_title: str
     node_type: NodeType
     node_run_index: int = 1  # FIXME(-LAN-): may not used
-    predecessor_node_id: Optional[str] = None
-    parallel_id: Optional[str] = None
-    parallel_start_node_id: Optional[str] = None
-    parent_parallel_id: Optional[str] = None
-    parent_parallel_start_node_id: Optional[str] = None
-    in_iteration_id: Optional[str] = None
-    in_loop_id: Optional[str] = None
+    predecessor_node_id: str | None = None
+    parallel_id: str | None = None
+    parallel_start_node_id: str | None = None
+    parent_parallel_id: str | None = None
+    parent_parallel_start_node_id: str | None = None
+    in_iteration_id: str | None = None
+    in_loop_id: str | None = None
     start_at: datetime
-    parallel_mode_run_id: Optional[str] = None
-    agent_strategy: Optional[AgentNodeStrategyInit] = None
+    parallel_mode_run_id: str | None = None
+    agent_strategy: AgentNodeStrategyInit | None = None

     # FIXME(-LAN-): only for ToolNode, need to refactor
     provider_type: str  # should be a core.tools.entities.tool_entities.ToolProviderType

@@ -360,7 +360,7 @@ class QueueNodeSucceededEvent(AppQueueEvent):
     node_execution_id: str
     node_id: str
     node_type: NodeType
-    parallel_id: Optional[str] = None
+    parallel_id: str | None = None
     """parallel id if node is in parallel"""
     parallel_start_node_id: str | None = None
     """parallel start node id if node is in parallel"""

@@ -374,12 +374,12 @@ class QueueNodeSucceededEvent(AppQueueEvent):
     """loop id if node is in loop"""
     start_at: datetime

-    inputs: Mapping[str, Any] | None = None
-    process_data: Mapping[str, Any] | None = None
-    outputs: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    process_data: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
     execution_metadata: Mapping[WorkflowNodeExecutionMetadataKey, Any] | None = None

-    error: Optional[str] = None
+    error: str | None = None


 class QueueAgentLogEvent(AppQueueEvent):

@@ -395,7 +395,7 @@ class QueueAgentLogEvent(AppQueueEvent):
     error: str | None = None
     status: str
     data: Mapping[str, Any]
-    metadata: Mapping[str, Any] | None = None
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     node_id: str


@@ -404,9 +404,9 @@ class QueueNodeRetryEvent(QueueNodeStartedEvent):

     event: QueueEvent = QueueEvent.RETRY

-    inputs: Mapping[str, Any] | None = None
-    process_data: Mapping[str, Any] | None = None
-    outputs: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    process_data: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
     execution_metadata: Mapping[WorkflowNodeExecutionMetadataKey, Any] | None = None

     error: str

@@ -423,7 +423,7 @@ class QueueNodeExceptionEvent(AppQueueEvent):
     node_execution_id: str
     node_id: str
     node_type: NodeType
-    parallel_id: Optional[str] = None
+    parallel_id: str | None = None
     """parallel id if node is in parallel"""
     parallel_start_node_id: str | None = None
     """parallel start node id if node is in parallel"""

@@ -437,9 +437,9 @@ class QueueNodeExceptionEvent(AppQueueEvent):
     """loop id if node is in loop"""
     start_at: datetime

-    inputs: Mapping[str, Any] | None = None
-    process_data: Mapping[str, Any] | None = None
-    outputs: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    process_data: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
     execution_metadata: Mapping[WorkflowNodeExecutionMetadataKey, Any] | None = None

     error: str

@@ -455,16 +455,16 @@ class QueueNodeFailedEvent(AppQueueEvent):
     node_execution_id: str
     node_id: str
     node_type: NodeType
-    parallel_id: Optional[str] = None
-    in_iteration_id: Optional[str] = None
+    parallel_id: str | None = None
+    in_iteration_id: str | None = None
     """iteration id if node is in iteration"""
     in_loop_id: str | None = None
     """loop id if node is in loop"""
     start_at: datetime

-    inputs: Mapping[str, Any] | None = None
-    process_data: Mapping[str, Any] | None = None
-    outputs: Mapping[str, Any] | None = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    process_data: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
     execution_metadata: Mapping[WorkflowNodeExecutionMetadataKey, Any] | None = None

     error: str

@@ -494,7 +494,7 @@ class QueueErrorEvent(AppQueueEvent):
     """

     event: QueueEvent = QueueEvent.ERROR
-    error: Any | None = None
+    error: Any = None


 class QueuePingEvent(AppQueueEvent):
@@ -136,7 +136,7 @@ class MessageEndStreamResponse(StreamResponse):

     event: StreamEvent = StreamEvent.MESSAGE_END
     id: str
-    metadata: dict = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     files: Sequence[Mapping[str, Any]] | None = None


@@ -173,7 +173,7 @@ class AgentThoughtStreamResponse(StreamResponse):
     thought: str | None = None
     observation: str | None = None
     tool: str | None = None
-    tool_labels: dict | None = None
+    tool_labels: Mapping[str, object] = Field(default_factory=dict)
     tool_input: str | None = None
     message_files: list[str] | None = None


@@ -226,7 +226,7 @@ class WorkflowFinishStreamResponse(StreamResponse):
     elapsed_time: float
     total_tokens: int
     total_steps: int
-    created_by: dict | None = None
+    created_by: Mapping[str, object] = Field(default_factory=dict)
     created_at: int
     finished_at: int
     exceptions_count: int | None = 0

@@ -256,7 +256,7 @@ class NodeStartStreamResponse(StreamResponse):
     inputs: Optional[Mapping[str, Any]] = None
     inputs_truncated: bool = False
     created_at: int
-    extras: dict = Field(default_factory=dict)
+    extras: dict[str, object] = Field(default_factory=dict)
     parallel_id: str | None = None
     parallel_start_node_id: str | None = None
     parent_parallel_id: str | None = None

@@ -513,7 +513,7 @@ class IterationNodeCompletedStreamResponse(StreamResponse):
     error: str | None = None
     elapsed_time: float
     total_tokens: int
-    execution_metadata: Mapping | None = None
+    execution_metadata: Mapping[str, object] = Field(default_factory=dict)
     finished_at: int
     steps: int


@@ -565,11 +565,11 @@ class LoopNodeNextStreamResponse(StreamResponse):
     title: str
     index: int
     created_at: int
-    pre_loop_output: Any | None = None
-    extras: dict = Field(default_factory=dict)
-    parallel_id: Optional[str] = None
-    parallel_start_node_id: Optional[str] = None
-    parallel_mode_run_id: Optional[str] = None
+    pre_loop_output: Any = None
+    extras: Mapping[str, object] = Field(default_factory=dict)
+    parallel_id: str | None = None
+    parallel_start_node_id: str | None = None
+    parallel_mode_run_id: str | None = None

     event: StreamEvent = StreamEvent.LOOP_NEXT
     workflow_run_id: str

@@ -600,7 +600,7 @@ class LoopNodeCompletedStreamResponse(StreamResponse):
     error: str | None = None
     elapsed_time: float
     total_tokens: int
-    execution_metadata: Mapping | None = None
+    execution_metadata: Mapping[str, object] = Field(default_factory=dict)
     finished_at: int
     steps: int
     parallel_id: str | None = None

@@ -710,7 +710,7 @@ class ChatbotAppBlockingResponse(AppBlockingResponse):
         conversation_id: str
         message_id: str
         answer: str
-        metadata: dict = Field(default_factory=dict)
+        metadata: Mapping[str, object] = Field(default_factory=dict)
         created_at: int

     data: Data

@@ -730,7 +730,7 @@ class CompletionAppBlockingResponse(AppBlockingResponse):
         mode: str
         message_id: str
         answer: str
-        metadata: dict = Field(default_factory=dict)
+        metadata: Mapping[str, object] = Field(default_factory=dict)
         created_at: int

     data: Data

@@ -778,7 +778,7 @@ class AgentLogStreamResponse(StreamResponse):
         error: str | None = None
         status: str
         data: Mapping[str, Any]
-        metadata: Mapping[str, Any] | None = None
+        metadata: Mapping[str, object] = Field(default_factory=dict)
         node_id: str

     event: StreamEvent = StreamEvent.AGENT_LOG
@@ -109,7 +109,9 @@ class AppGeneratorTTSPublisher:
 elif isinstance(message.event, QueueNodeSucceededEvent):
     if message.event.outputs is None:
         continue
-    self.msg_text += message.event.outputs.get("output", "")
+    output = message.event.outputs.get("output", "")
+    if isinstance(output, str):
+        self.msg_text += output
 self.last_message = message
 sentence_arr, text_tmp = self._extract_sentence(self.msg_text)
 if len(sentence_arr) >= min(self.max_sentence, 7):

@@ -119,7 +121,7 @@ class AppGeneratorTTSPublisher:
     _invoice_tts, text_content, self.model_instance, self.tenant_id, self.voice
 )
 future_queue.put(futures_result)
-if text_tmp:
+if isinstance(text_tmp, str):
     self.msg_text = text_tmp
 else:
     self.msg_text = ""
@@ -25,7 +25,9 @@ def get_signed_file_url(upload_file_id: str, as_attachment=False) -> str:
     return f"{url}?{query_string}"


-def get_signed_file_url_for_plugin(filename: str, mimetype: str, tenant_id: str, user_id: str) -> str:
+def get_signed_file_url_for_plugin(
+    filename: str, mimetype: str, tenant_id: str, user_id: str, session_id: str | None
+) -> str:
     # Plugin access should use internal URL for Docker network communication
     base_url = dify_config.INTERNAL_FILES_URL or dify_config.FILES_URL
     url = f"{base_url}/files/upload/for-plugin"

@@ -36,7 +38,8 @@ def get_signed_file_url_for_plugin(filename: str, mimetype: str, tenant_id: str,
     sign = hmac.new(key, msg.encode(), hashlib.sha256).digest()
     encoded_sign = base64.urlsafe_b64encode(sign).decode()

-    return f"{url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}&user_id={user_id}&tenant_id={tenant_id}"
+    url_user_id = session_id or user_id
+    return f"{url}?timestamp={timestamp}&nonce={nonce}&sign={encoded_sign}&user_id={url_user_id}&tenant_id={tenant_id}"


 def verify_plugin_file_signature(
@@ -1,9 +1,33 @@
-from abc import abstractmethod
+from abc import ABC, abstractmethod
+from collections.abc import Mapping, Sequence
+from typing import TypedDict

 from pydantic import BaseModel


-class CodeNodeProvider(BaseModel):
+class VariableConfig(TypedDict):
+    variable: str
+    value_selector: Sequence[str | int]
+
+
+class OutputConfig(TypedDict):
+    type: str
+    children: None
+
+
+class CodeConfig(TypedDict):
+    variables: Sequence[VariableConfig]
+    code_language: str
+    code: str
+    outputs: Mapping[str, OutputConfig]
+
+
+class DefaultConfig(TypedDict):
+    type: str
+    config: CodeConfig
+
+
+class CodeNodeProvider(BaseModel, ABC):
     @staticmethod
     @abstractmethod
     def get_language() -> str:

@@ -22,11 +46,14 @@ class CodeNodeProvider(BaseModel):
         pass

     @classmethod
-    def get_default_config(cls):
+    def get_default_config(cls) -> DefaultConfig:
         return {
             "type": "code",
             "config": {
-                "variables": [{"variable": "arg1", "value_selector": []}, {"variable": "arg2", "value_selector": []}],
+                "variables": [
+                    {"variable": "arg1", "value_selector": []},
+                    {"variable": "arg2", "value_selector": []},
+                ],
                 "code_language": cls.get_language(),
                 "code": cls.get_default_code(),
                 "outputs": {"result": {"type": "string", "children": None}},
@@ -160,7 +160,7 @@ class ErrorData(BaseModel):
     sentence.
     """

-    data: Any | None = None
+    data: Any = None
     """
     Additional information about the error. The value of this member is defined by the
     sender (e.g. detailed error information, nested errors etc.).
@@ -22,13 +22,9 @@ logger = logging.getLogger(__name__)


 class ModelProviderFactory:
-    provider_position_map: dict[str, int]
-
     def __init__(self, tenant_id: str):
         from core.plugin.impl.model import PluginModelClient

-        self.provider_position_map = {}
-
         self.tenant_id = tenant_id
         self.plugin_model_manager = PluginModelClient()
@@ -408,11 +408,11 @@ class TraceTask:
     def __init__(
         self,
         trace_type: Any,
-        message_id: Optional[str] = None,
+        message_id: str | None = None,
         workflow_execution: Optional["WorkflowExecution"] = None,
-        conversation_id: Optional[str] = None,
-        user_id: Optional[str] = None,
-        timer: Optional[Any] = None,
+        conversation_id: str | None = None,
+        user_id: str | None = None,
+        timer: Any | None = None,
         **kwargs,
     ):
         self.trace_type = trace_type
@@ -1,5 +1,6 @@
+from collections.abc import Mapping
 from datetime import datetime
-from typing import Any, Literal, Optional
+from typing import Any, Literal

 from pydantic import BaseModel, Field, field_validator

@@ -16,10 +17,10 @@ class ToolApiEntity(BaseModel):
     description: I18nObject
     parameters: list[ToolParameter] | None = None
     labels: list[str] = Field(default_factory=list)
-    output_schema: dict | None = None
+    output_schema: Mapping[str, object] = Field(default_factory=dict)


-ToolProviderTypeApiLiteral = Optional[Literal["builtin", "api", "workflow", "mcp"]]
+ToolProviderTypeApiLiteral = Literal["builtin", "api", "workflow", "mcp"] | None


 class ToolProviderApiEntity(BaseModel):

@@ -27,17 +28,17 @@ class ToolProviderApiEntity(BaseModel):
     author: str
     name: str  # identifier
     description: I18nObject
-    icon: str | dict
-    icon_dark: str | dict | None = Field(default=None, description="The dark icon of the tool")
+    icon: str | Mapping[str, str]
+    icon_dark: str | Mapping[str, str] = ""
     label: I18nObject  # label
     type: ToolProviderType
-    masked_credentials: dict | None = None
-    original_credentials: dict | None = None
+    masked_credentials: Mapping[str, object] = Field(default_factory=dict)
+    original_credentials: Mapping[str, object] = Field(default_factory=dict)
     is_team_authorization: bool = False
     allow_delete: bool = True
     plugin_id: str | None = Field(default="", description="The plugin id of the tool")
     plugin_unique_identifier: str | None = Field(default="", description="The unique identifier of the tool")
-    tools: list[ToolApiEntity] = Field(default_factory=list)
+    tools: list[ToolApiEntity] = Field(default_factory=list[ToolApiEntity])
     labels: list[str] = Field(default_factory=list)
     # MCP
     server_url: str | None = Field(default="", description="The server url of the tool")

@@ -105,7 +106,7 @@ class ToolProviderCredentialApiEntity(BaseModel):
     is_default: bool = Field(
         default=False, description="Whether the credential is the default credential for the provider in the workspace"
     )
-    credentials: dict = Field(description="The credentials of the provider")
+    credentials: Mapping[str, object] = Field(description="The credentials of the provider", default_factory=dict)


 class ToolProviderCredentialInfoApiEntity(BaseModel):
@@ -187,7 +187,7 @@ class ToolInvokeMessage(BaseModel):
         error: str | None = Field(default=None, description="The error message")
         status: LogStatus = Field(..., description="The status of the log")
         data: Mapping[str, Any] = Field(..., description="Detailed log data")
-        metadata: Mapping[str, Any] | None = Field(default=None, description="The metadata of the log")
+        metadata: Mapping[str, Any] = Field(default_factory=dict, description="The metadata of the log")

     class RetrieverResourceMessage(BaseModel):
         retriever_resources: list[RetrievalSourceMetadata] = Field(..., description="retriever resources")

@@ -363,9 +363,9 @@ class ToolDescription(BaseModel):

 class ToolEntity(BaseModel):
     identity: ToolIdentity
-    parameters: list[ToolParameter] = Field(default_factory=list)
+    parameters: list[ToolParameter] = Field(default_factory=list[ToolParameter])
     description: ToolDescription | None = None
-    output_schema: dict | None = None
+    output_schema: Mapping[str, object] = Field(default_factory=dict)
     has_runtime_parameters: bool = Field(default=False, description="Whether the tool has runtime parameters")

     # pydantic configs

@@ -378,21 +378,23 @@ class ToolEntity(BaseModel):


 class OAuthSchema(BaseModel):
-    client_schema: list[ProviderConfig] = Field(default_factory=list, description="The schema of the OAuth client")
+    client_schema: list[ProviderConfig] = Field(
+        default_factory=list[ProviderConfig], description="The schema of the OAuth client"
+    )
     credentials_schema: list[ProviderConfig] = Field(
-        default_factory=list, description="The schema of the OAuth credentials"
+        default_factory=list[ProviderConfig], description="The schema of the OAuth credentials"
     )


 class ToolProviderEntity(BaseModel):
     identity: ToolProviderIdentity
     plugin_id: str | None = None
-    credentials_schema: list[ProviderConfig] = Field(default_factory=list)
+    credentials_schema: list[ProviderConfig] = Field(default_factory=list[ProviderConfig])
     oauth_schema: OAuthSchema | None = None


 class ToolProviderEntityWithPlugin(ToolProviderEntity):
-    tools: list[ToolEntity] = Field(default_factory=list)
+    tools: list[ToolEntity] = Field(default_factory=list[ToolEntity])


 class WorkflowToolParameterConfiguration(BaseModel):
@@ -72,7 +72,6 @@ class MCPToolProviderController(ToolProviderController):
                 ),
                 llm=remote_mcp_tool.description or "",
             ),
-            output_schema=None,
             has_runtime_parameters=len(remote_mcp_tool.inputSchema) > 0,
         )
         for remote_mcp_tool in remote_mcp_tools
@@ -4,7 +4,7 @@ from collections.abc import Generator, Iterable
 from copy import deepcopy
 from datetime import UTC, datetime
 from mimetypes import guess_type
-from typing import Any, Optional, Union, cast
+from typing import Any, Union, cast

 from yarl import URL

@@ -152,9 +152,9 @@ class ToolEngine:
         user_id: str,
         workflow_tool_callback: DifyWorkflowCallbackHandler,
         workflow_call_depth: int,
-        conversation_id: Optional[str] = None,
-        app_id: Optional[str] = None,
-        message_id: Optional[str] = None,
+        conversation_id: str | None = None,
+        app_id: str | None = None,
+        message_id: str | None = None,
     ) -> Generator[ToolInvokeMessage, None, None]:
         """
         Workflow invokes the tool with the given arguments.
@@ -14,31 +14,17 @@ from sqlalchemy.orm import Session
 from yarl import URL

 import contexts
-from core.helper.provider_cache import ToolProviderCredentialsCache
-from core.plugin.impl.tool import PluginToolManager
-from core.tools.__base.tool_provider import ToolProviderController
-from core.tools.__base.tool_runtime import ToolRuntime
-from core.tools.mcp_tool.provider import MCPToolProviderController
-from core.tools.mcp_tool.tool import MCPTool
-from core.tools.plugin_tool.provider import PluginToolProviderController
-from core.tools.plugin_tool.tool import PluginTool
-from core.tools.utils.uuid_utils import is_valid_uuid
-from core.tools.workflow_as_tool.provider import WorkflowToolProviderController
-from core.workflow.entities.variable_pool import VariablePool
-from models.provider_ids import ToolProviderID
-from services.enterprise.plugin_manager_service import PluginCredentialType
-from services.tools.mcp_tools_manage_service import MCPToolManageService
-
-if TYPE_CHECKING:
-    from core.workflow.nodes.tool.entities import ToolEntity
-
 from configs import dify_config
 from core.agent.entities import AgentToolEntity
 from core.app.entities.app_invoke_entities import InvokeFrom
 from core.helper.module_import_helper import load_single_subclass_from_source
 from core.helper.position_helper import is_filtered
+from core.helper.provider_cache import ToolProviderCredentialsCache
 from core.model_runtime.utils.encoders import jsonable_encoder
+from core.plugin.impl.tool import PluginToolManager
 from core.tools.__base.tool import Tool
+from core.tools.__base.tool_provider import ToolProviderController
+from core.tools.__base.tool_runtime import ToolRuntime
 from core.tools.builtin_tool.provider import BuiltinToolProviderController
 from core.tools.builtin_tool.providers._positions import BuiltinToolProviderSort
 from core.tools.builtin_tool.tool import BuiltinTool

@@ -54,12 +40,21 @@ from core.tools.entities.tool_entities import (
     ToolProviderType,
 )
 from core.tools.errors import ToolProviderNotFoundError
+from core.tools.mcp_tool.provider import MCPToolProviderController
+from core.tools.mcp_tool.tool import MCPTool
+from core.tools.plugin_tool.provider import PluginToolProviderController
+from core.tools.plugin_tool.tool import PluginTool
 from core.tools.tool_label_manager import ToolLabelManager
 from core.tools.utils.configuration import ToolParameterConfigurationManager
 from core.tools.utils.encryption import create_provider_encrypter, create_tool_provider_encrypter
+from core.tools.utils.uuid_utils import is_valid_uuid
+from core.tools.workflow_as_tool.provider import WorkflowToolProviderController
 from core.tools.workflow_as_tool.tool import WorkflowTool
 from extensions.ext_database import db
+from models.provider_ids import ToolProviderID
 from models.tools import ApiToolProvider, BuiltinToolProvider, MCPToolProvider, WorkflowToolProvider
+from services.enterprise.plugin_manager_service import PluginCredentialType
+from services.tools.mcp_tools_manage_service import MCPToolManageService
 from services.tools.tools_transform_service import ToolTransformService

 if TYPE_CHECKING:

@@ -890,7 +885,7 @@ class ToolManager:
         )

     @classmethod
-    def generate_workflow_tool_icon_url(cls, tenant_id: str, provider_id: str):
+    def generate_workflow_tool_icon_url(cls, tenant_id: str, provider_id: str) -> Mapping[str, str]:
         try:
             workflow_provider: WorkflowToolProvider | None = (
                 db.session.query(WorkflowToolProvider)

@@ -901,13 +896,13 @@ class ToolManager:
             if workflow_provider is None:
                 raise ToolProviderNotFoundError(f"workflow provider {provider_id} not found")

-            icon: dict = json.loads(workflow_provider.icon)
+            icon = json.loads(workflow_provider.icon)
             return icon
         except Exception:
             return {"background": "#252525", "content": "\ud83d\ude01"}

     @classmethod
-    def generate_api_tool_icon_url(cls, tenant_id: str, provider_id: str):
+    def generate_api_tool_icon_url(cls, tenant_id: str, provider_id: str) -> Mapping[str, str]:
         try:
             api_provider: ApiToolProvider | None = (
                 db.session.query(ApiToolProvider)

@@ -918,13 +913,13 @@ class ToolManager:
             if api_provider is None:
                 raise ToolProviderNotFoundError(f"api provider {provider_id} not found")

-            icon: dict = json.loads(api_provider.icon)
+            icon = json.loads(api_provider.icon)
             return icon
         except Exception:
             return {"background": "#252525", "content": "\ud83d\ude01"}

     @classmethod
-    def generate_mcp_tool_icon_url(cls, tenant_id: str, provider_id: str) -> dict[str, str] | str:
+    def generate_mcp_tool_icon_url(cls, tenant_id: str, provider_id: str) -> Mapping[str, str] | str:
         try:
             mcp_provider: MCPToolProvider | None = (
                 db.session.query(MCPToolProvider)

@@ -945,7 +940,7 @@ class ToolManager:
         tenant_id: str,
         provider_type: ToolProviderType,
         provider_id: str,
-    ) -> Union[str, dict[str, Any]]:
+    ) -> str | Mapping[str, str]:
         """
         get the tool icon

@@ -970,11 +965,10 @@ class ToolManager:
             return cls.generate_workflow_tool_icon_url(tenant_id, provider_id)
         elif provider_type == ToolProviderType.PLUGIN:
             provider = ToolManager.get_plugin_provider(provider_id, tenant_id)
-            if isinstance(provider, PluginToolProviderController):
-                try:
-                    return cls.generate_plugin_tool_icon_url(tenant_id, provider.entity.identity.icon)
-                except Exception:
-                    return {"background": "#252525", "content": "\ud83d\ude01"}
+            try:
+                return cls.generate_plugin_tool_icon_url(tenant_id, provider.entity.identity.icon)
+            except Exception:
+                return {"background": "#252525", "content": "\ud83d\ude01"}
             raise ValueError(f"plugin provider {provider_id} not found")
         elif provider_type == ToolProviderType.MCP:
             return cls.generate_mcp_tool_icon_url(tenant_id, provider_id)
@@ -0,0 +1,132 @@
# Workflow

## Project Overview

This is the workflow graph engine module of Dify, implementing a queue-based distributed workflow execution system. The engine handles agentic AI workflows with support for parallel execution, node iteration, conditional logic, and external command control.

## Architecture

### Core Components

The graph engine follows a layered architecture with strict dependency rules:

1. **Graph Engine** (`graph_engine/`) - Orchestrates workflow execution

   - **Manager** - External control interface for stop/pause/resume commands
   - **Worker** - Node execution runtime
   - **Command Processing** - Handles control commands (abort, pause, resume)
   - **Event Management** - Event propagation and layer notifications
   - **Graph Traversal** - Edge processing and skip propagation
   - **Response Coordinator** - Path tracking and session management
   - **Layers** - Pluggable middleware (debug logging, execution limits)
   - **Command Channels** - Communication channels (InMemory, Redis)

2. **Graph** (`graph/`) - Graph structure and runtime state

   - **Graph Template** - Workflow definition
   - **Edge** - Node connections with conditions
   - **Runtime State Protocol** - State management interface

3. **Nodes** (`nodes/`) - Node implementations

   - **Base** - Abstract node classes and variable parsing
   - **Specific Nodes** - LLM, Agent, Code, HTTP Request, Iteration, Loop, etc.

4. **Events** (`node_events/`) - Event system

   - **Base** - Event protocols
   - **Node Events** - Node lifecycle events

5. **Entities** (`entities/`) - Domain models

   - **Variable Pool** - Variable storage
   - **Graph Init Params** - Initialization configuration

## Key Design Patterns

### Command Channel Pattern

External workflow control via Redis or in-memory channels:

```python
# Send stop command to running workflow
channel = RedisChannel(redis_client, f"workflow:{task_id}:commands")
channel.send_command(AbortCommand(reason="User requested"))
```

### Layer System

Extensible middleware for cross-cutting concerns:

```python
engine = GraphEngine(graph)
engine.add_layer(DebugLoggingLayer(level="INFO"))
engine.add_layer(ExecutionLimitsLayer(max_nodes=100))
```

### Event-Driven Architecture

All node executions emit events for monitoring and integration (a consumption sketch follows this list):

- `NodeRunStartedEvent` - Node execution begins
- `NodeRunSucceededEvent` - Node completes successfully
- `NodeRunFailedEvent` - Node encounters error
- `GraphRunStartedEvent/GraphRunCompletedEvent` - Workflow lifecycle
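
Below is a minimal consumption sketch, assuming `engine.run()` yields these events as a stream; the event class names follow the list above, and the tallying logic is illustrative rather than part of the engine's API:

```python
# Hypothetical consumer: tally node outcomes while a workflow runs.
# Assumes engine.run() yields the events listed above.
succeeded = failed = 0
for event in engine.run():
    if isinstance(event, NodeRunSucceededEvent):
        succeeded += 1
    elif isinstance(event, NodeRunFailedEvent):
        failed += 1
    elif isinstance(event, GraphRunCompletedEvent):
        print(f"workflow done: {succeeded} succeeded, {failed} failed")
```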
### Variable Pool

Centralized variable storage with namespace isolation:

```python
# Variables scoped by node_id
pool.add(["node1", "output"], value)
result = pool.get(["node1", "output"])
```

## Import Architecture Rules

The codebase enforces strict layering via import-linter:

1. **Workflow Layers** (top to bottom):

   - graph_engine → graph_events → graph → nodes → node_events → entities

2. **Graph Engine Internal Layers**:

   - orchestration → command_processing → event_management → graph_traversal → domain

3. **Domain Isolation**:

   - Domain models cannot import from infrastructure layers

4. **Command Channel Independence**:

   - InMemory and Redis channels must remain independent

## Common Tasks

### Adding a New Node Type

1. Create node class in `nodes/<node_type>/`
2. Inherit from `BaseNode` or appropriate base class
3. Implement `_run()` method
4. Register in `nodes/node_mapping.py`
5. Add tests in `tests/unit_tests/core/workflow/nodes/`
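
A minimal skeleton for steps 1–3; the import paths, the `graph_runtime_state.variable_pool` accessor, and the selector shape are assumptions based on this document, not a verified API:

```python
from core.workflow.enums import WorkflowNodeExecutionStatus
from core.workflow.node_events import NodeRunResult
from core.workflow.nodes.base import BaseNode  # import path assumed


class EchoNode(BaseNode):
    """Hypothetical node that copies one upstream variable to its output."""

    def _run(self) -> NodeRunResult:
        # Selector shape mirrors the Variable Pool example above.
        value = self.graph_runtime_state.variable_pool.get(["start", "query"])
        return NodeRunResult(
            status=WorkflowNodeExecutionStatus.SUCCEEDED,
            outputs={"output": value},
        )
```

Registration (step 4) then maps the new node type to this class in `nodes/node_mapping.py`.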
### Implementing a Custom Layer

1. Create class inheriting from `Layer` base
2. Override lifecycle methods: `on_graph_start()`, `on_event()`, `on_graph_end()`
3. Add to engine via `engine.add_layer()`
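
A sketch following these steps; the `Layer` import path and the exact hook signatures are assumptions:

```python
from core.workflow.graph_engine.layers import Layer  # import path assumed


class EventCounterLayer(Layer):
    """Counts events emitted during a single graph run."""

    def on_graph_start(self) -> None:
        self.count = 0

    def on_event(self, event) -> None:  # signature assumed
        self.count += 1

    def on_graph_end(self, error=None) -> None:  # signature assumed
        print(f"graph finished after {self.count} events")
```

Attach it with `engine.add_layer(EventCounterLayer())` before running the workflow.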
### Debugging Workflow Execution

Enable debug logging layer:

```python
debug_layer = DebugLoggingLayer(
    level="DEBUG",
    include_inputs=True,
    include_outputs=True
)
```
@@ -1,5 +1,3 @@
-from typing import Optional
-
 from pydantic import BaseModel

@@ -7,4 +5,4 @@ class AgentNodeStrategyInit(BaseModel):
     """Agent node strategy initialization data."""

     name: str
-    icon: Optional[str] = None
+    icon: str | None = None
@@ -1,5 +1,4 @@
 from copy import deepcopy
-from typing import Any

 from pydantic import BaseModel, PrivateAttr

@@ -14,17 +13,24 @@ class GraphRuntimeState(BaseModel):
     _start_at: float = PrivateAttr()
     _total_tokens: int = PrivateAttr(default=0)
     _llm_usage: LLMUsage = PrivateAttr(default_factory=LLMUsage.empty_usage)
-    _outputs: dict[str, Any] = PrivateAttr(default_factory=dict)
+    _outputs: dict[str, object] = PrivateAttr(default_factory=dict[str, object])
     _node_run_steps: int = PrivateAttr(default=0)
+    _ready_queue_json: str = PrivateAttr()
+    _graph_execution_json: str = PrivateAttr()
+    _response_coordinator_json: str = PrivateAttr()

     def __init__(
         self,
         *,
         variable_pool: VariablePool,
         start_at: float,
         total_tokens: int = 0,
         llm_usage: LLMUsage | None = None,
-        outputs: dict[str, Any] | None = None,
+        outputs: dict[str, object] | None = None,
         node_run_steps: int = 0,
+        ready_queue_json: str = "",
+        graph_execution_json: str = "",
+        response_coordinator_json: str = "",
+        **kwargs: object,
     ):
         """Initialize the GraphRuntimeState with validation."""

@@ -51,6 +57,10 @@ class GraphRuntimeState(BaseModel):
             raise ValueError("node_run_steps must be non-negative")
         self._node_run_steps = node_run_steps

+        self._ready_queue_json = ready_queue_json
+        self._graph_execution_json = graph_execution_json
+        self._response_coordinator_json = response_coordinator_json
+
     @property
     def variable_pool(self) -> VariablePool:
         """Get the variable pool."""

@@ -90,24 +100,24 @@ class GraphRuntimeState(BaseModel):
         self._llm_usage = value.model_copy()

     @property
-    def outputs(self) -> dict[str, Any]:
+    def outputs(self) -> dict[str, object]:
         """Get a copy of the outputs dictionary."""
         return deepcopy(self._outputs)

     @outputs.setter
-    def outputs(self, value: dict[str, Any]) -> None:
+    def outputs(self, value: dict[str, object]) -> None:
         """Set the outputs dictionary."""
         self._outputs = deepcopy(value)

-    def set_output(self, key: str, value: Any) -> None:
+    def set_output(self, key: str, value: object) -> None:
         """Set a single output value."""
         self._outputs[key] = deepcopy(value)

-    def get_output(self, key: str, default: Any = None) -> Any:
+    def get_output(self, key: str, default: object = None) -> object:
         """Get a single output value."""
         return deepcopy(self._outputs.get(key, default))

-    def update_outputs(self, updates: dict[str, Any]) -> None:
+    def update_outputs(self, updates: dict[str, object]) -> None:
         """Update multiple output values."""
         for key, value in updates.items():
             self._outputs[key] = deepcopy(value)

@@ -133,3 +143,18 @@ class GraphRuntimeState(BaseModel):
         if tokens < 0:
             raise ValueError("tokens must be non-negative")
         self._total_tokens += tokens
+
+    @property
+    def ready_queue_json(self) -> str:
+        """Get a copy of the ready queue state."""
+        return self._ready_queue_json
+
+    @property
+    def graph_execution_json(self) -> str:
+        """Get a copy of the serialized graph execution state."""
+        return self._graph_execution_json
+
+    @property
+    def response_coordinator_json(self) -> str:
+        """Get a copy of the serialized response coordinator state."""
+        return self._response_coordinator_json
@@ -188,9 +188,9 @@ class Graph:
         for node_id, node_config in node_configs_map.items():
             try:
                 node_instance = node_factory.create_node(node_config)
-            except ValueError as e:
-                logger.warning("Failed to create node instance: %s", str(e))
-                continue
+            except Exception:
+                logger.exception("Failed to create node instance for node_id %s", node_id)
+                raise
             nodes[node_id] = node_instance

         return nodes
@@ -97,8 +97,12 @@ class RedisChannel:
         Returns:
             Deserialized command or None if invalid
         """
+        command_type_value = data.get("command_type")
+        if not isinstance(command_type_value, str):
+            return None
+
         try:
-            command_type = CommandType(data.get("command_type"))
+            command_type = CommandType(command_type_value)

             if command_type == CommandType.ABORT:
                 return AbortCommand(**data)
@@ -5,12 +5,10 @@ This package contains the core domain entities, value objects, and aggregates
 that represent the business concepts of workflow graph execution.
 """

-from .execution_context import ExecutionContext
 from .graph_execution import GraphExecution
 from .node_execution import NodeExecution

 __all__ = [
-    "ExecutionContext",
     "GraphExecution",
     "NodeExecution",
 ]
@@ -1,37 +0,0 @@
-"""
-ExecutionContext value object containing immutable execution parameters.
-"""
-
-from dataclasses import dataclass
-
-from core.app.entities.app_invoke_entities import InvokeFrom
-from models.enums import UserFrom
-
-
-@dataclass(frozen=True)
-class ExecutionContext:
-    """
-    Immutable value object containing the context for a graph execution.
-
-    This encapsulates all the contextual information needed to execute a workflow,
-    keeping it separate from the mutable execution state.
-    """
-
-    tenant_id: str
-    app_id: str
-    workflow_id: str
-    user_id: str
-    user_from: UserFrom
-    invoke_from: InvokeFrom
-    call_depth: int
-    max_execution_steps: int
-    max_execution_time: int
-
-    def __post_init__(self) -> None:
-        """Validate execution context parameters."""
-        if self.call_depth < 0:
-            raise ValueError("Call depth must be non-negative")
-        if self.max_execution_steps <= 0:
-            raise ValueError("Max execution steps must be positive")
-        if self.max_execution_time <= 0:
-            raise ValueError("Max execution time must be positive")
@@ -1,12 +1,94 @@
-"""
-GraphExecution aggregate root managing the overall graph execution state.
-"""
+"""GraphExecution aggregate root managing the overall graph execution state."""
+
+from __future__ import annotations

 from dataclasses import dataclass, field
+from importlib import import_module
+from typing import Literal
+
+from pydantic import BaseModel, Field
+
+from core.workflow.enums import NodeState

 from .node_execution import NodeExecution

+
+class GraphExecutionErrorState(BaseModel):
+    """Serializable representation of an execution error."""
+
+    module: str = Field(description="Module containing the exception class")
+    qualname: str = Field(description="Qualified name of the exception class")
+    message: str | None = Field(default=None, description="Exception message string")
+
+
+class NodeExecutionState(BaseModel):
+    """Serializable representation of a node execution entity."""
+
+    node_id: str
+    state: NodeState = Field(default=NodeState.UNKNOWN)
+    retry_count: int = Field(default=0)
+    execution_id: str | None = Field(default=None)
+    error: str | None = Field(default=None)
+
+
+class GraphExecutionState(BaseModel):
+    """Pydantic model describing serialized GraphExecution state."""
+
+    type: Literal["GraphExecution"] = Field(default="GraphExecution")
+    version: str = Field(default="1.0")
+    workflow_id: str
+    started: bool = Field(default=False)
+    completed: bool = Field(default=False)
+    aborted: bool = Field(default=False)
+    error: GraphExecutionErrorState | None = Field(default=None)
+    node_executions: list[NodeExecutionState] = Field(default_factory=list)
+
+
+def _serialize_error(error: Exception | None) -> GraphExecutionErrorState | None:
+    """Convert an exception into its serializable representation."""
+
+    if error is None:
+        return None
+
+    return GraphExecutionErrorState(
+        module=error.__class__.__module__,
+        qualname=error.__class__.__qualname__,
+        message=str(error),
+    )
+
+
+def _resolve_exception_class(module_name: str, qualname: str) -> type[Exception]:
+    """Locate an exception class from its module and qualified name."""
+
+    module = import_module(module_name)
+    attr: object = module
+    for part in qualname.split("."):
+        attr = getattr(attr, part)
+
+    if isinstance(attr, type) and issubclass(attr, Exception):
+        return attr
+
+    raise TypeError(f"{qualname} in {module_name} is not an Exception subclass")
+
+
+def _deserialize_error(state: GraphExecutionErrorState | None) -> Exception | None:
+    """Reconstruct an exception instance from serialized data."""
+
+    if state is None:
+        return None
+
+    try:
+        exception_class = _resolve_exception_class(state.module, state.qualname)
+        if state.message is None:
+            return exception_class()
+        return exception_class(state.message)
+    except Exception:
+        # Fallback to RuntimeError when reconstruction fails
+        if state.message is None:
+            return RuntimeError(state.qualname)
+        return RuntimeError(state.message)
+
+
 @dataclass
 class GraphExecution:
     """

@@ -69,3 +151,57 @@ class GraphExecution:
         if not self.error:
             return None
         return str(self.error)
+
+    def dumps(self) -> str:
+        """Serialize the aggregate state into a JSON string."""
+
+        node_states = [
+            NodeExecutionState(
+                node_id=node_id,
+                state=node_execution.state,
+                retry_count=node_execution.retry_count,
+                execution_id=node_execution.execution_id,
+                error=node_execution.error,
+            )
+            for node_id, node_execution in sorted(self.node_executions.items())
+        ]
+
+        state = GraphExecutionState(
+            workflow_id=self.workflow_id,
+            started=self.started,
+            completed=self.completed,
+            aborted=self.aborted,
+            error=_serialize_error(self.error),
+            node_executions=node_states,
+        )
+
+        return state.model_dump_json()
+
+    def loads(self, data: str) -> None:
+        """Restore aggregate state from a serialized JSON string."""
+
+        state = GraphExecutionState.model_validate_json(data)
+
+        if state.type != "GraphExecution":
+            raise ValueError(f"Invalid serialized data type: {state.type}")
+
+        if state.version != "1.0":
+            raise ValueError(f"Unsupported serialized version: {state.version}")
+
+        if self.workflow_id != state.workflow_id:
+            raise ValueError("Serialized workflow_id does not match aggregate identity")
+
+        self.started = state.started
+        self.completed = state.completed
+        self.aborted = state.aborted
+        self.error = _deserialize_error(state.error)
+        self.node_executions = {
+            item.node_id: NodeExecution(
+                node_id=item.node_id,
+                state=item.state,
+                retry_count=item.retry_count,
+                execution_id=item.execution_id,
+                error=item.error,
+            )
+            for item in state.node_executions
+        }
@@ -0,0 +1,211 @@
"""
Main error handler that coordinates error strategies.
"""

import logging
import time
from typing import TYPE_CHECKING, final

from core.workflow.enums import (
    ErrorStrategy as ErrorStrategyEnum,
)
from core.workflow.enums import (
    WorkflowNodeExecutionMetadataKey,
    WorkflowNodeExecutionStatus,
)
from core.workflow.graph import Graph
from core.workflow.graph_events import (
    GraphNodeEventBase,
    NodeRunExceptionEvent,
    NodeRunFailedEvent,
    NodeRunRetryEvent,
)
from core.workflow.node_events import NodeRunResult

if TYPE_CHECKING:
    from .domain import GraphExecution

logger = logging.getLogger(__name__)


@final
class ErrorHandler:
    """
    Coordinates error handling strategies for node failures.

    This acts as a facade for the various error strategies,
    selecting and applying the appropriate strategy based on
    node configuration.
    """

    def __init__(self, graph: Graph, graph_execution: "GraphExecution") -> None:
        """
        Initialize the error handler.

        Args:
            graph: The workflow graph
            graph_execution: The graph execution state
        """
        self._graph = graph
        self._graph_execution = graph_execution

    def handle_node_failure(self, event: NodeRunFailedEvent) -> GraphNodeEventBase | None:
        """
        Handle a node failure event.

        Selects and applies the appropriate error strategy based on
        the node's configuration.

        Args:
            event: The node failure event

        Returns:
            Optional new event to process, or None to abort
        """
        node = self._graph.nodes[event.node_id]
        # Get retry count from NodeExecution
        node_execution = self._graph_execution.get_or_create_node_execution(event.node_id)
        retry_count = node_execution.retry_count

        # First check if retry is configured and not exhausted
        if node.retry and retry_count < node.retry_config.max_retries:
            result = self._handle_retry(event, retry_count)
            if result:
                # Retry count will be incremented when NodeRunRetryEvent is handled
                return result

        # Apply configured error strategy
        strategy = node.error_strategy

        match strategy:
            case None:
                return self._handle_abort(event)
            case ErrorStrategyEnum.FAIL_BRANCH:
                return self._handle_fail_branch(event)
            case ErrorStrategyEnum.DEFAULT_VALUE:
                return self._handle_default_value(event)

    def _handle_abort(self, event: NodeRunFailedEvent):
        """
        Handle error by aborting execution.

        This is the default strategy when no other strategy is specified.
        It stops the entire graph execution when a node fails.

        Args:
            event: The failure event

        Returns:
            None - signals abortion
        """
        logger.error("Node %s failed with ABORT strategy: %s", event.node_id, event.error)
        # Return None to signal that execution should stop

    def _handle_retry(self, event: NodeRunFailedEvent, retry_count: int):
        """
        Handle error by retrying the node.

        This strategy re-attempts node execution up to a configured
        maximum number of retries with configurable intervals.

        Args:
            event: The failure event
            retry_count: Current retry attempt count

        Returns:
            NodeRunRetryEvent if retry should occur, None otherwise
        """
        node = self._graph.nodes[event.node_id]

        # Check if we've exceeded max retries
        if not node.retry or retry_count >= node.retry_config.max_retries:
            return None

        # Wait for retry interval
        time.sleep(node.retry_config.retry_interval_seconds)

        # Create retry event
        return NodeRunRetryEvent(
            id=event.id,
            node_title=node.title,
            node_id=event.node_id,
            node_type=event.node_type,
            node_run_result=event.node_run_result,
            start_at=event.start_at,
            error=event.error,
            retry_index=retry_count + 1,
        )

    def _handle_fail_branch(self, event: NodeRunFailedEvent):
        """
        Handle error by taking the fail branch.

        This strategy converts failures to exceptions and routes execution
        through a designated fail-branch edge.

        Args:
            event: The failure event

        Returns:
            NodeRunExceptionEvent to continue via fail branch
        """
        outputs = {
            "error_message": event.node_run_result.error,
            "error_type": event.node_run_result.error_type,
        }

        return NodeRunExceptionEvent(
            id=event.id,
            node_id=event.node_id,
            node_type=event.node_type,
            start_at=event.start_at,
            node_run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.EXCEPTION,
                inputs=event.node_run_result.inputs,
                process_data=event.node_run_result.process_data,
                outputs=outputs,
                edge_source_handle="fail-branch",
                metadata={
                    WorkflowNodeExecutionMetadataKey.ERROR_STRATEGY: ErrorStrategyEnum.FAIL_BRANCH,
                },
            ),
            error=event.error,
        )

    def _handle_default_value(self, event: NodeRunFailedEvent):
        """
        Handle error by using default values.

        This strategy allows nodes to fail gracefully by providing
        predefined default output values.

        Args:
            event: The failure event

        Returns:
            NodeRunExceptionEvent with default values
        """
        node = self._graph.nodes[event.node_id]

        outputs = {
            **node.default_value_dict,
            "error_message": event.node_run_result.error,
            "error_type": event.node_run_result.error_type,
        }

        return NodeRunExceptionEvent(
            id=event.id,
            node_id=event.node_id,
            node_type=event.node_type,
            start_at=event.start_at,
            node_run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.EXCEPTION,
                inputs=event.node_run_result.inputs,
                process_data=event.node_run_result.process_data,
                outputs=outputs,
                metadata={
                    WorkflowNodeExecutionMetadataKey.ERROR_STRATEGY: ErrorStrategyEnum.DEFAULT_VALUE,
                },
            ),
            error=event.error,
        )
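A minimal sketch of how this consolidated handler is meant to be driven, assuming a `graph` and `graph_execution` that are already constructed and a `failed_event` taken off the event queue; the `event_handler` name here is illustrative, not part of this commit:

    # Illustrative driver; mirrors the dispatch/fail calls in the event handler below.
    handler = ErrorHandler(graph, graph_execution)
    follow_up = handler.handle_node_failure(failed_event)
    if follow_up is None:
        # No strategy produced a follow-up event: abort semantics.
        graph_execution.fail(RuntimeError(failed_event.error))
    else:
        # A NodeRunRetryEvent or NodeRunExceptionEvent re-enters normal dispatch.
        event_handler.dispatch(follow_up)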
@@ -1,20 +0,0 @@
"""
Error handling strategies for graph engine.

This package implements different error recovery strategies using
the Strategy pattern for clean separation of concerns.
"""

from .abort_strategy import AbortStrategy
from .default_value_strategy import DefaultValueStrategy
from .error_handler import ErrorHandler
from .fail_branch_strategy import FailBranchStrategy
from .retry_strategy import RetryStrategy

__all__ = [
    "AbortStrategy",
    "DefaultValueStrategy",
    "ErrorHandler",
    "FailBranchStrategy",
    "RetryStrategy",
]
@@ -1,40 +0,0 @@
"""
Abort error strategy implementation.
"""

import logging
from typing import final

from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase, NodeRunFailedEvent

logger = logging.getLogger(__name__)


@final
class AbortStrategy:
    """
    Error strategy that aborts execution on failure.

    This is the default strategy when no other strategy is specified.
    It stops the entire graph execution when a node fails.
    """

    def handle_error(self, event: NodeRunFailedEvent, graph: Graph, retry_count: int) -> GraphNodeEventBase | None:
        """
        Handle error by aborting execution.

        Args:
            event: The failure event
            graph: The workflow graph
            retry_count: Current retry attempt count (unused)

        Returns:
            None - signals abortion
        """
        _ = graph
        _ = retry_count
        logger.error("Node %s failed with ABORT strategy: %s", event.node_id, event.error)

        # Return None to signal that execution should stop
        return None
@@ -1,58 +0,0 @@
"""
Default value error strategy implementation.
"""

from typing import final

from core.workflow.enums import ErrorStrategy, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase, NodeRunExceptionEvent, NodeRunFailedEvent
from core.workflow.node_events import NodeRunResult


@final
class DefaultValueStrategy:
    """
    Error strategy that uses default values on failure.

    This strategy allows nodes to fail gracefully by providing
    predefined default output values.
    """

    def handle_error(self, event: NodeRunFailedEvent, graph: Graph, retry_count: int) -> GraphNodeEventBase | None:
        """
        Handle error by using default values.

        Args:
            event: The failure event
            graph: The workflow graph
            retry_count: Current retry attempt count (unused)

        Returns:
            NodeRunExceptionEvent with default values
        """
        _ = retry_count
        node = graph.nodes[event.node_id]

        outputs = {
            **node.default_value_dict,
            "error_message": event.node_run_result.error,
            "error_type": event.node_run_result.error_type,
        }

        return NodeRunExceptionEvent(
            id=event.id,
            node_id=event.node_id,
            node_type=event.node_type,
            start_at=event.start_at,
            node_run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.EXCEPTION,
                inputs=event.node_run_result.inputs,
                process_data=event.node_run_result.process_data,
                outputs=outputs,
                metadata={
                    WorkflowNodeExecutionMetadataKey.ERROR_STRATEGY: ErrorStrategy.DEFAULT_VALUE,
                },
            ),
            error=event.error,
        )
@@ -1,81 +0,0 @@
"""
Main error handler that coordinates error strategies.
"""

from typing import TYPE_CHECKING, final

from core.workflow.enums import ErrorStrategy as ErrorStrategyEnum
from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase, NodeRunFailedEvent

from .abort_strategy import AbortStrategy
from .default_value_strategy import DefaultValueStrategy
from .fail_branch_strategy import FailBranchStrategy
from .retry_strategy import RetryStrategy

if TYPE_CHECKING:
    from ..domain import GraphExecution


@final
class ErrorHandler:
    """
    Coordinates error handling strategies for node failures.

    This acts as a facade for the various error strategies,
    selecting and applying the appropriate strategy based on
    node configuration.
    """

    def __init__(self, graph: Graph, graph_execution: "GraphExecution") -> None:
        """
        Initialize the error handler.

        Args:
            graph: The workflow graph
            graph_execution: The graph execution state
        """
        self._graph = graph
        self._graph_execution = graph_execution

        # Initialize strategies
        self._abort_strategy = AbortStrategy()
        self._retry_strategy = RetryStrategy()
        self._fail_branch_strategy = FailBranchStrategy()
        self._default_value_strategy = DefaultValueStrategy()

    def handle_node_failure(self, event: NodeRunFailedEvent) -> GraphNodeEventBase | None:
        """
        Handle a node failure event.

        Selects and applies the appropriate error strategy based on
        the node's configuration.

        Args:
            event: The node failure event

        Returns:
            Optional new event to process, or None to abort
        """
        node = self._graph.nodes[event.node_id]
        # Get retry count from NodeExecution
        node_execution = self._graph_execution.get_or_create_node_execution(event.node_id)
        retry_count = node_execution.retry_count

        # First check if retry is configured and not exhausted
        if node.retry and retry_count < node.retry_config.max_retries:
            result = self._retry_strategy.handle_error(event, self._graph, retry_count)
            if result:
                # Retry count will be incremented when NodeRunRetryEvent is handled
                return result

        # Apply configured error strategy
        strategy = node.error_strategy

        match strategy:
            case None:
                return self._abort_strategy.handle_error(event, self._graph, retry_count)
            case ErrorStrategyEnum.FAIL_BRANCH:
                return self._fail_branch_strategy.handle_error(event, self._graph, retry_count)
            case ErrorStrategyEnum.DEFAULT_VALUE:
                return self._default_value_strategy.handle_error(event, self._graph, retry_count)
@@ -1,57 +0,0 @@
"""
Fail branch error strategy implementation.
"""

from typing import final

from core.workflow.enums import ErrorStrategy, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus
from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase, NodeRunExceptionEvent, NodeRunFailedEvent
from core.workflow.node_events import NodeRunResult


@final
class FailBranchStrategy:
    """
    Error strategy that continues execution via a fail branch.

    This strategy converts failures to exceptions and routes execution
    through a designated fail-branch edge.
    """

    def handle_error(self, event: NodeRunFailedEvent, graph: Graph, retry_count: int) -> GraphNodeEventBase | None:
        """
        Handle error by taking the fail branch.

        Args:
            event: The failure event
            graph: The workflow graph
            retry_count: Current retry attempt count (unused)

        Returns:
            NodeRunExceptionEvent to continue via fail branch
        """
        _ = graph
        _ = retry_count
        outputs = {
            "error_message": event.node_run_result.error,
            "error_type": event.node_run_result.error_type,
        }

        return NodeRunExceptionEvent(
            id=event.id,
            node_id=event.node_id,
            node_type=event.node_type,
            start_at=event.start_at,
            node_run_result=NodeRunResult(
                status=WorkflowNodeExecutionStatus.EXCEPTION,
                inputs=event.node_run_result.inputs,
                process_data=event.node_run_result.process_data,
                outputs=outputs,
                edge_source_handle="fail-branch",
                metadata={
                    WorkflowNodeExecutionMetadataKey.ERROR_STRATEGY: ErrorStrategy.FAIL_BRANCH,
                },
            ),
            error=event.error,
        )
@@ -1,52 +0,0 @@
"""
Retry error strategy implementation.
"""

import time
from typing import final

from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase, NodeRunFailedEvent, NodeRunRetryEvent


@final
class RetryStrategy:
    """
    Error strategy that retries failed nodes.

    This strategy re-attempts node execution up to a configured
    maximum number of retries with configurable intervals.
    """

    def handle_error(self, event: NodeRunFailedEvent, graph: Graph, retry_count: int) -> GraphNodeEventBase | None:
        """
        Handle error by retrying the node.

        Args:
            event: The failure event
            graph: The workflow graph
            retry_count: Current retry attempt count

        Returns:
            NodeRunRetryEvent if retry should occur, None otherwise
        """
        node = graph.nodes[event.node_id]

        # Check if we've exceeded max retries
        if not node.retry or retry_count >= node.retry_config.max_retries:
            return None

        # Wait for retry interval
        time.sleep(node.retry_config.retry_interval_seconds)

        # Create retry event
        return NodeRunRetryEvent(
            id=event.id,
            node_title=node.title,
            node_id=event.node_id,
            node_type=event.node_type,
            node_run_result=event.node_run_result,
            start_at=event.start_at,
            error=event.error,
            retry_index=retry_count + 1,
        )
@@ -3,6 +3,7 @@ Event handler implementations for different event types.
"""

import logging
from functools import singledispatchmethod
from typing import TYPE_CHECKING, final

from core.workflow.entities import GraphRuntimeState

@@ -31,9 +32,9 @@ from ..domain.graph_execution import GraphExecution
from ..response_coordinator import ResponseStreamCoordinator

if TYPE_CHECKING:
    from ..error_handling import ErrorHandler
    from ..error_handler import ErrorHandler
    from ..graph_state_manager import GraphStateManager
    from ..graph_traversal import EdgeProcessor
    from ..state_management import UnifiedStateManager
    from .event_manager import EventManager

logger = logging.getLogger(__name__)

@@ -56,7 +57,7 @@ class EventHandler:
        response_coordinator: ResponseStreamCoordinator,
        event_collector: "EventManager",
        edge_processor: "EdgeProcessor",
        state_manager: "UnifiedStateManager",
        state_manager: "GraphStateManager",
        error_handler: "ErrorHandler",
    ) -> None:
        """

@@ -81,7 +82,7 @@ class EventHandler:
        self._state_manager = state_manager
        self._error_handler = error_handler

    def handle_event(self, event: GraphNodeEventBase) -> None:
    def dispatch(self, event: GraphNodeEventBase) -> None:
        """
        Handle any node event by dispatching to the appropriate handler.

@@ -92,42 +93,27 @@ class EventHandler:
        if event.in_loop_id or event.in_iteration_id:
            self._event_collector.collect(event)
            return
        return self._dispatch(event)

        # Handle specific event types
        if isinstance(event, NodeRunStartedEvent):
            self._handle_node_started(event)
        elif isinstance(event, NodeRunStreamChunkEvent):
            self._handle_stream_chunk(event)
        elif isinstance(event, NodeRunSucceededEvent):
            self._handle_node_succeeded(event)
        elif isinstance(event, NodeRunFailedEvent):
            self._handle_node_failed(event)
        elif isinstance(event, NodeRunExceptionEvent):
            self._handle_node_exception(event)
        elif isinstance(event, NodeRunRetryEvent):
            self._handle_node_retry(event)
        elif isinstance(
            event,
            (
                NodeRunIterationStartedEvent,
                NodeRunIterationNextEvent,
                NodeRunIterationSucceededEvent,
                NodeRunIterationFailedEvent,
                NodeRunLoopStartedEvent,
                NodeRunLoopNextEvent,
                NodeRunLoopSucceededEvent,
                NodeRunLoopFailedEvent,
                NodeRunAgentLogEvent,
            ),
        ):
            # Iteration and loop events are collected directly
            self._event_collector.collect(event)
        else:
            # Collect unhandled events
            self._event_collector.collect(event)
            logger.warning("Unhandled event type: %s", type(event).__name__)
    @singledispatchmethod
    def _dispatch(self, event: GraphNodeEventBase) -> None:
        self._event_collector.collect(event)
        logger.warning("Unhandled event type: %s", type(event).__name__)

    def _handle_node_started(self, event: NodeRunStartedEvent) -> None:
    @_dispatch.register(NodeRunIterationStartedEvent)
    @_dispatch.register(NodeRunIterationNextEvent)
    @_dispatch.register(NodeRunIterationSucceededEvent)
    @_dispatch.register(NodeRunIterationFailedEvent)
    @_dispatch.register(NodeRunLoopStartedEvent)
    @_dispatch.register(NodeRunLoopNextEvent)
    @_dispatch.register(NodeRunLoopSucceededEvent)
    @_dispatch.register(NodeRunLoopFailedEvent)
    @_dispatch.register(NodeRunAgentLogEvent)
    def _(self, event: GraphNodeEventBase) -> None:
        self._event_collector.collect(event)

    @_dispatch.register
    def _(self, event: NodeRunStartedEvent) -> None:
        """
        Handle node started event.

@@ -144,7 +130,8 @@ class EventHandler:
        # Collect the event
        self._event_collector.collect(event)

    def _handle_stream_chunk(self, event: NodeRunStreamChunkEvent) -> None:
    @_dispatch.register
    def _(self, event: NodeRunStreamChunkEvent) -> None:
        """
        Handle stream chunk event with full processing.

@@ -158,7 +145,8 @@ class EventHandler:
        for stream_event in streaming_events:
            self._event_collector.collect(stream_event)

    def _handle_node_succeeded(self, event: NodeRunSucceededEvent) -> None:
    @_dispatch.register
    def _(self, event: NodeRunSucceededEvent) -> None:
        """
        Handle node success by coordinating subsystems.

@@ -208,7 +196,8 @@ class EventHandler:
        # Collect the event
        self._event_collector.collect(event)

    def _handle_node_failed(self, event: NodeRunFailedEvent) -> None:
    @_dispatch.register
    def _(self, event: NodeRunFailedEvent) -> None:
        """
        Handle node failure using error handler.

@@ -223,14 +212,15 @@ class EventHandler:

        if result:
            # Process the resulting event (retry, exception, etc.)
            self.handle_event(result)
            self.dispatch(result)
        else:
            # Abort execution
            self._graph_execution.fail(RuntimeError(event.error))
            self._event_collector.collect(event)
            self._state_manager.finish_execution(event.node_id)

    def _handle_node_exception(self, event: NodeRunExceptionEvent) -> None:
    @_dispatch.register
    def _(self, event: NodeRunExceptionEvent) -> None:
        """
        Handle node exception event (fail-branch strategy).

@@ -241,7 +231,8 @@ class EventHandler:
        node_execution = self._graph_execution.get_or_create_node_execution(event.node_id)
        node_execution.mark_taken()

    def _handle_node_retry(self, event: NodeRunRetryEvent) -> None:
    @_dispatch.register
    def _(self, event: NodeRunRetryEvent) -> None:
        """
        Handle node retry event.
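The refactor above replaces a long isinstance chain with `functools.singledispatchmethod`, which dispatches on the runtime type of the first argument after `self`; stacking several `register` decorators, as done for the iteration and loop events, binds one implementation to many types. A self-contained illustration of the pattern (standalone demo code, not engine code):

    from functools import singledispatchmethod

    class Handler:
        @singledispatchmethod
        def handle(self, event) -> str:
            # Fallback for unregistered types, like the warning branch above.
            return f"unhandled: {type(event).__name__}"

        @handle.register
        def _(self, event: int) -> str:
            return f"int event: {event}"

        @handle.register(float)
        @handle.register(complex)
        def _(self, event) -> str:
            return "numeric event"  # one body registered for two types

    h = Handler()
    assert h.handle(3) == "int event: 3"
    assert h.handle(2.0) == "numeric event"
    assert h.handle("x") == "unhandled: str"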
@@ -8,16 +8,16 @@ Domain-Driven Design principles for improved maintainability and testability.
import contextvars
import logging
import queue
from collections.abc import Generator, Mapping
from collections.abc import Generator
from typing import final

from flask import Flask, current_app

from core.app.entities.app_invoke_entities import InvokeFrom
from core.workflow.entities import GraphRuntimeState
from core.workflow.enums import NodeExecutionType
from core.workflow.graph import Graph
from core.workflow.graph.read_only_state_wrapper import ReadOnlyGraphRuntimeStateWrapper
from core.workflow.graph_engine.ready_queue import InMemoryReadyQueue
from core.workflow.graph_events import (
    GraphEngineEvent,
    GraphNodeEventBase,

@@ -26,19 +26,19 @@ from core.workflow.graph_events import (
    GraphRunStartedEvent,
    GraphRunSucceededEvent,
)
from models.enums import UserFrom

from .command_processing import AbortCommandHandler, CommandProcessor
from .domain import ExecutionContext, GraphExecution
from .domain import GraphExecution
from .entities.commands import AbortCommand
from .error_handling import ErrorHandler
from .error_handler import ErrorHandler
from .event_management import EventHandler, EventManager
from .graph_state_manager import GraphStateManager
from .graph_traversal import EdgeProcessor, SkipPropagator
from .layers.base import GraphEngineLayer
from .orchestration import Dispatcher, ExecutionCoordinator
from .protocols.command_channel import CommandChannel
from .ready_queue import ReadyQueue, ReadyQueueState, create_ready_queue_from_state
from .response_coordinator import ResponseStreamCoordinator
from .state_management import UnifiedStateManager
from .worker_management import WorkerPool

logger = logging.getLogger(__name__)

@@ -55,18 +55,9 @@ class GraphEngine:

    def __init__(
        self,
        tenant_id: str,
        app_id: str,
        workflow_id: str,
        user_id: str,
        user_from: UserFrom,
        invoke_from: InvokeFrom,
        call_depth: int,
        graph: Graph,
        graph_config: Mapping[str, object],
        graph_runtime_state: GraphRuntimeState,
        max_execution_steps: int,
        max_execution_time: int,
        command_channel: CommandChannel,
        min_workers: int | None = None,
        max_workers: int | None = None,

@@ -75,27 +66,14 @@ class GraphEngine:
    ) -> None:
        """Initialize the graph engine with all subsystems and dependencies."""

        # === Domain Models ===
        # Execution context encapsulates workflow execution metadata
        self._execution_context = ExecutionContext(
            tenant_id=tenant_id,
            app_id=app_id,
            workflow_id=workflow_id,
            user_id=user_id,
            user_from=user_from,
            invoke_from=invoke_from,
            call_depth=call_depth,
            max_execution_steps=max_execution_steps,
            max_execution_time=max_execution_time,
        )

        # Graph execution tracks the overall execution state
        self._graph_execution = GraphExecution(workflow_id=workflow_id)
        if graph_runtime_state.graph_execution_json != "":
            self._graph_execution.loads(graph_runtime_state.graph_execution_json)

        # === Core Dependencies ===
        # Graph structure and configuration
        self._graph = graph
        self._graph_config = graph_config
        self._graph_runtime_state = graph_runtime_state
        self._command_channel = command_channel

@@ -107,20 +85,28 @@ class GraphEngine:
        self._scale_down_idle_time = scale_down_idle_time

        # === Execution Queues ===
        # Queue for nodes ready to execute
        self._ready_queue: queue.Queue[str] = queue.Queue()
        # Create ready queue from saved state or initialize new one
        self._ready_queue: ReadyQueue
        if self._graph_runtime_state.ready_queue_json == "":
            self._ready_queue = InMemoryReadyQueue()
        else:
            ready_queue_state = ReadyQueueState.model_validate_json(self._graph_runtime_state.ready_queue_json)
            self._ready_queue = create_ready_queue_from_state(ready_queue_state)

        # Queue for events generated during execution
        self._event_queue: queue.Queue[GraphNodeEventBase] = queue.Queue()

        # === State Management ===
        # Unified state manager handles all node state transitions and queue operations
        self._state_manager = UnifiedStateManager(self._graph, self._ready_queue)
        self._state_manager = GraphStateManager(self._graph, self._ready_queue)

        # === Response Coordination ===
        # Coordinates response streaming from response nodes
        self._response_coordinator = ResponseStreamCoordinator(
            variable_pool=self._graph_runtime_state.variable_pool, graph=self._graph
        )
        if graph_runtime_state.response_coordinator_json != "":
            self._response_coordinator.loads(graph_runtime_state.response_coordinator_json)

        # === Event Management ===
        # Event manager handles both collection and emission of events

@@ -216,7 +202,6 @@ class GraphEngine:
            event_handler=self._event_handler_registry,
            event_collector=self._event_manager,
            execution_coordinator=self._execution_coordinator,
            max_execution_time=self._execution_context.max_execution_time,
            event_emitter=self._event_manager,
        )
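Taken together, these constructor changes make a run resumable: the ready queue, response coordinator, and graph execution are each restored from JSON snapshots stored on the GraphRuntimeState. A hedged sketch of the matching checkpoint step (assuming the `dumps()` counterparts introduced in this commit and writable `*_json` fields on the runtime state; not shown in this diff):

    # Hypothetical checkpoint before pausing a run; the inverse of the
    # restore logic in __init__ above. Field names mirror the ones read there.
    graph_runtime_state.ready_queue_json = ready_queue.dumps()
    graph_runtime_state.response_coordinator_json = response_coordinator.dumps()
    graph_runtime_state.graph_execution_json = graph_execution.dumps()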
@@ -1,11 +1,7 @@
"""
Unified state manager that combines node, edge, and execution tracking.

This is a proposed simplification that merges NodeStateManager, EdgeStateManager,
and ExecutionTracker into a single cohesive class.
Graph state manager that combines node, edge, and execution tracking.
"""

import queue
import threading
from collections.abc import Sequence
from typing import TypedDict, final

@@ -13,6 +9,8 @@ from typing import TypedDict, final
from core.workflow.enums import NodeState
from core.workflow.graph import Edge, Graph

from .ready_queue import ReadyQueue


class EdgeStateAnalysis(TypedDict):
    """Analysis result for edge states."""

@@ -23,24 +21,10 @@ class EdgeStateAnalysis(TypedDict):


@final
class UnifiedStateManager:
    """
    Unified manager for all graph state operations.

    This class combines the responsibilities of:
    - NodeStateManager: Node state transitions and ready queue
    - EdgeStateManager: Edge state transitions and analysis
    - ExecutionTracker: Tracking executing nodes

    Benefits:
    - Single lock for all state operations (reduced contention)
    - Cohesive state management interface
    - Simplified dependency injection
    """

    def __init__(self, graph: Graph, ready_queue: queue.Queue[str]) -> None:
class GraphStateManager:
    def __init__(self, graph: Graph, ready_queue: ReadyQueue) -> None:
        """
        Initialize the unified state manager.
        Initialize the state manager.

        Args:
            graph: The workflow graph
@@ -9,8 +9,8 @@ from core.workflow.enums import NodeExecutionType
from core.workflow.graph import Edge, Graph
from core.workflow.graph_events import NodeRunStreamChunkEvent

from ..graph_state_manager import GraphStateManager
from ..response_coordinator import ResponseStreamCoordinator
from ..state_management import UnifiedStateManager

if TYPE_CHECKING:
    from .skip_propagator import SkipPropagator

@@ -29,7 +29,7 @@ class EdgeProcessor:
    def __init__(
        self,
        graph: Graph,
        state_manager: UnifiedStateManager,
        state_manager: GraphStateManager,
        response_coordinator: ResponseStreamCoordinator,
        skip_propagator: "SkipPropagator",
    ) -> None:
@@ -7,7 +7,7 @@ from typing import final

from core.workflow.graph import Edge, Graph

from ..state_management import UnifiedStateManager
from ..graph_state_manager import GraphStateManager


@final

@@ -22,7 +22,7 @@ class SkipPropagator:
    def __init__(
        self,
        graph: Graph,
        state_manager: UnifiedStateManager,
        state_manager: GraphStateManager,
    ) -> None:
        """
        Initialize the skip propagator.
@@ -34,7 +34,6 @@ class Dispatcher:
        event_handler: "EventHandler",
        event_collector: EventManager,
        execution_coordinator: ExecutionCoordinator,
        max_execution_time: int,
        event_emitter: EventManager | None = None,
    ) -> None:
        """

@@ -45,14 +44,12 @@ class Dispatcher:
            event_handler: Event handler registry for processing events
            event_collector: Event manager for collecting unhandled events
            execution_coordinator: Coordinator for execution flow
            max_execution_time: Maximum execution time in seconds
            event_emitter: Optional event manager to signal completion
        """
        self._event_queue = event_queue
        self._event_handler = event_handler
        self._event_collector = event_collector
        self._execution_coordinator = execution_coordinator
        self._max_execution_time = max_execution_time
        self._event_emitter = event_emitter

        self._thread: threading.Thread | None = None

@@ -89,7 +86,7 @@ class Dispatcher:
            try:
                event = self._event_queue.get(timeout=0.1)
                # Route to the event handler
                self._event_handler.handle_event(event)
                self._event_handler.dispatch(event)
                self._event_queue.task_done()
            except queue.Empty:
                # Check if execution is complete
@@ -7,7 +7,7 @@ from typing import TYPE_CHECKING, final
from ..command_processing import CommandProcessor
from ..domain import GraphExecution
from ..event_management import EventManager
from ..state_management import UnifiedStateManager
from ..graph_state_manager import GraphStateManager
from ..worker_management import WorkerPool

if TYPE_CHECKING:

@@ -26,7 +26,7 @@ class ExecutionCoordinator:
    def __init__(
        self,
        graph_execution: GraphExecution,
        state_manager: UnifiedStateManager,
        state_manager: GraphStateManager,
        event_handler: "EventHandler",
        event_collector: EventManager,
        command_processor: CommandProcessor,
@@ -1,31 +0,0 @@
"""
Base error strategy protocol.
"""

from typing import Protocol

from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase, NodeRunFailedEvent


class ErrorStrategy(Protocol):
    """
    Protocol for error handling strategies.

    Each strategy implements a different approach to handling
    node execution failures.
    """

    def handle_error(self, event: NodeRunFailedEvent, graph: Graph, retry_count: int) -> GraphNodeEventBase | None:
        """
        Handle a node failure event.

        Args:
            event: The failure event
            graph: The workflow graph
            retry_count: Current retry attempt count

        Returns:
            Optional new event to process, or None to stop
        """
        ...
@@ -0,0 +1,12 @@
"""
Ready queue implementations for GraphEngine.

This package contains the protocol and implementations for managing
the queue of nodes ready for execution.
"""

from .factory import create_ready_queue_from_state
from .in_memory import InMemoryReadyQueue
from .protocol import ReadyQueue, ReadyQueueState

__all__ = ["InMemoryReadyQueue", "ReadyQueue", "ReadyQueueState", "create_ready_queue_from_state"]
@@ -0,0 +1,35 @@
"""
Factory for creating ReadyQueue instances from serialized state.
"""

from typing import TYPE_CHECKING

from .in_memory import InMemoryReadyQueue
from .protocol import ReadyQueueState

if TYPE_CHECKING:
    from .protocol import ReadyQueue


def create_ready_queue_from_state(state: ReadyQueueState) -> "ReadyQueue":
    """
    Create a ReadyQueue instance from a serialized state.

    Args:
        state: The serialized queue state as a ReadyQueueState model

    Returns:
        A ReadyQueue instance initialized with the given state

    Raises:
        ValueError: If the queue type is unknown or version is unsupported
    """
    if state.type == "InMemoryReadyQueue":
        if state.version != "1.0":
            raise ValueError(f"Unsupported InMemoryReadyQueue version: {state.version}")
        queue = InMemoryReadyQueue()
        # Always pass as JSON string to loads()
        queue.loads(state.model_dump_json())
        return queue
    else:
        raise ValueError(f"Unknown ready queue type: {state.type}")
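Under the layout added here, a dump/restore round trip through the factory would look like this (a sketch; the node IDs are invented):

    queue = InMemoryReadyQueue()
    queue.put("node-1")
    queue.put("node-2")

    state = ReadyQueueState.model_validate_json(queue.dumps())
    restored = create_ready_queue_from_state(state)

    assert restored.qsize() == 2
    assert restored.get() == "node-1"  # FIFO order survives serialization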
@@ -0,0 +1,140 @@
"""
In-memory implementation of the ReadyQueue protocol.

This implementation wraps Python's standard queue.Queue and adds
serialization capabilities for state storage.
"""

import queue
from typing import final

from .protocol import ReadyQueue, ReadyQueueState


@final
class InMemoryReadyQueue(ReadyQueue):
    """
    In-memory ready queue implementation with serialization support.

    This implementation uses Python's queue.Queue internally and provides
    methods to serialize and restore the queue state.
    """

    def __init__(self, maxsize: int = 0) -> None:
        """
        Initialize the in-memory ready queue.

        Args:
            maxsize: Maximum size of the queue (0 for unlimited)
        """
        self._queue: queue.Queue[str] = queue.Queue(maxsize=maxsize)

    def put(self, item: str) -> None:
        """
        Add a node ID to the ready queue.

        Args:
            item: The node ID to add to the queue
        """
        self._queue.put(item)

    def get(self, timeout: float | None = None) -> str:
        """
        Retrieve and remove a node ID from the queue.

        Args:
            timeout: Maximum time to wait for an item (None for blocking)

        Returns:
            The node ID retrieved from the queue

        Raises:
            queue.Empty: If timeout expires and no item is available
        """
        if timeout is None:
            return self._queue.get(block=True)
        return self._queue.get(timeout=timeout)

    def task_done(self) -> None:
        """
        Indicate that a previously retrieved task is complete.

        Used by worker threads to signal task completion for
        join() synchronization.
        """
        self._queue.task_done()

    def empty(self) -> bool:
        """
        Check if the queue is empty.

        Returns:
            True if the queue has no items, False otherwise
        """
        return self._queue.empty()

    def qsize(self) -> int:
        """
        Get the approximate size of the queue.

        Returns:
            The approximate number of items in the queue
        """
        return self._queue.qsize()

    def dumps(self) -> str:
        """
        Serialize the queue state to a JSON string for storage.

        Returns:
            A JSON string containing the serialized queue state
        """
        # Extract all items from the queue without removing them
        items: list[str] = []
        temp_items: list[str] = []

        # Drain the queue temporarily to get all items
        while not self._queue.empty():
            try:
                item = self._queue.get_nowait()
                temp_items.append(item)
                items.append(item)
            except queue.Empty:
                break

        # Put items back in the same order
        for item in temp_items:
            self._queue.put(item)

        state = ReadyQueueState(
            type="InMemoryReadyQueue",
            version="1.0",
            items=items,
        )
        return state.model_dump_json()

    def loads(self, data: str) -> None:
        """
        Restore the queue state from a JSON string.

        Args:
            data: The JSON string containing the serialized queue state to restore
        """
        state = ReadyQueueState.model_validate_json(data)

        if state.type != "InMemoryReadyQueue":
            raise ValueError(f"Invalid serialized data type: {state.type}")

        if state.version != "1.0":
            raise ValueError(f"Unsupported version: {state.version}")

        # Clear the current queue
        while not self._queue.empty():
            try:
                self._queue.get_nowait()
            except queue.Empty:
                break

        # Restore items
        for item in state.items:
            self._queue.put(item)
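Note that dumps() snapshots the queue by draining it and putting every item back, so it is only safe while workers are quiescent. A possible alternative (a sketch relying on CPython implementation details, not what this commit does) copies the backing deque under the queue's own lock:

    import queue

    def snapshot(q: "queue.Queue[str]") -> list[str]:
        # q.queue is the internal deque and q.mutex guards it; both are
        # CPython implementation details rather than public API.
        with q.mutex:
            return list(q.queue)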
@@ -0,0 +1,104 @@
"""
ReadyQueue protocol for GraphEngine node execution queue.

This protocol defines the interface for managing the queue of nodes ready
for execution, supporting both in-memory and persistent storage scenarios.
"""

from collections.abc import Sequence
from typing import Protocol

from pydantic import BaseModel, Field


class ReadyQueueState(BaseModel):
    """
    Pydantic model for serialized ready queue state.

    This defines the structure of the data returned by dumps()
    and expected by loads() for ready queue serialization.
    """

    type: str = Field(description="Queue implementation type (e.g., 'InMemoryReadyQueue')")
    version: str = Field(description="Serialization format version")
    items: Sequence[str] = Field(default_factory=list, description="List of node IDs in the queue")


class ReadyQueue(Protocol):
    """
    Protocol for managing nodes ready for execution in GraphEngine.

    This protocol defines the interface that any ready queue implementation
    must provide, enabling both in-memory queues and persistent queues
    that can be serialized for state storage.
    """

    def put(self, item: str) -> None:
        """
        Add a node ID to the ready queue.

        Args:
            item: The node ID to add to the queue
        """
        ...

    def get(self, timeout: float | None = None) -> str:
        """
        Retrieve and remove a node ID from the queue.

        Args:
            timeout: Maximum time to wait for an item (None for blocking)

        Returns:
            The node ID retrieved from the queue

        Raises:
            queue.Empty: If timeout expires and no item is available
        """
        ...

    def task_done(self) -> None:
        """
        Indicate that a previously retrieved task is complete.

        Used by worker threads to signal task completion for
        join() synchronization.
        """
        ...

    def empty(self) -> bool:
        """
        Check if the queue is empty.

        Returns:
            True if the queue has no items, False otherwise
        """
        ...

    def qsize(self) -> int:
        """
        Get the approximate size of the queue.

        Returns:
            The approximate number of items in the queue
        """
        ...

    def dumps(self) -> str:
        """
        Serialize the queue state to a JSON string for storage.

        Returns:
            A JSON string containing the serialized queue state
            that can be persisted and later restored
        """
        ...

    def loads(self, data: str) -> None:
        """
        Restore the queue state from a JSON string.

        Args:
            data: The JSON string containing the serialized queue state to restore
        """
        ...
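Because ReadyQueue is a typing.Protocol, conformance is structural: any class with these seven methods type-checks as a ReadyQueue without inheriting from it (InMemoryReadyQueue above subclasses it explicitly, which is also allowed). A minimal conforming stub, assuming the ReadyQueueState model from this module; the class itself is invented for the demo:

    import queue

    class NullReadyQueue:
        """Discards everything; useful only as a structural-typing demo."""

        def put(self, item: str) -> None: ...
        def get(self, timeout: float | None = None) -> str:
            raise queue.Empty
        def task_done(self) -> None: ...
        def empty(self) -> bool:
            return True
        def qsize(self) -> int:
            return 0
        def dumps(self) -> str:
            return ReadyQueueState(type="NullReadyQueue", version="1.0").model_dump_json()
        def loads(self, data: str) -> None: ...

    rq: ReadyQueue = NullReadyQueue()  # accepted structurally by type checkers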
@@ -9,9 +9,11 @@ import logging
from collections import deque
from collections.abc import Sequence
from threading import RLock
from typing import TypeAlias, final
from typing import Literal, TypeAlias, final
from uuid import uuid4

from pydantic import BaseModel, Field

from core.workflow.entities.variable_pool import VariablePool
from core.workflow.enums import NodeExecutionType, NodeState
from core.workflow.graph import Graph

@@ -28,6 +30,43 @@ NodeID: TypeAlias = str
EdgeID: TypeAlias = str


class ResponseSessionState(BaseModel):
    """Serializable representation of a response session."""

    node_id: str
    index: int = Field(default=0, ge=0)


class StreamBufferState(BaseModel):
    """Serializable representation of buffered stream chunks."""

    selector: tuple[str, ...]
    events: list[NodeRunStreamChunkEvent] = Field(default_factory=list)


class StreamPositionState(BaseModel):
    """Serializable representation for stream read positions."""

    selector: tuple[str, ...]
    position: int = Field(default=0, ge=0)


class ResponseStreamCoordinatorState(BaseModel):
    """Serialized snapshot of ResponseStreamCoordinator."""

    type: Literal["ResponseStreamCoordinator"] = Field(default="ResponseStreamCoordinator")
    version: str = Field(default="1.0")
    response_nodes: Sequence[str] = Field(default_factory=list)
    active_session: ResponseSessionState | None = None
    waiting_sessions: Sequence[ResponseSessionState] = Field(default_factory=list)
    pending_sessions: Sequence[ResponseSessionState] = Field(default_factory=list)
    node_execution_ids: dict[str, str] = Field(default_factory=dict)
    paths_map: dict[str, list[list[str]]] = Field(default_factory=dict)
    stream_buffers: Sequence[StreamBufferState] = Field(default_factory=list)
    stream_positions: Sequence[StreamPositionState] = Field(default_factory=list)
    closed_streams: Sequence[tuple[str, ...]] = Field(default_factory=list)


@final
class ResponseStreamCoordinator:
    """

@@ -69,6 +108,8 @@ class ResponseStreamCoordinator:

    def register(self, response_node_id: NodeID) -> None:
        with self._lock:
            if response_node_id in self._response_nodes:
                return
            self._response_nodes.add(response_node_id)

            # Build and save paths map for this response node

@@ -558,3 +599,98 @@ class ResponseStreamCoordinator:
        """
        key = tuple(selector)
        return key in self._closed_streams

    def _serialize_session(self, session: ResponseSession | None) -> ResponseSessionState | None:
        """Convert an in-memory session into its serializable form."""

        if session is None:
            return None
        return ResponseSessionState(node_id=session.node_id, index=session.index)

    def _session_from_state(self, session_state: ResponseSessionState) -> ResponseSession:
        """Rebuild a response session from serialized data."""

        node = self._graph.nodes.get(session_state.node_id)
        if node is None:
            raise ValueError(f"Unknown response node '{session_state.node_id}' in serialized state")

        session = ResponseSession.from_node(node)
        session.index = session_state.index
        return session

    def dumps(self) -> str:
        """Serialize coordinator state to JSON."""

        with self._lock:
            state = ResponseStreamCoordinatorState(
                response_nodes=sorted(self._response_nodes),
                active_session=self._serialize_session(self._active_session),
                waiting_sessions=[
                    session_state
                    for session in list(self._waiting_sessions)
                    if (session_state := self._serialize_session(session)) is not None
                ],
                pending_sessions=[
                    session_state
                    for _, session in sorted(self._response_sessions.items())
                    if (session_state := self._serialize_session(session)) is not None
                ],
                node_execution_ids=dict(sorted(self._node_execution_ids.items())),
                paths_map={
                    node_id: [path.edges.copy() for path in paths]
                    for node_id, paths in sorted(self._paths_maps.items())
                },
                stream_buffers=[
                    StreamBufferState(
                        selector=selector,
                        events=[event.model_copy(deep=True) for event in events],
                    )
                    for selector, events in sorted(self._stream_buffers.items())
                ],
                stream_positions=[
                    StreamPositionState(selector=selector, position=position)
                    for selector, position in sorted(self._stream_positions.items())
                ],
                closed_streams=sorted(self._closed_streams),
            )
            return state.model_dump_json()

    def loads(self, data: str) -> None:
        """Restore coordinator state from JSON."""

        state = ResponseStreamCoordinatorState.model_validate_json(data)

        if state.type != "ResponseStreamCoordinator":
            raise ValueError(f"Invalid serialized data type: {state.type}")

        if state.version != "1.0":
            raise ValueError(f"Unsupported serialized version: {state.version}")

        with self._lock:
            self._response_nodes = set(state.response_nodes)
            self._paths_maps = {
                node_id: [Path(edges=list(path_edges)) for path_edges in paths]
                for node_id, paths in state.paths_map.items()
            }
            self._node_execution_ids = dict(state.node_execution_ids)

            self._stream_buffers = {
                tuple(buffer.selector): [event.model_copy(deep=True) for event in buffer.events]
                for buffer in state.stream_buffers
            }
            self._stream_positions = {
                tuple(position.selector): position.position for position in state.stream_positions
            }
            for selector in self._stream_buffers:
                self._stream_positions.setdefault(selector, 0)

            self._closed_streams = {tuple(selector) for selector in state.closed_streams}

            self._waiting_sessions = deque(
                self._session_from_state(session_state) for session_state in state.waiting_sessions
            )
            self._response_sessions = {
                session_state.node_id: self._session_from_state(session_state)
                for session_state in state.pending_sessions
            }
            self._active_session = self._session_from_state(state.active_session) if state.active_session else None
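The serializer sorts every collection so that dumps() is deterministic for a given state, and loads() refuses snapshots with an unexpected type or version. A hedged round trip, assuming a coordinator wired to a real graph and variable pool as the constructor requires:

    blob = coordinator.dumps()

    restored = ResponseStreamCoordinator(variable_pool=variable_pool, graph=graph)
    restored.loads(blob)  # raises ValueError on a type or version mismatch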
@@ -19,7 +19,7 @@ class Path:
    Note: This is an internal class not exposed in the public API.
    """

    edges: list[EdgeID] = field(default_factory=list)
    edges: list[EdgeID] = field(default_factory=list[EdgeID])

    def contains_edge(self, edge_id: EdgeID) -> bool:
        """Check if this path contains the given edge."""
@@ -1,12 +0,0 @@
"""
State management subsystem for graph engine.

This package manages node states, edge states, and execution tracking
during workflow graph execution.
"""

from .unified_state_manager import UnifiedStateManager

__all__ = [
    "UnifiedStateManager",
]
@@ -22,6 +22,8 @@ from core.workflow.graph_events import GraphNodeEventBase, NodeRunFailedEvent
from core.workflow.nodes.base.node import Node
from libs.flask_utils import preserve_flask_contexts

from .ready_queue import ReadyQueue


@final
class Worker(threading.Thread):

@@ -35,7 +37,7 @@ class Worker(threading.Thread):

    def __init__(
        self,
        ready_queue: queue.Queue[str],
        ready_queue: ReadyQueue,
        event_queue: queue.Queue[GraphNodeEventBase],
        graph: Graph,
        worker_id: int = 0,

@@ -46,7 +48,7 @@ class Worker(threading.Thread):
        Initialize worker thread.

        Args:
            ready_queue: Queue containing node IDs ready for execution
            ready_queue: Ready queue containing node IDs ready for execution
            event_queue: Queue for pushing execution events
            graph: Graph containing nodes to execute
            worker_id: Unique identifier for this worker
@@ -14,6 +14,7 @@ from configs import dify_config
from core.workflow.graph import Graph
from core.workflow.graph_events import GraphNodeEventBase

from ..ready_queue import ReadyQueue
from ..worker import Worker

logger = logging.getLogger(__name__)

@@ -35,7 +36,7 @@ class WorkerPool:

    def __init__(
        self,
        ready_queue: queue.Queue[str],
        ready_queue: ReadyQueue,
        event_queue: queue.Queue[GraphNodeEventBase],
        graph: Graph,
        flask_app: "Flask | None" = None,

@@ -49,7 +50,7 @@ class WorkerPool:
        Initialize the simple worker pool.

        Args:
            ready_queue: Queue of nodes ready for execution
            ready_queue: Ready queue for nodes ready for execution
            event_queue: Queue for worker events
            graph: The workflow graph
            flask_app: Optional Flask app for context preservation
@@ -1,5 +1,5 @@
from collections.abc import Mapping
from typing import Any, Optional
from typing import Any

from pydantic import Field

@@ -14,4 +14,4 @@ class NodeRunAgentLogEvent(GraphAgentNodeEventBase):
    error: str | None = Field(..., description="error")
    status: str = Field(..., description="status")
    data: Mapping[str, Any] = Field(..., description="data")
    metadata: Optional[Mapping[str, Any]] = Field(default=None, description="metadata")
    metadata: Mapping[str, object] = Field(default_factory=dict)
@@ -1,5 +1,3 @@
from typing import Optional

from pydantic import BaseModel, Field

from core.workflow.enums import NodeType

@@ -19,9 +17,9 @@ class GraphNodeEventBase(GraphEngineEvent):
    node_id: str
    node_type: NodeType

    in_iteration_id: Optional[str] = None
    in_iteration_id: str | None = None
    """iteration id if node is in iteration"""
    in_loop_id: Optional[str] = None
    in_loop_id: str | None = None
    """loop id if node is in loop"""

    # The version of the node, or "1" if not specified.
@@ -1,5 +1,3 @@
from typing import Any, Optional

from pydantic import Field

from core.workflow.graph_events import BaseGraphEvent

@@ -10,7 +8,7 @@ class GraphRunStartedEvent(BaseGraphEvent):


class GraphRunSucceededEvent(BaseGraphEvent):
    outputs: Optional[dict[str, Any]] = None
    outputs: dict[str, object] = Field(default_factory=dict)


class GraphRunFailedEvent(BaseGraphEvent):

@@ -20,11 +18,11 @@ class GraphRunFailedEvent(BaseGraphEvent):

class GraphRunPartialSucceededEvent(BaseGraphEvent):
    exceptions_count: int = Field(..., description="exception count")
    outputs: Optional[dict[str, Any]] = None
    outputs: dict[str, object] = Field(default_factory=dict)


class GraphRunAbortedEvent(BaseGraphEvent):
    """Event emitted when a graph run is aborted by user command."""

    reason: Optional[str] = Field(default=None, description="reason for abort")
    outputs: Optional[dict[str, Any]] = Field(default=None, description="partial outputs if any")
    reason: str | None = Field(default=None, description="reason for abort")
    outputs: dict[str, object] = Field(default_factory=dict, description="partial outputs if any")
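The event models in the hunks above and below drop `Optional[...] = None` in favor of empty-container defaults, so consumers no longer need None checks; `default_factory` matters here because each instance must get its own fresh container. A standalone illustration:

    from pydantic import BaseModel, Field

    class Event(BaseModel):
        outputs: dict[str, object] = Field(default_factory=dict)

    a, b = Event(), Event()
    a.outputs["k"] = 1
    assert b.outputs == {}  # no state shared between instances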
@@ -1,6 +1,6 @@
from collections.abc import Mapping
from datetime import datetime
from typing import Any, Optional
from typing import Any

from pydantic import Field

@@ -10,31 +10,31 @@ from .base import GraphNodeEventBase
class NodeRunIterationStartedEvent(GraphNodeEventBase):
    node_title: str
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    predecessor_node_id: Optional[str] = None
    inputs: Mapping[str, object] = Field(default_factory=dict)
    metadata: Mapping[str, object] = Field(default_factory=dict)
    predecessor_node_id: str | None = None


class NodeRunIterationNextEvent(GraphNodeEventBase):
    node_title: str
    index: int = Field(..., description="index")
    pre_iteration_output: Optional[Any] = None
    pre_iteration_output: Any = None


class NodeRunIterationSucceededEvent(GraphNodeEventBase):
    node_title: str
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[Mapping[str, Any]] = None
    outputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    inputs: Mapping[str, object] = Field(default_factory=dict)
    outputs: Mapping[str, object] = Field(default_factory=dict)
    metadata: Mapping[str, object] = Field(default_factory=dict)
    steps: int = 0


class NodeRunIterationFailedEvent(GraphNodeEventBase):
    node_title: str
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[Mapping[str, Any]] = None
    outputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    inputs: Mapping[str, object] = Field(default_factory=dict)
    outputs: Mapping[str, object] = Field(default_factory=dict)
    metadata: Mapping[str, object] = Field(default_factory=dict)
    steps: int = 0
    error: str = Field(..., description="failed reason")
@@ -1,6 +1,6 @@
from collections.abc import Mapping
from datetime import datetime
from typing import Any, Optional
from typing import Any

from pydantic import Field

@@ -10,31 +10,31 @@ from .base import GraphNodeEventBase
class NodeRunLoopStartedEvent(GraphNodeEventBase):
    node_title: str
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    predecessor_node_id: Optional[str] = None
    inputs: Mapping[str, object] = Field(default_factory=dict)
    metadata: Mapping[str, object] = Field(default_factory=dict)
    predecessor_node_id: str | None = None


class NodeRunLoopNextEvent(GraphNodeEventBase):
    node_title: str
    index: int = Field(..., description="index")
    pre_loop_output: Optional[Any] = None
    pre_loop_output: Any = None


class NodeRunLoopSucceededEvent(GraphNodeEventBase):
    node_title: str
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[Mapping[str, Any]] = None
    outputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    inputs: Mapping[str, object] = Field(default_factory=dict)
    outputs: Mapping[str, object] = Field(default_factory=dict)
    metadata: Mapping[str, object] = Field(default_factory=dict)
    steps: int = 0


class NodeRunLoopFailedEvent(GraphNodeEventBase):
    node_title: str
    start_at: datetime = Field(..., description="start at")
    inputs: Optional[Mapping[str, Any]] = None
    outputs: Optional[Mapping[str, Any]] = None
    metadata: Optional[Mapping[str, Any]] = None
    inputs: Mapping[str, object] = Field(default_factory=dict)
    outputs: Mapping[str, object] = Field(default_factory=dict)
    metadata: Mapping[str, object] = Field(default_factory=dict)
    steps: int = 0
    error: str = Field(..., description="failed reason")
@@ -1,6 +1,5 @@
from collections.abc import Sequence
from datetime import datetime
from typing import Optional

from pydantic import Field

@@ -12,9 +11,8 @@ from .base import GraphNodeEventBase

class NodeRunStartedEvent(GraphNodeEventBase):
    node_title: str
    predecessor_node_id: Optional[str] = None
    parallel_mode_run_id: Optional[str] = None
    agent_strategy: Optional[AgentNodeStrategyInit] = None
    predecessor_node_id: str | None = None
    agent_strategy: AgentNodeStrategyInit | None = None
    start_at: datetime = Field(..., description="node start time")

    # FIXME(-LAN-): only for ToolNode
@@ -1,5 +1,5 @@
 from collections.abc import Mapping
-from typing import Any, Optional
+from typing import Any

 from pydantic import Field

@@ -14,5 +14,5 @@ class AgentLogEvent(NodeEventBase):
     error: str | None = Field(..., description="error")
     status: str = Field(..., description="status")
     data: Mapping[str, Any] = Field(..., description="data")
-    metadata: Optional[Mapping[str, Any]] = Field(default=None, description="metadata")
+    metadata: Mapping[str, Any] = Field(default_factory=dict, description="metadata")
     node_id: str = Field(..., description="node id")

@@ -1,6 +1,6 @@
 from collections.abc import Mapping
 from datetime import datetime
-from typing import Any, Optional
+from typing import Any

 from pydantic import Field

@@ -9,28 +9,28 @@ from .base import NodeEventBase

 class IterationStartedEvent(NodeEventBase):
     start_at: datetime = Field(..., description="start at")
-    inputs: Optional[Mapping[str, Any]] = None
-    metadata: Optional[Mapping[str, Any]] = None
-    predecessor_node_id: Optional[str] = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
+    predecessor_node_id: str | None = None


 class IterationNextEvent(NodeEventBase):
     index: int = Field(..., description="index")
-    pre_iteration_output: Optional[Any] = None
+    pre_iteration_output: Any = None


 class IterationSucceededEvent(NodeEventBase):
     start_at: datetime = Field(..., description="start at")
-    inputs: Optional[Mapping[str, Any]] = None
-    outputs: Optional[Mapping[str, Any]] = None
-    metadata: Optional[Mapping[str, Any]] = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     steps: int = 0


 class IterationFailedEvent(NodeEventBase):
     start_at: datetime = Field(..., description="start at")
-    inputs: Optional[Mapping[str, Any]] = None
-    outputs: Optional[Mapping[str, Any]] = None
-    metadata: Optional[Mapping[str, Any]] = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     steps: int = 0
     error: str = Field(..., description="failed reason")

@@ -1,6 +1,6 @@
 from collections.abc import Mapping
 from datetime import datetime
-from typing import Any, Optional
+from typing import Any

 from pydantic import Field

@@ -9,28 +9,28 @@ from .base import NodeEventBase

 class LoopStartedEvent(NodeEventBase):
     start_at: datetime = Field(..., description="start at")
-    inputs: Optional[Mapping[str, Any]] = None
-    metadata: Optional[Mapping[str, Any]] = None
-    predecessor_node_id: Optional[str] = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
+    predecessor_node_id: str | None = None


 class LoopNextEvent(NodeEventBase):
     index: int = Field(..., description="index")
-    pre_loop_output: Optional[Any] = None
+    pre_loop_output: Any = None


 class LoopSucceededEvent(NodeEventBase):
     start_at: datetime = Field(..., description="start at")
-    inputs: Optional[Mapping[str, Any]] = None
-    outputs: Optional[Mapping[str, Any]] = None
-    metadata: Optional[Mapping[str, Any]] = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     steps: int = 0


 class LoopFailedEvent(NodeEventBase):
     start_at: datetime = Field(..., description="start at")
-    inputs: Optional[Mapping[str, Any]] = None
-    outputs: Optional[Mapping[str, Any]] = None
-    metadata: Optional[Mapping[str, Any]] = None
+    inputs: Mapping[str, object] = Field(default_factory=dict)
+    outputs: Mapping[str, object] = Field(default_factory=dict)
+    metadata: Mapping[str, object] = Field(default_factory=dict)
     steps: int = 0
     error: str = Field(..., description="failed reason")

@@ -33,7 +33,13 @@ from core.workflow.enums import (
     WorkflowNodeExecutionMetadataKey,
     WorkflowNodeExecutionStatus,
 )
-from core.workflow.node_events import AgentLogEvent, NodeRunResult, StreamChunkEvent, StreamCompletedEvent
+from core.workflow.node_events import (
+    AgentLogEvent,
+    NodeEventBase,
+    NodeRunResult,
+    StreamChunkEvent,
+    StreamCompletedEvent,
+)
 from core.workflow.nodes.agent.entities import AgentNodeData, AgentOldVersionModelFeatures, ParamsAutoGenerated
 from core.workflow.nodes.base.entities import BaseNodeData, RetryConfig
 from core.workflow.nodes.base.node import Node

@@ -93,7 +99,7 @@ class AgentNode(Node):
     def version(cls) -> str:
         return "1"

-    def _run(self) -> Generator:
+    def _run(self) -> Generator[NodeEventBase, None, None]:
         from core.plugin.impl.exc import PluginDaemonClientSideError

         try:

@@ -482,7 +488,7 @@ class AgentNode(Node):
         node_type: NodeType,
         node_id: str,
         node_execution_id: str,
-    ) -> Generator:
+    ) -> Generator[NodeEventBase, None, None]:
         """
         Convert ToolInvokeMessages into tuple[plain_text, files]
         """

@@ -1,12 +1,13 @@
 import logging
 from abc import abstractmethod
-from collections.abc import Callable, Generator, Mapping, Sequence
-from typing import TYPE_CHECKING, Any, ClassVar, Optional
+from collections.abc import Generator, Mapping, Sequence
+from functools import singledispatchmethod
+from typing import Any, ClassVar
 from uuid import uuid4

 from core.app.entities.app_invoke_entities import InvokeFrom
-from core.workflow.entities import AgentNodeStrategyInit
-from core.workflow.enums import NodeExecutionType, NodeState, NodeType, WorkflowNodeExecutionStatus
+from core.workflow.entities import AgentNodeStrategyInit, GraphInitParams, GraphRuntimeState
+from core.workflow.enums import ErrorStrategy, NodeExecutionType, NodeState, NodeType, WorkflowNodeExecutionStatus
 from core.workflow.graph_events import (
     GraphNodeEventBase,
     NodeRunAgentLogEvent,

@@ -45,11 +46,6 @@ from models.enums import UserFrom

 from .entities import BaseNodeData, RetryConfig

-if TYPE_CHECKING:
-    from core.workflow.entities import GraphInitParams, GraphRuntimeState
-    from core.workflow.enums import ErrorStrategy, NodeType
-    from core.workflow.node_events import NodeRunResult
-
 logger = logging.getLogger(__name__)

@@ -88,14 +84,14 @@ class Node:
     def init_node_data(self, data: Mapping[str, Any]) -> None: ...

     @abstractmethod
-    def _run(self) -> "NodeRunResult | Generator[GraphNodeEventBase, None, None]":
+    def _run(self) -> NodeRunResult | Generator[NodeEventBase, None, None]:
         """
         Run node
         :return:
         """
         raise NotImplementedError

-    def run(self) -> "Generator[GraphNodeEventBase, None, None]":
+    def run(self) -> Generator[GraphNodeEventBase, None, None]:
         # Generate a single node execution ID to use for all events
         if not self._node_execution_id:
             self._node_execution_id = str(uuid4())

@@ -151,12 +147,14 @@ class Node:

             # Handle event stream
             for event in result:
-                if isinstance(event, NodeEventBase):
-                    event = self._convert_node_event_to_graph_node_event(event)
-
-                if not event.in_iteration_id and not event.in_loop_id:
+                # NOTE: this is necessary because iteration and loop nodes yield GraphNodeEventBase
+                if isinstance(event, NodeEventBase):  # pyright: ignore[reportUnnecessaryIsInstance]
+                    yield self._dispatch(event)
+                elif isinstance(event, GraphNodeEventBase) and not event.in_iteration_id and not event.in_loop_id:  # pyright: ignore[reportUnnecessaryIsInstance]
                     event.id = self._node_execution_id
                     yield event
                 else:
                     yield event
         except Exception as e:
             logger.exception("Node %s failed to run", self._node_id)
             result = NodeRunResult(

@@ -249,7 +247,7 @@ class Node:
         return False

     @classmethod
-    def get_default_config(cls, filters: Optional[dict] = None) -> dict:
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         return {}

     @classmethod

@@ -270,7 +268,7 @@ class Node:
     # to BaseNodeData properties in a type-safe way

     @abstractmethod
-    def _get_error_strategy(self) -> Optional["ErrorStrategy"]:
+    def _get_error_strategy(self) -> ErrorStrategy | None:
         """Get the error strategy for this node."""
         ...

@@ -301,7 +299,7 @@ class Node:

     # Public interface properties that delegate to abstract methods
     @property
-    def error_strategy(self) -> Optional["ErrorStrategy"]:
+    def error_strategy(self) -> ErrorStrategy | None:
         """Get the error strategy for this node."""
         return self._get_error_strategy()

@@ -344,29 +342,15 @@ class Node:
                     start_at=self._start_at,
                     node_run_result=result,
                 )
-        raise Exception(f"result status {result.status} not supported")
+            case _:
+                raise Exception(f"result status {result.status} not supported")

-    def _convert_node_event_to_graph_node_event(self, event: NodeEventBase) -> GraphNodeEventBase:
-        handler_maps: dict[type[NodeEventBase], Callable[[Any], GraphNodeEventBase]] = {
-            StreamChunkEvent: self._handle_stream_chunk_event,
-            StreamCompletedEvent: self._handle_stream_completed_event,
-            AgentLogEvent: self._handle_agent_log_event,
-            LoopStartedEvent: self._handle_loop_started_event,
-            LoopNextEvent: self._handle_loop_next_event,
-            LoopSucceededEvent: self._handle_loop_succeeded_event,
-            LoopFailedEvent: self._handle_loop_failed_event,
-            IterationStartedEvent: self._handle_iteration_started_event,
-            IterationNextEvent: self._handle_iteration_next_event,
-            IterationSucceededEvent: self._handle_iteration_succeeded_event,
-            IterationFailedEvent: self._handle_iteration_failed_event,
-            RunRetrieverResourceEvent: self._handle_run_retriever_resource_event,
-        }
-        handler = handler_maps.get(type(event))
-        if not handler:
-            raise NotImplementedError(f"Node {self._node_id} does not support event type {type(event)}")
-        return handler(event)
+    @singledispatchmethod
+    def _dispatch(self, event: NodeEventBase) -> GraphNodeEventBase:
+        raise NotImplementedError(f"Node {self._node_id} does not support event type {type(event)}")

-    def _handle_stream_chunk_event(self, event: StreamChunkEvent) -> NodeRunStreamChunkEvent:
+    @_dispatch.register
+    def _(self, event: StreamChunkEvent) -> NodeRunStreamChunkEvent:
         return NodeRunStreamChunkEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -376,7 +360,8 @@ class Node:
             is_final=event.is_final,
         )

-    def _handle_stream_completed_event(self, event: StreamCompletedEvent) -> NodeRunSucceededEvent | NodeRunFailedEvent:
+    @_dispatch.register
+    def _(self, event: StreamCompletedEvent) -> NodeRunSucceededEvent | NodeRunFailedEvent:
         match event.node_run_result.status:
             case WorkflowNodeExecutionStatus.SUCCEEDED:
                 return NodeRunSucceededEvent(

@@ -395,9 +380,13 @@ class Node:
                     node_run_result=event.node_run_result,
                     error=event.node_run_result.error,
                 )
-        raise NotImplementedError(f"Node {self._node_id} does not support status {event.node_run_result.status}")
+            case _:
+                raise NotImplementedError(
+                    f"Node {self._node_id} does not support status {event.node_run_result.status}"
+                )

-    def _handle_agent_log_event(self, event: AgentLogEvent) -> NodeRunAgentLogEvent:
+    @_dispatch.register
+    def _(self, event: AgentLogEvent) -> NodeRunAgentLogEvent:
         return NodeRunAgentLogEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -412,7 +401,8 @@ class Node:
             metadata=event.metadata,
         )

-    def _handle_loop_started_event(self, event: LoopStartedEvent) -> NodeRunLoopStartedEvent:
+    @_dispatch.register
+    def _(self, event: LoopStartedEvent) -> NodeRunLoopStartedEvent:
         return NodeRunLoopStartedEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -424,7 +414,8 @@ class Node:
             predecessor_node_id=event.predecessor_node_id,
         )

-    def _handle_loop_next_event(self, event: LoopNextEvent) -> NodeRunLoopNextEvent:
+    @_dispatch.register
+    def _(self, event: LoopNextEvent) -> NodeRunLoopNextEvent:
         return NodeRunLoopNextEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -434,7 +425,8 @@ class Node:
             pre_loop_output=event.pre_loop_output,
         )

-    def _handle_loop_succeeded_event(self, event: LoopSucceededEvent) -> NodeRunLoopSucceededEvent:
+    @_dispatch.register
+    def _(self, event: LoopSucceededEvent) -> NodeRunLoopSucceededEvent:
         return NodeRunLoopSucceededEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -447,7 +439,8 @@ class Node:
             steps=event.steps,
         )

-    def _handle_loop_failed_event(self, event: LoopFailedEvent) -> NodeRunLoopFailedEvent:
+    @_dispatch.register
+    def _(self, event: LoopFailedEvent) -> NodeRunLoopFailedEvent:
         return NodeRunLoopFailedEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -461,7 +454,8 @@ class Node:
             error=event.error,
         )

-    def _handle_iteration_started_event(self, event: IterationStartedEvent) -> NodeRunIterationStartedEvent:
+    @_dispatch.register
+    def _(self, event: IterationStartedEvent) -> NodeRunIterationStartedEvent:
         return NodeRunIterationStartedEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -473,7 +467,8 @@ class Node:
             predecessor_node_id=event.predecessor_node_id,
         )

-    def _handle_iteration_next_event(self, event: IterationNextEvent) -> NodeRunIterationNextEvent:
+    @_dispatch.register
+    def _(self, event: IterationNextEvent) -> NodeRunIterationNextEvent:
         return NodeRunIterationNextEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -483,7 +478,8 @@ class Node:
             pre_iteration_output=event.pre_iteration_output,
         )

-    def _handle_iteration_succeeded_event(self, event: IterationSucceededEvent) -> NodeRunIterationSucceededEvent:
+    @_dispatch.register
+    def _(self, event: IterationSucceededEvent) -> NodeRunIterationSucceededEvent:
         return NodeRunIterationSucceededEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -496,7 +492,8 @@ class Node:
             steps=event.steps,
         )

-    def _handle_iteration_failed_event(self, event: IterationFailedEvent) -> NodeRunIterationFailedEvent:
+    @_dispatch.register
+    def _(self, event: IterationFailedEvent) -> NodeRunIterationFailedEvent:
         return NodeRunIterationFailedEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

@@ -510,7 +507,8 @@ class Node:
             error=event.error,
         )

-    def _handle_run_retriever_resource_event(self, event: RunRetrieverResourceEvent) -> NodeRunRetrieverResourceEvent:
+    @_dispatch.register
+    def _(self, event: RunRetrieverResourceEvent) -> NodeRunRetrieverResourceEvent:
         return NodeRunRetrieverResourceEvent(
             id=self._node_execution_id,
             node_id=self._node_id,

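A minimal, runnable sketch of the dispatch pattern the base Node class adopts above: functools.singledispatchmethod selects a handler by the runtime type of the event argument, replacing the removed handler map. EventA and EventB are hypothetical stand-ins, not Dify's event types.

from functools import singledispatchmethod


class EventA: ...


class EventB: ...


class Dispatcher:
    @singledispatchmethod
    def dispatch(self, event: object) -> str:
        # Fallback mirrors the NotImplementedError raised in the diff.
        raise NotImplementedError(f"unsupported event type {type(event)}")

    @dispatch.register
    def _(self, event: EventA) -> str:
        # Registered by the type annotation of `event`.
        return "handled EventA"

    @dispatch.register
    def _(self, event: EventB) -> str:
        return "handled EventB"


assert Dispatcher().dispatch(EventA()) == "handled EventA"
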
@@ -1,6 +1,6 @@
 from collections.abc import Mapping, Sequence
 from decimal import Decimal
-from typing import Any
+from typing import Any, cast

 from configs import dify_config
 from core.helper.code_executor.code_executor import CodeExecutionError, CodeExecutor, CodeLanguage

@@ -49,7 +49,7 @@ class CodeNode(Node):
         return self._node_data

     @classmethod
-    def get_default_config(cls, filters: dict | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         """
         Get default config of node.
         :param filters: filter by node config parameters.

@@ -57,7 +57,7 @@ class CodeNode(Node):
         """
         code_language = CodeLanguage.PYTHON3
         if filters:
-            code_language = filters.get("code_language", CodeLanguage.PYTHON3)
+            code_language = cast(CodeLanguage, filters.get("code_language", CodeLanguage.PYTHON3))

         providers: list[type[CodeNodeProvider]] = [Python3CodeProvider, JavascriptCodeProvider]
         code_provider: type[CodeNodeProvider] = next(p for p in providers if p.is_accept_language(code_language))

@@ -1,4 +1,4 @@
-from typing import Annotated, Literal
+from typing import Annotated, Literal, Self

 from pydantic import AfterValidator, BaseModel

@@ -34,7 +34,7 @@ class CodeNodeData(BaseNodeData):

     class Output(BaseModel):
         type: Annotated[SegmentType, AfterValidator(_validate_type)]
-        children: dict[str, "CodeNodeData.Output"] | None = None
+        children: dict[str, Self] | None = None

     class Dependency(BaseModel):
         name: str

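The children field change above relies on typing.Self (PEP 673, Python 3.11+), which Pydantic resolves for self-referencing models without a quoted forward reference. A small illustrative sketch; TreeNode is not a Dify type:

from typing import Self

from pydantic import BaseModel


class TreeNode(BaseModel):
    name: str
    # Self resolves to TreeNode here, like Output's recursive children above.
    children: dict[str, Self] | None = None


root = TreeNode(name="root", children={"leaf": TreeNode(name="leaf")})
assert root.children is not None and root.children["leaf"].name == "leaf"
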
@@ -58,7 +58,7 @@ class HttpRequestNode(Node):
         return self._node_data

     @classmethod
-    def get_default_config(cls, filters: dict[str, Any] | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         return {
             "type": "http-request",
             "config": {

@@ -39,7 +39,7 @@ class IterationState(BaseIterationState):
     """

     outputs: list[Any] = Field(default_factory=list)
-    current_output: Any | None = None
+    current_output: Any = None

     class MetaData(BaseIterationState.MetaData):
         """

@@ -48,7 +48,7 @@ class IterationState(BaseIterationState):

     iterator_length: int

-    def get_last_output(self) -> Any | None:
+    def get_last_output(self) -> Any:
         """
         Get last output.
         """

@@ -56,7 +56,7 @@ class IterationState(BaseIterationState):
             return self.outputs[-1]
         return None

-    def get_current_output(self) -> Any | None:
+    def get_current_output(self) -> Any:
         """
         Get current output.
         """

@@ -1,7 +1,10 @@
 import logging
 from collections.abc import Generator, Mapping, Sequence
 from concurrent.futures import Future, ThreadPoolExecutor, as_completed
 from datetime import UTC, datetime
-from typing import TYPE_CHECKING, Any, Union, cast
+from typing import TYPE_CHECKING, Any, NewType, cast
+
+from typing_extensions import TypeIs

 from core.variables import IntegerVariable, NoneSegment
 from core.variables.segments import ArrayAnySegment, ArraySegment

@@ -23,6 +26,7 @@ from core.workflow.node_events import (
     IterationNextEvent,
     IterationStartedEvent,
     IterationSucceededEvent,
+    NodeEventBase,
     NodeRunResult,
     StreamCompletedEvent,
 )

@@ -45,6 +49,8 @@ if TYPE_CHECKING:

 logger = logging.getLogger(__name__)

+EmptyArraySegment = NewType("EmptyArraySegment", ArraySegment)
+

 class IterationNode(Node):
     """

@@ -77,7 +83,7 @@ class IterationNode(Node):
         return self._node_data

     @classmethod
-    def get_default_config(cls, filters: dict | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         return {
             "type": "iteration",
             "config": {

@@ -91,44 +97,21 @@ class IterationNode(Node):
     def version(cls) -> str:
         return "1"

-    def _run(self) -> Generator:
-        variable = self.graph_runtime_state.variable_pool.get(self._node_data.iterator_selector)
+    def _run(self) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]:  # type: ignore
+        variable = self._get_iterator_variable()

-        if not variable:
-            raise IteratorVariableNotFoundError(f"iterator variable {self._node_data.iterator_selector} not found")
-
-        if not isinstance(variable, ArraySegment) and not isinstance(variable, NoneSegment):
-            raise InvalidIteratorValueError(f"invalid iterator value: {variable}, please provide a list.")
-
-        if isinstance(variable, NoneSegment) or len(variable.value) == 0:
-            # Try our best to preserve the type informat.
-            if isinstance(variable, ArraySegment):
-                output = variable.model_copy(update={"value": []})
-            else:
-                output = ArrayAnySegment(value=[])
-            yield StreamCompletedEvent(
-                node_run_result=NodeRunResult(
-                    status=WorkflowNodeExecutionStatus.SUCCEEDED,
-                    # TODO(QuantumGhost): is it possible to compute the type of `output`
-                    # from graph definition?
-                    outputs={"output": output},
-                )
-            )
+        if self._is_empty_iteration(variable):
+            yield from self._handle_empty_iteration(variable)
             return

-        iterator_list_value = variable.to_object()
-
-        if not isinstance(iterator_list_value, list):
-            raise InvalidIteratorValueError(f"Invalid iterator value: {iterator_list_value}, please provide a list.")
-
+        iterator_list_value = self._validate_and_get_iterator_list(variable)
         inputs = {"iterator_selector": iterator_list_value}

-        if not self._node_data.start_node_id:
-            raise StartNodeIdNotFoundError(f"field start_node_id in iteration {self._node_id} not found")
+        self._validate_start_node()

         started_at = naive_utc_now()
         iter_run_map: dict[str, float] = {}
-        outputs: list[Any] = []
+        outputs: list[object] = []

         yield IterationStartedEvent(
             start_at=started_at,

@@ -137,6 +120,86 @@ class IterationNode(Node):
         )

+        try:
+            yield from self._execute_iterations(
+                iterator_list_value=iterator_list_value,
+                outputs=outputs,
+                iter_run_map=iter_run_map,
+            )
+
+            yield from self._handle_iteration_success(
+                started_at=started_at,
+                inputs=inputs,
+                outputs=outputs,
+                iterator_list_value=iterator_list_value,
+                iter_run_map=iter_run_map,
+            )
+        except IterationNodeError as e:
+            yield from self._handle_iteration_failure(
+                started_at=started_at,
+                inputs=inputs,
+                outputs=outputs,
+                iterator_list_value=iterator_list_value,
+                iter_run_map=iter_run_map,
+                error=e,
+            )
+
+    def _get_iterator_variable(self) -> ArraySegment | NoneSegment:
+        variable = self.graph_runtime_state.variable_pool.get(self._node_data.iterator_selector)
+
+        if not variable:
+            raise IteratorVariableNotFoundError(f"iterator variable {self._node_data.iterator_selector} not found")
+
+        if not isinstance(variable, ArraySegment) and not isinstance(variable, NoneSegment):
+            raise InvalidIteratorValueError(f"invalid iterator value: {variable}, please provide a list.")
+
+        return variable
+
+    def _is_empty_iteration(self, variable: ArraySegment | NoneSegment) -> TypeIs[NoneSegment | EmptyArraySegment]:
+        return isinstance(variable, NoneSegment) or len(variable.value) == 0
+
+    def _handle_empty_iteration(self, variable: ArraySegment | NoneSegment) -> Generator[NodeEventBase, None, None]:
+        # Try our best to preserve the type information.
+        if isinstance(variable, ArraySegment):
+            output = variable.model_copy(update={"value": []})
+        else:
+            output = ArrayAnySegment(value=[])
+
+        yield StreamCompletedEvent(
+            node_run_result=NodeRunResult(
+                status=WorkflowNodeExecutionStatus.SUCCEEDED,
+                # TODO(QuantumGhost): is it possible to compute the type of `output`
+                # from graph definition?
+                outputs={"output": output},
+            )
+        )
+
+    def _validate_and_get_iterator_list(self, variable: ArraySegment) -> Sequence[object]:
+        iterator_list_value = variable.to_object()
+
+        if not isinstance(iterator_list_value, list):
+            raise InvalidIteratorValueError(f"Invalid iterator value: {iterator_list_value}, please provide a list.")
+
+        return cast(list[object], iterator_list_value)
+
+    def _validate_start_node(self) -> None:
+        if not self._node_data.start_node_id:
+            raise StartNodeIdNotFoundError(f"field start_node_id in iteration {self._node_id} not found")
+
+    def _execute_iterations(
+        self,
+        iterator_list_value: Sequence[object],
+        outputs: list[object],
+        iter_run_map: dict[str, float],
+    ) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]:
+        if self._node_data.is_parallel:
+            # Parallel mode execution
+            yield from self._execute_parallel_iterations(
+                iterator_list_value=iterator_list_value,
+                outputs=outputs,
+                iter_run_map=iter_run_map,
+            )
+        else:
+            # Sequential mode execution
+            for index, item in enumerate(iterator_list_value):
+                iter_start_at = datetime.now(UTC).replace(tzinfo=None)
+                yield IterationNextEvent(index=index)

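The _is_empty_iteration helper above pairs typing_extensions.TypeIs (PEP 742) with a NewType so the type checker narrows the variable in both branches. A standalone sketch of the same idea; EmptyList is a hypothetical stand-in for the EmptyArraySegment NewType:

from typing import NewType

from typing_extensions import TypeIs

EmptyList = NewType("EmptyList", list)


def is_empty(values: list | None) -> TypeIs[EmptyList | None]:
    # True narrows `values` to EmptyList | None; False leaves a non-empty list.
    return values is None or len(values) == 0


def first_or_default(values: list | None, default: object = None) -> object:
    if is_empty(values):
        return default
    return values[0]


assert first_or_default([1, 2]) == 1
assert first_or_default([]) is None
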
@@ -154,45 +217,146 @@ class IterationNode(Node):
                 self.graph_runtime_state.total_tokens += graph_engine.graph_runtime_state.total_tokens
                 iter_run_map[str(index)] = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()

-            yield IterationSucceededEvent(
-                start_at=started_at,
-                inputs=inputs,
-                outputs={"output": outputs},
-                steps=len(iterator_list_value),
-                metadata={
-                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
-                    WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
-                },
-            )
-
-            # Yield final success event
-            yield StreamCompletedEvent(
-                node_run_result=NodeRunResult(
-                    status=WorkflowNodeExecutionStatus.SUCCEEDED,
-                    outputs={"output": outputs},
-                    metadata={
-                        WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
-                    },
-                )
-            )
-        except IterationNodeError as e:
-            yield IterationFailedEvent(
-                start_at=started_at,
-                inputs=inputs,
-                outputs={"output": outputs},
-                steps=len(iterator_list_value),
-                metadata={
-                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
-                    WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
-                },
-                error=str(e),
-            )
-            yield StreamCompletedEvent(
-                node_run_result=NodeRunResult(
-                    status=WorkflowNodeExecutionStatus.FAILED,
-                    error=str(e),
-                )
-            )
+    def _execute_parallel_iterations(
+        self,
+        iterator_list_value: Sequence[object],
+        outputs: list[object],
+        iter_run_map: dict[str, float],
+    ) -> Generator[GraphNodeEventBase | NodeEventBase, None, None]:
+        # Initialize outputs list with None values to maintain order
+        outputs.extend([None] * len(iterator_list_value))
+
+        # Determine the number of parallel workers
+        max_workers = min(self._node_data.parallel_nums, len(iterator_list_value))
+
+        with ThreadPoolExecutor(max_workers=max_workers) as executor:
+            # Submit all iteration tasks
+            future_to_index: dict[Future[tuple[datetime, list[GraphNodeEventBase], object | None, int]], int] = {}
+            for index, item in enumerate(iterator_list_value):
+                yield IterationNextEvent(index=index)
+                future = executor.submit(
+                    self._execute_single_iteration_parallel,
+                    index=index,
+                    item=item,
+                )
+                future_to_index[future] = index
+
+            # Process completed iterations as they finish
+            for future in as_completed(future_to_index):
+                index = future_to_index[future]
+                try:
+                    result = future.result()
+                    iter_start_at, events, output_value, tokens_used = result
+
+                    # Update outputs at the correct index
+                    outputs[index] = output_value
+
+                    # Yield all events from this iteration
+                    yield from events
+
+                    # Update tokens and timing
+                    self.graph_runtime_state.total_tokens += tokens_used
+                    iter_run_map[str(index)] = (datetime.now(UTC).replace(tzinfo=None) - iter_start_at).total_seconds()
+
+                except Exception as e:
+                    # Handle errors based on error_handle_mode
+                    match self._node_data.error_handle_mode:
+                        case ErrorHandleMode.TERMINATED:
+                            # Cancel remaining futures and re-raise
+                            for f in future_to_index:
+                                if f != future:
+                                    f.cancel()
+                            raise IterationNodeError(str(e))
+                        case ErrorHandleMode.CONTINUE_ON_ERROR:
+                            outputs[index] = None
+                        case ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
+                            outputs[index] = None  # Will be filtered later
+
+        # Remove None values if in REMOVE_ABNORMAL_OUTPUT mode
+        if self._node_data.error_handle_mode == ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
+            outputs[:] = [output for output in outputs if output is not None]
+
+    def _execute_single_iteration_parallel(
+        self,
+        index: int,
+        item: object,
+    ) -> tuple[datetime, list[GraphNodeEventBase], object | None, int]:
+        """Execute a single iteration in parallel mode and return results."""
+        iter_start_at = datetime.now(UTC).replace(tzinfo=None)
+        events: list[GraphNodeEventBase] = []
+        outputs_temp: list[object] = []
+
+        graph_engine = self._create_graph_engine(index, item)
+
+        # Collect events instead of yielding them directly
+        for event in self._run_single_iter(
+            variable_pool=graph_engine.graph_runtime_state.variable_pool,
+            outputs=outputs_temp,
+            graph_engine=graph_engine,
+        ):
+            events.append(event)
+
+        # Get the output value from the temporary outputs list
+        output_value = outputs_temp[0] if outputs_temp else None
+
+        return iter_start_at, events, output_value, graph_engine.graph_runtime_state.total_tokens
+
+    def _handle_iteration_success(
+        self,
+        started_at: datetime,
+        inputs: dict[str, Sequence[object]],
+        outputs: list[object],
+        iterator_list_value: Sequence[object],
+        iter_run_map: dict[str, float],
+    ) -> Generator[NodeEventBase, None, None]:
+        yield IterationSucceededEvent(
+            start_at=started_at,
+            inputs=inputs,
+            outputs={"output": outputs},
+            steps=len(iterator_list_value),
+            metadata={
+                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
+            },
+        )
+
+        # Yield final success event
+        yield StreamCompletedEvent(
+            node_run_result=NodeRunResult(
+                status=WorkflowNodeExecutionStatus.SUCCEEDED,
+                outputs={"output": outputs},
+                steps=len(iterator_list_value),
+                metadata={
+                    WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                    WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
+                },
+            )
+        )
+
+    def _handle_iteration_failure(
+        self,
+        started_at: datetime,
+        inputs: dict[str, Sequence[object]],
+        outputs: list[object],
+        iterator_list_value: Sequence[object],
+        iter_run_map: dict[str, float],
+        error: IterationNodeError,
+    ) -> Generator[NodeEventBase, None, None]:
+        yield IterationFailedEvent(
+            start_at=started_at,
+            inputs=inputs,
+            outputs={"output": outputs},
+            steps=len(iterator_list_value),
+            metadata={
+                WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: self.graph_runtime_state.total_tokens,
+                WorkflowNodeExecutionMetadataKey.ITERATION_DURATION_MAP: iter_run_map,
+            },
+            error=str(error),
+        )
+        yield StreamCompletedEvent(
+            node_run_result=NodeRunResult(
+                status=WorkflowNodeExecutionStatus.FAILED,
+                error=str(error),
+            )
+        )

     @classmethod
     def _extract_variable_selector_to_variable_mapping(

@@ -305,9 +469,9 @@ class IterationNode(Node):
         self,
         *,
         variable_pool: VariablePool,
-        outputs: list,
+        outputs: list[object],
         graph_engine: "GraphEngine",
-    ) -> Generator[Union[GraphNodeEventBase, StreamCompletedEvent], None, None]:
+    ) -> Generator[GraphNodeEventBase, None, None]:
         rst = graph_engine.run()
         # get current iteration index
         index_variable = variable_pool.get([self._node_id, "index"])

@@ -338,7 +502,7 @@ class IterationNode(Node):
             case ErrorHandleMode.REMOVE_ABNORMAL_OUTPUT:
                 return

-    def _create_graph_engine(self, index: int, item: Any):
+    def _create_graph_engine(self, index: int, item: object):
         # Import dependencies
         from core.workflow.entities import GraphInitParams, GraphRuntimeState
         from core.workflow.graph import Graph

@@ -387,18 +551,9 @@ class IterationNode(Node):

         # Create a new GraphEngine for this iteration
         graph_engine = GraphEngine(
-            tenant_id=self.tenant_id,
-            app_id=self.app_id,
-            workflow_id=self.workflow_id,
-            user_id=self.user_id,
-            user_from=self.user_from,
-            invoke_from=self.invoke_from,
-            call_depth=self.workflow_call_depth,
             graph=iteration_graph,
-            graph_config=self.graph_config,
             graph_runtime_state=graph_runtime_state_copy,
             max_execution_steps=10000,  # Use default or config value
             max_execution_time=600,  # Use default or config value
             command_channel=InMemoryChannel(),  # Use InMemoryChannel for sub-graphs
         )

@@ -4,7 +4,7 @@ import re
 import time
 from collections import defaultdict
 from collections.abc import Mapping, Sequence
-from typing import TYPE_CHECKING, Any, Optional, cast
+from typing import TYPE_CHECKING, Any, cast

 from sqlalchemy import Float, and_, func, or_, select, text
 from sqlalchemy import cast as sqlalchemy_cast

@@ -568,7 +568,7 @@ class KnowledgeRetrievalNode(Node):
         return automatic_metadata_filters

     def _process_metadata_filter_func(
-        self, sequence: int, condition: str, metadata_name: str, value: Optional[Any], filters: list[Any]
+        self, sequence: int, condition: str, metadata_name: str, value: Any, filters: list[Any]
     ) -> list[Any]:
         if value is None and condition not in ("empty", "not empty"):
             return filters

@@ -959,7 +959,7 @@ class LLMNode(Node):
         return variable_mapping

     @classmethod
-    def get_default_config(cls, filters: dict | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         return {
             "type": "llm",
             "config": {

@@ -1,4 +1,4 @@
-from typing import Annotated, Any, Literal, Optional
+from typing import Annotated, Any, Literal

 from pydantic import AfterValidator, BaseModel, Field, field_validator

@@ -41,7 +41,7 @@ class LoopNodeData(BaseLoopNodeData):
     loop_count: int  # Maximum number of loops
     break_conditions: list[Condition]  # Conditions to break the loop
     logical_operator: Literal["and", "or"]
-    loop_variables: Optional[list[LoopVariableData]] = Field(default_factory=list[LoopVariableData])
+    loop_variables: list[LoopVariableData] | None = Field(default_factory=list[LoopVariableData])
     outputs: dict[str, Any] = Field(default_factory=dict)

     @field_validator("outputs", mode="before")

@@ -74,7 +74,7 @@ class LoopState(BaseLoopState):
     """

     outputs: list[Any] = Field(default_factory=list)
-    current_output: Any | None = None
+    current_output: Any = None

     class MetaData(BaseLoopState.MetaData):
         """

@@ -83,7 +83,7 @@ class LoopState(BaseLoopState):

     loop_length: int

-    def get_last_output(self) -> Any | None:
+    def get_last_output(self) -> Any:
         """
         Get last output.
         """

@@ -91,7 +91,7 @@ class LoopState(BaseLoopState):
             return self.outputs[-1]
         return None

-    def get_current_output(self) -> Any | None:
+    def get_current_output(self) -> Any:
         """
         Get current output.
         """

@@ -4,7 +4,6 @@ from collections.abc import Callable, Generator, Mapping, Sequence
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, Literal, cast

-from configs import dify_config
 from core.variables import Segment, SegmentType
 from core.workflow.enums import (
     ErrorStrategy,

@@ -444,18 +443,9 @@ class LoopNode(Node):

         # Create a new GraphEngine for this iteration
         graph_engine = GraphEngine(
-            tenant_id=self.tenant_id,
-            app_id=self.app_id,
-            workflow_id=self.workflow_id,
-            user_id=self.user_id,
-            user_from=self.user_from,
-            invoke_from=self.invoke_from,
-            call_depth=self.workflow_call_depth,
             graph=loop_graph,
-            graph_config=self.graph_config,
             graph_runtime_state=graph_runtime_state_copy,
             max_execution_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS,
             max_execution_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME,
             command_channel=InMemoryChannel(),  # Use InMemoryChannel for sub-graphs
         )

@@ -118,7 +118,7 @@ class ParameterExtractorNode(Node):
     _model_config: ModelConfigWithCredentialsEntity | None = None

     @classmethod
-    def get_default_config(cls, filters: dict | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         return {
             "model": {
                 "prompt_templates": {

@@ -271,7 +271,7 @@ class QuestionClassifierNode(Node):
         return variable_mapping

     @classmethod
-    def get_default_config(cls, filters: dict | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         """
         Get default config of node.
         :param filters: filter by node config parameters (not used in this implementation).

@@ -39,7 +39,7 @@ class TemplateTransformNode(Node):
         return self._node_data

     @classmethod
-    def get_default_config(cls, filters: dict | None = None):
+    def get_default_config(cls, filters: Mapping[str, object] | None = None) -> Mapping[str, object]:
         """
         Get default config of node.
         :param filters: filter by node config parameters.

@@ -19,7 +19,7 @@ from core.workflow.enums import (
     WorkflowNodeExecutionMetadataKey,
     WorkflowNodeExecutionStatus,
 )
-from core.workflow.node_events import NodeRunResult, StreamChunkEvent, StreamCompletedEvent
+from core.workflow.node_events import NodeEventBase, NodeRunResult, StreamChunkEvent, StreamCompletedEvent
 from core.workflow.nodes.base.entities import BaseNodeData, RetryConfig
 from core.workflow.nodes.base.node import Node
 from core.workflow.nodes.base.variable_template_parser import VariableTemplateParser

@@ -55,7 +55,7 @@ class ToolNode(Node):
     def version(cls) -> str:
         return "1"

-    def _run(self) -> Generator:
+    def _run(self) -> Generator[NodeEventBase, None, None]:
         """
         Run the tool node
         """

@@ -18,7 +18,7 @@ class VariableOperationItem(BaseModel):
     # 2. For VARIABLE input_type: Initially contains the selector of the source variable.
     # 3. During the variable updating procedure: The `value` field is reassigned to hold
     #    the resolved actual value that will be applied to the target variable.
-    value: Any | None = None
+    value: Any = None


 class VariableAssignerNodeData(BaseNodeData):

@@ -2,7 +2,7 @@ import logging
 import time
 import uuid
 from collections.abc import Generator, Mapping, Sequence
-from typing import Any, Optional
+from typing import Any

 from configs import dify_config
 from core.app.apps.exc import GenerateTaskStoppedError

@@ -43,7 +43,7 @@ class WorkflowEntry:
         call_depth: int,
         variable_pool: VariablePool,
         graph_runtime_state: GraphRuntimeState,
-        command_channel: Optional[CommandChannel] = None,
+        command_channel: CommandChannel | None = None,
     ) -> None:
         """
         Init workflow entry

@@ -73,18 +73,9 @@ class WorkflowEntry:

         self.command_channel = command_channel
         self.graph_engine = GraphEngine(
-            tenant_id=tenant_id,
-            app_id=app_id,
-            workflow_id=workflow_id,
-            user_id=user_id,
-            user_from=user_from,
-            invoke_from=invoke_from,
-            call_depth=call_depth,
             graph=graph,
-            graph_config=graph_config,
             graph_runtime_state=graph_runtime_state,
             max_execution_steps=dify_config.WORKFLOW_MAX_EXECUTION_STEPS,
             max_execution_time=dify_config.WORKFLOW_MAX_EXECUTION_TIME,
             command_channel=command_channel,
         )

@@ -16,7 +16,9 @@ class WorkflowRuntimeTypeConverter:

     def to_json_encodable(self, value: Mapping[str, Any] | None) -> Mapping[str, Any] | None:
         result = self._to_json_encodable_recursive(value)
-        return result if isinstance(result, Mapping) or result is None else dict(result)
+        if isinstance(result, Mapping) or result is None:
+            return result
+        return {}

     def _to_json_encodable_recursive(self, value: Any):
         if value is None:

@@ -846,7 +846,7 @@ class Conversation(Base):
     )

     @property
-    def app(self) -> Optional[App]:
+    def app(self) -> App | None:
         with Session(db.engine, expire_on_commit=False) as session:
             return session.query(App).where(App.id == self.app_id).first()

@@ -1140,7 +1140,7 @@ class Message(Base):
     )

     @property
-    def retriever_resources(self) -> Any | list[Any]:
+    def retriever_resources(self) -> Any:
         return self.message_metadata_dict.get("retriever_resources") if self.message_metadata else []

     @property

@@ -1,4 +1,5 @@
 import json
+from collections.abc import Mapping
 from datetime import datetime
 from typing import TYPE_CHECKING, Any, cast
 from urllib.parse import urlparse

@@ -314,11 +315,11 @@ class MCPToolProvider(Base):
         return [MCPTool(**tool) for tool in json.loads(self.tools)]

     @property
-    def provider_icon(self) -> dict[str, str] | str:
+    def provider_icon(self) -> Mapping[str, str] | str:
         from core.file import helpers as file_helpers

         try:
-            return cast(dict[str, str], json.loads(self.icon))
+            return json.loads(self.icon)
         except json.JSONDecodeError:
             return file_helpers.get_signed_file_url(self.icon)

@@ -12,7 +12,7 @@
     "core/ops",
     "core/tools",
     "core/model_runtime",
-    "core/workflow",
+    "core/workflow/nodes",
     "core/app/app_config/easy_ui_based_app/dataset"
   ],
   "typeCheckingMode": "strict",

@@ -398,14 +398,11 @@ class DatasetService:
         if not dataset:
             raise ValueError("Dataset not found")
         # check if dataset name is exists
-        if (
-            db.session.query(Dataset)
-            .filter(
-                Dataset.id != dataset_id,
-                Dataset.name == data.get("name", dataset.name),
-                Dataset.tenant_id == dataset.tenant_id,
-            )
-            .first()
-        ):
+        if DatasetService._has_dataset_same_name(
+            tenant_id=dataset.tenant_id,
+            dataset_id=dataset_id,
+            name=data.get("name", dataset.name),
+        ):
             raise ValueError("Dataset name already exists")

@@ -418,6 +415,19 @@ class DatasetService:
         else:
             return DatasetService._update_internal_dataset(dataset, data, user)

+    @staticmethod
+    def _has_dataset_same_name(tenant_id: str, dataset_id: str, name: str):
+        dataset = (
+            db.session.query(Dataset)
+            .filter(
+                Dataset.id != dataset_id,
+                Dataset.name == name,
+                Dataset.tenant_id == tenant_id,
+            )
+            .first()
+        )
+        return dataset is not None
+
     @staticmethod
     def _update_external_dataset(dataset, data, user):
         """

@@ -866,6 +876,7 @@ class DatasetService:
                 embedding_model.provider, embedding_model.model
             )
             dataset.collection_binding_id = dataset_collection_binding.id
+            dataset.indexing_technique = knowledge_configuration.indexing_technique
         except LLMBadRequestError:
             raise ValueError(
                 "No Embedding Model available. Please configure a valid provider "

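The extracted _has_dataset_same_name above materializes a full row via .first(). A hedged alternative sketch using an EXISTS subquery instead; it assumes the same Dataset model and db.session as the diff and is not part of the commit:

from sqlalchemy import exists


def has_dataset_same_name(tenant_id: str, dataset_id: str, name: str) -> bool:
    # EXISTS lets the database stop at the first matching row
    # instead of returning full Dataset columns.
    stmt = exists().where(
        Dataset.id != dataset_id,
        Dataset.name == name,
        Dataset.tenant_id == tenant_id,
    )
    return bool(db.session.query(stmt).scalar())
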
@@ -202,15 +202,14 @@ workflow:
       human_description:
         en_US: the file to be parsed(support pdf, ppt, pptx, doc, docx, png, jpg,
          jpeg)
-        ja_JP: the file to be parsed(support pdf, ppt, pptx, doc, docx, png, jpg,
-          jpeg)
+        ja_JP: 解析するファイル(pdf, ppt, pptx, doc, docx, png, jpg, jpegをサポート)
         pt_BR: o arquivo a ser analisado (suporta pdf, ppt, pptx, doc, docx, png,
           jpg, jpeg)
         zh_Hans: 用于解析的文件(支持 pdf, ppt, pptx, doc, docx, png, jpg, jpeg)
       label:
         en_US: file
-        ja_JP: file
-        pt_BR: file
+        ja_JP: ファイル
+        pt_BR: arquivo
         zh_Hans: file
       llm_description: the file to be parsed (support pdf, ppt, pptx, doc, docx,
         png, jpg, jpeg)

@@ -432,13 +431,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input Variable
-        ja_JP: Input Variable
-        pt_BR: Input Variable
+        ja_JP: 入力変数
+        pt_BR: Variável de entrada
         zh_Hans: 输入变量
       llm_description: The text you want to chunk.
       max: null

@@ -456,13 +455,13 @@ workflow:
       form: llm
       human_description:
         en_US: The delimiter of the chunks.
-        ja_JP: The delimiter of the chunks.
-        pt_BR: The delimiter of the chunks.
+        ja_JP: チャンクの区切り記号。
+        pt_BR: O delimitador dos blocos.
         zh_Hans: 块的分隔符。
       label:
         en_US: Delimiter
-        ja_JP: Delimiter
-        pt_BR: Delimiter
+        ja_JP: 区切り記号
+        pt_BR: Delimitador
         zh_Hans: 分隔符
       llm_description: The delimiter of the chunks, the format of the delimiter
         must be a string.

@@ -481,13 +480,13 @@ workflow:
      form: llm
       human_description:
         en_US: The maximum chunk length.
-        ja_JP: The maximum chunk length.
-        pt_BR: The maximum chunk length.
+        ja_JP: 最大長のチャンク。
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度。
       label:
         en_US: Maximum Chunk Length
-        ja_JP: Maximum Chunk Length
-        pt_BR: Maximum Chunk Length
+        ja_JP: チャンク最大長
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度
       llm_description: The maximum chunk length, the format of the chunk size
         must be an integer.

@@ -506,13 +505,13 @@ workflow:
       form: llm
       human_description:
         en_US: The chunk overlap length.
-        ja_JP: The chunk overlap length.
-        pt_BR: The chunk overlap length.
+        ja_JP: チャンクの重複長
+        pt_BR: O comprimento de sobreposição dos fragmentos
         zh_Hans: 块的重叠长度。
       label:
         en_US: Chunk Overlap Length
-        ja_JP: Chunk Overlap Length
-        pt_BR: Chunk Overlap Length
+        ja_JP: チャンク重複長
+        pt_BR: Comprimento de sobreposição do bloco
         zh_Hans: 块的重叠长度
       llm_description: The chunk overlap length, the format of the chunk overlap
         length must be an integer.

@@ -531,13 +530,13 @@ workflow:
       form: llm
       human_description:
         en_US: Replace consecutive spaces, newlines and tabs
-        ja_JP: Replace consecutive spaces, newlines and tabs
-        pt_BR: Replace consecutive spaces, newlines and tabs
+        ja_JP: 連続のスペース、改行、またはタブを置換する
+        pt_BR: Substituir espaços consecutivos, novas linhas e tabulações
         zh_Hans: 替换连续的空格、换行符和制表符
       label:
         en_US: Replace Consecutive Spaces, Newlines and Tabs
-        ja_JP: Replace Consecutive Spaces, Newlines and Tabs
-        pt_BR: Replace Consecutive Spaces, Newlines and Tabs
+        ja_JP: 連続のスペース、改行、またはタブを置換する
+        pt_BR: Substituir espaços consecutivos, novas linhas e tabulações
         zh_Hans: 替换连续的空格、换行符和制表符
       llm_description: Replace consecutive spaces, newlines and tabs, the format
         of the replace must be a boolean.

@@ -556,13 +555,13 @@ workflow:
       form: llm
       human_description:
         en_US: Delete all URLs and email addresses
-        ja_JP: Delete all URLs and email addresses
-        pt_BR: Delete all URLs and email addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
+        pt_BR: Excluir todos os URLs e endereços de e-mail
         zh_Hans: 删除所有URL和电子邮件地址
       label:
         en_US: Delete All URLs and Email Addresses
-        ja_JP: Delete All URLs and Email Addresses
-        pt_BR: Delete All URLs and Email Addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
+        pt_BR: Excluir todos os URLs e endereços de e-mail
         zh_Hans: 删除所有URL和电子邮件地址
       llm_description: Delete all URLs and email addresses, the format of the
         delete must be a boolean.

@@ -202,15 +202,14 @@ workflow:
       human_description:
         en_US: the file to be parsed(support pdf, ppt, pptx, doc, docx, png, jpg,
           jpeg)
-        ja_JP: the file to be parsed(support pdf, ppt, pptx, doc, docx, png, jpg,
-          jpeg)
+        ja_JP: 解析するファイル(pdf, ppt, pptx, doc, docx, png, jpg, jpegをサポート)
         pt_BR: o arquivo a ser analisado (suporta pdf, ppt, pptx, doc, docx, png,
           jpg, jpeg)
         zh_Hans: 用于解析的文件(支持 pdf, ppt, pptx, doc, docx, png, jpg, jpeg)
       label:
         en_US: file
-        ja_JP: file
-        pt_BR: file
+        ja_JP: ファイル
+        pt_BR: arquivo
         zh_Hans: file
       llm_description: the file to be parsed (support pdf, ppt, pptx, doc, docx,
         png, jpg, jpeg)

@@ -432,13 +431,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input Variable
-        ja_JP: Input Variable
-        pt_BR: Input Variable
+        ja_JP: 入力変数
+        pt_BR: Variável de entrada
         zh_Hans: 输入变量
       llm_description: The text you want to chunk.
       max: null

@@ -456,13 +455,13 @@ workflow:
       form: llm
       human_description:
         en_US: The delimiter of the chunks.
-        ja_JP: The delimiter of the chunks.
-        pt_BR: The delimiter of the chunks.
+        ja_JP: チャンクの区切り記号。
+        pt_BR: O delimitador dos pedaços.
         zh_Hans: 块的分隔符。
       label:
         en_US: Delimiter
-        ja_JP: Delimiter
-        pt_BR: Delimiter
+        ja_JP: 区切り記号
+        pt_BR: Delimitador
         zh_Hans: 分隔符
       llm_description: The delimiter of the chunks, the format of the delimiter
         must be a string.

@@ -481,13 +480,13 @@ workflow:
       form: llm
       human_description:
         en_US: The maximum chunk length.
-        ja_JP: The maximum chunk length.
-        pt_BR: The maximum chunk length.
+        ja_JP: 最大長のチャンク。
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度。
       label:
         en_US: Maximum Chunk Length
-        ja_JP: Maximum Chunk Length
-        pt_BR: Maximum Chunk Length
+        ja_JP: チャンク最大長
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度
       llm_description: The maximum chunk length, the format of the chunk size
         must be an integer.

@@ -506,12 +505,12 @@ workflow:
       form: llm
       human_description:
         en_US: The chunk overlap length.
-        ja_JP: The chunk overlap length.
+        ja_JP: チャンクの重複長
         pt_BR: The chunk overlap length.
         zh_Hans: 块的重叠长度。
       label:
         en_US: Chunk Overlap Length
-        ja_JP: Chunk Overlap Length
+        ja_JP: チャンク重複長
         pt_BR: Chunk Overlap Length
         zh_Hans: 块的重叠长度
       llm_description: The chunk overlap length, the format of the chunk overlap

@@ -531,12 +530,12 @@ workflow:
       form: llm
       human_description:
         en_US: Replace consecutive spaces, newlines and tabs
-        ja_JP: Replace consecutive spaces, newlines and tabs
+        ja_JP: 連続のスペース、改行、またはタブを置換する
         pt_BR: Replace consecutive spaces, newlines and tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       label:
         en_US: Replace Consecutive Spaces, Newlines and Tabs
-        ja_JP: Replace Consecutive Spaces, Newlines and Tabs
+        ja_JP: 連続のスペース、改行、またはタブを置換する
         pt_BR: Replace Consecutive Spaces, Newlines and Tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       llm_description: Replace consecutive spaces, newlines and tabs, the format

@@ -556,12 +555,12 @@ workflow:
       form: llm
       human_description:
         en_US: Delete all URLs and email addresses
-        ja_JP: Delete all URLs and email addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete all URLs and email addresses
         zh_Hans: 删除所有URL和电子邮件地址
       label:
         en_US: Delete All URLs and Email Addresses
-        ja_JP: Delete All URLs and Email Addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete All URLs and Email Addresses
         zh_Hans: 删除所有URL和电子邮件地址
       llm_description: Delete all URLs and email addresses, the format of the

@@ -201,15 +201,14 @@ workflow:
       human_description:
         en_US: the file to be parsed(support pdf, ppt, pptx, doc, docx, png, jpg,
           jpeg)
-        ja_JP: the file to be parsed(support pdf, ppt, pptx, doc, docx, png, jpg,
-          jpeg)
+        ja_JP: 解析するファイル(pdf, ppt, pptx, doc, docx, png, jpg, jpegをサポート)
         pt_BR: o arquivo a ser analisado (suporta pdf, ppt, pptx, doc, docx, png,
           jpg, jpeg)
         zh_Hans: 用于解析的文件(支持 pdf, ppt, pptx, doc, docx, png, jpg, jpeg)
       label:
         en_US: file
-        ja_JP: file
-        pt_BR: file
+        ja_JP: ファイル
+        pt_BR: arquivo
         zh_Hans: file
       llm_description: the file to be parsed (support pdf, ppt, pptx, doc, docx,
         png, jpg, jpeg)

@@ -427,13 +426,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input text
-        ja_JP: Input text
-        pt_BR: Input text
+        ja_JP: 入力テキスト
+        pt_BR: Texto de entrada
         zh_Hans: 输入文本
       llm_description: The text you want to chunk.
       max: null

@@ -451,12 +450,12 @@ workflow:
       form: llm
       human_description:
         en_US: Maximum length for chunking
-        ja_JP: Maximum length for chunking
+        ja_JP: チャンク分割の最大長
         pt_BR: Comprimento máximo para divisão
         zh_Hans: 用于分块的最大长度
       label:
         en_US: Maximum Length
-        ja_JP: Maximum Length
+        ja_JP: 最大長
         pt_BR: Comprimento Máximo
         zh_Hans: 最大长度
       llm_description: Maximum length allowed per chunk

@@ -478,12 +477,12 @@ workflow:
       form: llm
       human_description:
         en_US: Separator used for chunking
-        ja_JP: Separator used for chunking
+        ja_JP: チャンク分割に使用する区切り文字
         pt_BR: Separador usado para divisão
         zh_Hans: 用于分块的分隔符
       label:
         en_US: Chunk Separator
-        ja_JP: Chunk Separator
+        ja_JP: チャンク区切り文字
         pt_BR: Separador de Divisão
         zh_Hans: 分块分隔符
       llm_description: The separator used to split chunks

@@ -502,12 +501,12 @@ workflow:
       form: llm
       human_description:
         en_US: Maximum length for subchunking
-        ja_JP: Maximum length for subchunking
+        ja_JP: サブチャンク分割の最大長
         pt_BR: Comprimento máximo para subdivisão
         zh_Hans: 用于子分块的最大长度
       label:
         en_US: Subchunk Maximum Length
-        ja_JP: Subchunk Maximum Length
+        ja_JP: サブチャンク最大長
         pt_BR: Comprimento Máximo de Subdivisão
         zh_Hans: 子分块最大长度
       llm_description: Maximum length allowed per subchunk

@@ -526,12 +525,12 @@ workflow:
       form: llm
       human_description:
         en_US: Separator used for subchunking
-        ja_JP: Separator used for subchunking
+        ja_JP: サブチャンク分割に使用する区切り文字
         pt_BR: Separador usado para subdivisão
         zh_Hans: 用于子分块的分隔符
       label:
         en_US: Subchunk Separator
-        ja_JP: Subchunk Separator
+        ja_JP: サブチャンキング用セパレーター
         pt_BR: Separador de Subdivisão
         zh_Hans: 子分块分隔符
       llm_description: The separator used to split subchunks

@@ -552,16 +551,15 @@ workflow:
         en_US: Split text into paragraphs based on separator and maximum chunk
           length, using split text as parent block or entire document as parent
           block and directly retrieve.
-        ja_JP: Split text into paragraphs based on separator and maximum chunk
-          length, using split text as parent block or entire document as parent
-          block and directly retrieve.
+        ja_JP: セパレーターと最大チャンク長に基づいてテキストを段落に分割し、分割されたテキスト
+          を親ブロックとして使用するか、文書全体を親ブロックとして使用して直接取得します。
         pt_BR: Dividir texto em parágrafos com base no separador e no comprimento
           máximo do bloco, usando o texto dividido como bloco pai ou documento
           completo como bloco pai e diretamente recuperá-lo.
         zh_Hans: 根据分隔符和最大块长度将文本拆分为段落,使用拆分文本作为检索的父块或整个文档用作父块并直接检索。
       label:
         en_US: Parent Mode
-        ja_JP: Parent Mode
+        ja_JP: 親子モード
         pt_BR: Modo Pai
         zh_Hans: 父块模式
       llm_description: Split text into paragraphs based on separator and maximum

@@ -574,14 +572,14 @@ workflow:
       - icon: ''
         label:
           en_US: Paragraph
-          ja_JP: Paragraph
+          ja_JP: 段落
           pt_BR: Parágrafo
           zh_Hans: 段落
         value: paragraph
       - icon: ''
         label:
           en_US: Full Document
-          ja_JP: Full Document
+          ja_JP: 全文
           pt_BR: Documento Completo
           zh_Hans: 全文
         value: full_doc

@@ -596,12 +594,12 @@ workflow:
       form: llm
       human_description:
         en_US: Whether to remove extra spaces in the text
-        ja_JP: Whether to remove extra spaces in the text
+        ja_JP: テキスト内の余分なスペースを削除するかどうか
         pt_BR: Se deve remover espaços extras no texto
         zh_Hans: 是否移除文本中的多余空格
       label:
         en_US: Remove Extra Spaces
-        ja_JP: Remove Extra Spaces
+        ja_JP: 余分なスペースを削除
         pt_BR: Remover Espaços Extras
         zh_Hans: 移除多余空格
       llm_description: Whether to remove extra spaces in the text

@@ -620,12 +618,12 @@ workflow:
       form: llm
       human_description:
         en_US: Whether to remove URLs and emails in the text
-        ja_JP: Whether to remove URLs and emails in the text
+        ja_JP: テキスト内のURLやメールアドレスを削除するかどうか
         pt_BR: Se deve remover URLs e e-mails no texto
         zh_Hans: 是否移除文本中的URL和电子邮件地址
       label:
         en_US: Remove URLs and Emails
-        ja_JP: Remove URLs and Emails
+        ja_JP: URLとメールアドレスを削除
         pt_BR: Remover URLs e E-mails
         zh_Hans: 移除URL和电子邮件地址
       llm_description: Whether to remove URLs and emails in the text
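The parent_mode parameter introduced above is a two-value enum: `paragraph` (the split text becomes the parent block) or `full_doc` (the entire document is the parent block and is retrieved directly). A sketch of how a consumer might branch on it; the enum and function names here are illustrative only, not the actual dify implementation.

    # Illustrative only: mirrors the two option values declared in the YAML
    # above (paragraph / full_doc). All names are hypothetical.
    from collections.abc import Callable
    from enum import Enum

    class ParentMode(str, Enum):
        PARAGRAPH = "paragraph"  # each split paragraph is a parent block
        FULL_DOC = "full_doc"    # the whole document is the single parent block

    def resolve_parent_blocks(
        text: str, mode: ParentMode, split: Callable[[str], list[str]]
    ) -> list[str]:
        if mode is ParentMode.FULL_DOC:
            return [text]     # one parent: the entire document
        return split(text)    # one parent per split paragraph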
@@ -99,13 +99,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input Variable
-        ja_JP: Input Variable
-        pt_BR: Input Variable
+        ja_JP: 入力変数
+        pt_BR: Variável de entrada
         zh_Hans: 输入变量
       llm_description: The text you want to chunk.
       max: null

@@ -123,13 +123,13 @@ workflow:
       form: llm
       human_description:
         en_US: The delimiter of the chunks.
-        ja_JP: The delimiter of the chunks.
-        pt_BR: The delimiter of the chunks.
+        ja_JP: チャンクの区切り記号。
+        pt_BR: O delimitador dos pedaços.
         zh_Hans: 块的分隔符。
       label:
         en_US: Delimiter
-        ja_JP: Delimiter
-        pt_BR: Delimiter
+        ja_JP: 区切り記号
+        pt_BR: Delimitador
         zh_Hans: 分隔符
       llm_description: The delimiter of the chunks, the format of the delimiter
         must be a string.

@@ -148,13 +148,13 @@ workflow:
       form: llm
       human_description:
         en_US: The maximum chunk length.
-        ja_JP: The maximum chunk length.
-        pt_BR: The maximum chunk length.
+        ja_JP: 最大長のチャンク。
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度。
       label:
         en_US: Maximum Chunk Length
-        ja_JP: Maximum Chunk Length
-        pt_BR: Maximum Chunk Length
+        ja_JP: チャンク最大長
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度
       llm_description: The maximum chunk length, the format of the chunk size
         must be an integer.

@@ -173,12 +173,12 @@ workflow:
       form: llm
       human_description:
         en_US: The chunk overlap length.
-        ja_JP: The chunk overlap length.
+        ja_JP: チャンクの重複長
         pt_BR: The chunk overlap length.
         zh_Hans: 块的重叠长度。
       label:
         en_US: Chunk Overlap Length
-        ja_JP: Chunk Overlap Length
+        ja_JP: チャンク重複長
         pt_BR: Chunk Overlap Length
         zh_Hans: 块的重叠长度
       llm_description: The chunk overlap length, the format of the chunk overlap

@@ -198,12 +198,12 @@ workflow:
       form: llm
       human_description:
         en_US: Replace consecutive spaces, newlines and tabs
-        ja_JP: Replace consecutive spaces, newlines and tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace consecutive spaces, newlines and tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       label:
         en_US: Replace Consecutive Spaces, Newlines and Tabs
-        ja_JP: Replace Consecutive Spaces, Newlines and Tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace Consecutive Spaces, Newlines and Tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       llm_description: Replace consecutive spaces, newlines and tabs, the format

@@ -223,12 +223,12 @@ workflow:
       form: llm
       human_description:
         en_US: Delete all URLs and email addresses
-        ja_JP: Delete all URLs and email addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete all URLs and email addresses
         zh_Hans: 删除所有URL和电子邮件地址
       label:
         en_US: Delete All URLs and Email Addresses
-        ja_JP: Delete All URLs and Email Addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete All URLs and Email Addresses
         zh_Hans: 删除所有URL和电子邮件地址
       llm_description: Delete all URLs and email addresses, the format of the
@@ -99,13 +99,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input Variable
-        ja_JP: Input Variable
-        pt_BR: Input Variable
+        ja_JP: 入力変数
+        pt_BR: Variável de entrada
         zh_Hans: 输入变量
       llm_description: The text you want to chunk.
       max: null

@@ -123,13 +123,13 @@ workflow:
       form: llm
       human_description:
         en_US: The delimiter of the chunks.
-        ja_JP: The delimiter of the chunks.
-        pt_BR: The delimiter of the chunks.
+        ja_JP: チャンクの区切り記号。
+        pt_BR: O delimitador dos pedaços.
         zh_Hans: 块的分隔符。
       label:
         en_US: Delimiter
-        ja_JP: Delimiter
-        pt_BR: Delimiter
+        ja_JP: 区切り記号
+        pt_BR: Delimitador
         zh_Hans: 分隔符
       llm_description: The delimiter of the chunks, the format of the delimiter
         must be a string.

@@ -148,13 +148,13 @@ workflow:
       form: llm
       human_description:
         en_US: The maximum chunk length.
-        ja_JP: The maximum chunk length.
-        pt_BR: The maximum chunk length.
+        ja_JP: 最大長のチャンク。
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度。
       label:
         en_US: Maximum Chunk Length
-        ja_JP: Maximum Chunk Length
-        pt_BR: Maximum Chunk Length
+        ja_JP: チャンク最大長
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度
       llm_description: The maximum chunk length, the format of the chunk size
         must be an integer.

@@ -173,12 +173,12 @@ workflow:
       form: llm
       human_description:
         en_US: The chunk overlap length.
-        ja_JP: The chunk overlap length.
+        ja_JP: チャンクの重複長
         pt_BR: The chunk overlap length.
         zh_Hans: 块的重叠长度。
       label:
         en_US: Chunk Overlap Length
-        ja_JP: Chunk Overlap Length
+        ja_JP: チャンク重複長
         pt_BR: Chunk Overlap Length
         zh_Hans: 块的重叠长度
       llm_description: The chunk overlap length, the format of the chunk overlap

@@ -198,12 +198,12 @@ workflow:
       form: llm
       human_description:
         en_US: Replace consecutive spaces, newlines and tabs
-        ja_JP: Replace consecutive spaces, newlines and tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace consecutive spaces, newlines and tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       label:
         en_US: Replace Consecutive Spaces, Newlines and Tabs
-        ja_JP: Replace Consecutive Spaces, Newlines and Tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace Consecutive Spaces, Newlines and Tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       llm_description: Replace consecutive spaces, newlines and tabs, the format

@@ -223,12 +223,12 @@ workflow:
       form: llm
       human_description:
         en_US: Delete all URLs and email addresses
-        ja_JP: Delete all URLs and email addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete all URLs and email addresses
         zh_Hans: 删除所有URL和电子邮件地址
       label:
         en_US: Delete All URLs and Email Addresses
-        ja_JP: Delete All URLs and Email Addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete All URLs and Email Addresses
         zh_Hans: 删除所有URL和电子邮件地址
       llm_description: Delete all URLs and email addresses, the format of the
@@ -118,13 +118,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input text
-        ja_JP: Input text
-        pt_BR: Input text
+        ja_JP: 入力テキスト
+        pt_BR: Texto de entrada
         zh_Hans: 输入文本
       llm_description: The text you want to chunk.
       max: null

@@ -142,12 +142,12 @@ workflow:
       form: llm
       human_description:
         en_US: Maximum length for chunking
-        ja_JP: Maximum length for chunking
+        ja_JP: チャンク分割の最大長
         pt_BR: Comprimento máximo para divisão
         zh_Hans: 用于分块的最大长度
       label:
         en_US: Maximum Length
-        ja_JP: Maximum Length
+        ja_JP: 最大長
         pt_BR: Comprimento Máximo
         zh_Hans: 最大长度
       llm_description: Maximum length allowed per chunk

@@ -169,12 +169,12 @@ workflow:
       form: llm
       human_description:
         en_US: Separator used for chunking
-        ja_JP: Separator used for chunking
+        ja_JP: チャンク分割に使用する区切り文字
         pt_BR: Separador usado para divisão
         zh_Hans: 用于分块的分隔符
       label:
         en_US: Chunk Separator
-        ja_JP: Chunk Separator
+        ja_JP: チャンク区切り文字
         pt_BR: Separador de Divisão
         zh_Hans: 分块分隔符
       llm_description: The separator used to split chunks

@@ -193,12 +193,12 @@ workflow:
       form: llm
       human_description:
         en_US: Maximum length for subchunking
-        ja_JP: Maximum length for subchunking
+        ja_JP: サブチャンク分割の最大長
         pt_BR: Comprimento máximo para subdivisão
         zh_Hans: 用于子分块的最大长度
       label:
         en_US: Subchunk Maximum Length
-        ja_JP: Subchunk Maximum Length
+        ja_JP: サブチャンク最大長
         pt_BR: Comprimento Máximo de Subdivisão
         zh_Hans: 子分块最大长度
       llm_description: Maximum length allowed per subchunk

@@ -217,12 +217,12 @@ workflow:
       form: llm
       human_description:
         en_US: Separator used for subchunking
-        ja_JP: Separator used for subchunking
+        ja_JP: サブチャンク分割に使用する区切り文字
         pt_BR: Separador usado para subdivisão
         zh_Hans: 用于子分块的分隔符
       label:
         en_US: Subchunk Separator
-        ja_JP: Subchunk Separator
+        ja_JP: サブチャンキング用セパレーター
         pt_BR: Separador de Subdivisão
         zh_Hans: 子分块分隔符
       llm_description: The separator used to split subchunks

@@ -243,16 +243,15 @@ workflow:
         en_US: Split text into paragraphs based on separator and maximum chunk
           length, using split text as parent block or entire document as parent
           block and directly retrieve.
-        ja_JP: Split text into paragraphs based on separator and maximum chunk
-          length, using split text as parent block or entire document as parent
-          block and directly retrieve.
+        ja_JP: セパレーターと最大チャンク長に基づいてテキストを段落に分割し、分割されたテキスト
+          を親ブロックとして使用するか、文書全体を親ブロックとして使用して直接取得します。
         pt_BR: Dividir texto em parágrafos com base no separador e no comprimento
           máximo do bloco, usando o texto dividido como bloco pai ou documento
           completo como bloco pai e diretamente recuperá-lo.
         zh_Hans: 根据分隔符和最大块长度将文本拆分为段落,使用拆分文本作为检索的父块或整个文档用作父块并直接检索。
       label:
         en_US: Parent Mode
-        ja_JP: Parent Mode
+        ja_JP: 親子モード
         pt_BR: Modo Pai
         zh_Hans: 父块模式
       llm_description: Split text into paragraphs based on separator and maximum

@@ -265,14 +264,14 @@ workflow:
       - icon: ''
         label:
           en_US: Paragraph
-          ja_JP: Paragraph
+          ja_JP: 段落
           pt_BR: Parágrafo
           zh_Hans: 段落
         value: paragraph
       - icon: ''
         label:
           en_US: Full Document
-          ja_JP: Full Document
+          ja_JP: 全文
           pt_BR: Documento Completo
           zh_Hans: 全文
         value: full_doc

@@ -287,12 +286,12 @@ workflow:
       form: llm
       human_description:
         en_US: Whether to remove extra spaces in the text
-        ja_JP: Whether to remove extra spaces in the text
+        ja_JP: テキスト内の余分なスペースを削除するかどうか
         pt_BR: Se deve remover espaços extras no texto
         zh_Hans: 是否移除文本中的多余空格
       label:
         en_US: Remove Extra Spaces
-        ja_JP: Remove Extra Spaces
+        ja_JP: 余分なスペースを削除
         pt_BR: Remover Espaços Extras
         zh_Hans: 移除多余空格
       llm_description: Whether to remove extra spaces in the text

@@ -311,12 +310,12 @@ workflow:
       form: llm
       human_description:
         en_US: Whether to remove URLs and emails in the text
-        ja_JP: Whether to remove URLs and emails in the text
+        ja_JP: テキスト内のURLやメールアドレスを削除するかどうか
         pt_BR: Se deve remover URLs e e-mails no texto
         zh_Hans: 是否移除文本中的URL和电子邮件地址
       label:
         en_US: Remove URLs and Emails
-        ja_JP: Remove URLs and Emails
+        ja_JP: URLとメールアドレスを削除
         pt_BR: Remover URLs e E-mails
         zh_Hans: 移除URL和电子邮件地址
       llm_description: Whether to remove URLs and emails in the text
@@ -238,13 +238,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input Variable
-        ja_JP: Input Variable
-        pt_BR: Input Variable
+        ja_JP: 入力変数
+        pt_BR: Variável de entrada
         zh_Hans: 输入变量
       llm_description: The text you want to chunk.
       max: null

@@ -262,13 +262,13 @@ workflow:
       form: llm
       human_description:
         en_US: The delimiter of the chunks.
-        ja_JP: The delimiter of the chunks.
-        pt_BR: The delimiter of the chunks.
+        ja_JP: チャンクの区切り記号。
+        pt_BR: O delimitador dos pedaços.
         zh_Hans: 块的分隔符。
       label:
         en_US: Delimiter
-        ja_JP: Delimiter
-        pt_BR: Delimiter
+        ja_JP: 区切り記号
+        pt_BR: Delimitador
         zh_Hans: 分隔符
       llm_description: The delimiter of the chunks, the format of the delimiter
         must be a string.

@@ -287,13 +287,13 @@ workflow:
       form: llm
       human_description:
         en_US: The maximum chunk length.
-        ja_JP: The maximum chunk length.
-        pt_BR: The maximum chunk length.
+        ja_JP: 最大長のチャンク。
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度。
       label:
         en_US: Maximum Chunk Length
-        ja_JP: Maximum Chunk Length
-        pt_BR: Maximum Chunk Length
+        ja_JP: チャンク最大長
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度
       llm_description: The maximum chunk length, the format of the chunk size
         must be an integer.

@@ -312,12 +312,12 @@ workflow:
       form: llm
       human_description:
         en_US: The chunk overlap length.
-        ja_JP: The chunk overlap length.
+        ja_JP: チャンクの重複長
         pt_BR: The chunk overlap length.
         zh_Hans: 块的重叠长度。
       label:
         en_US: Chunk Overlap Length
-        ja_JP: Chunk Overlap Length
+        ja_JP: チャンク重複長
         pt_BR: Chunk Overlap Length
         zh_Hans: 块的重叠长度
       llm_description: The chunk overlap length, the format of the chunk overlap

@@ -337,12 +337,12 @@ workflow:
       form: llm
       human_description:
         en_US: Replace consecutive spaces, newlines and tabs
-        ja_JP: Replace consecutive spaces, newlines and tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace consecutive spaces, newlines and tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       label:
         en_US: Replace Consecutive Spaces, Newlines and Tabs
-        ja_JP: Replace Consecutive Spaces, Newlines and Tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace Consecutive Spaces, Newlines and Tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       llm_description: Replace consecutive spaces, newlines and tabs, the format

@@ -362,12 +362,12 @@ workflow:
       form: llm
       human_description:
         en_US: Delete all URLs and email addresses
-        ja_JP: Delete all URLs and email addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete all URLs and email addresses
         zh_Hans: 删除所有URL和电子邮件地址
       label:
         en_US: Delete All URLs and Email Addresses
-        ja_JP: Delete All URLs and Email Addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete All URLs and Email Addresses
         zh_Hans: 删除所有URL和电子邮件地址
       llm_description: Delete all URLs and email addresses, the format of the
@@ -238,13 +238,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input Variable
-        ja_JP: Input Variable
-        pt_BR: Input Variable
+        ja_JP: 入力変数
+        pt_BR: Variável de entrada
         zh_Hans: 输入变量
       llm_description: The text you want to chunk.
       max: null

@@ -262,13 +262,13 @@ workflow:
       form: llm
       human_description:
         en_US: The delimiter of the chunks.
-        ja_JP: The delimiter of the chunks.
-        pt_BR: The delimiter of the chunks.
+        ja_JP: チャンクの区切り記号。
+        pt_BR: O delimitador dos pedaços.
         zh_Hans: 块的分隔符。
       label:
         en_US: Delimiter
-        ja_JP: Delimiter
-        pt_BR: Delimiter
+        ja_JP: 区切り記号
+        pt_BR: Delimitador
         zh_Hans: 分隔符
       llm_description: The delimiter of the chunks, the format of the delimiter
         must be a string.

@@ -287,13 +287,13 @@ workflow:
       form: llm
       human_description:
         en_US: The maximum chunk length.
-        ja_JP: The maximum chunk length.
-        pt_BR: The maximum chunk length.
+        ja_JP: 最大長のチャンク。
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度。
       label:
         en_US: Maximum Chunk Length
-        ja_JP: Maximum Chunk Length
-        pt_BR: Maximum Chunk Length
+        ja_JP: チャンク最大長
+        pt_BR: O comprimento máximo do bloco
         zh_Hans: 最大块的长度
       llm_description: The maximum chunk length, the format of the chunk size
         must be an integer.

@@ -312,12 +312,12 @@ workflow:
       form: llm
       human_description:
         en_US: The chunk overlap length.
-        ja_JP: The chunk overlap length.
+        ja_JP: チャンクの重複長。
         pt_BR: The chunk overlap length.
         zh_Hans: 块的重叠长度。
       label:
         en_US: Chunk Overlap Length
-        ja_JP: Chunk Overlap Length
+        ja_JP: チャンク重複長
         pt_BR: Chunk Overlap Length
         zh_Hans: 块的重叠长度
       llm_description: The chunk overlap length, the format of the chunk overlap

@@ -337,12 +337,12 @@ workflow:
       form: llm
       human_description:
         en_US: Replace consecutive spaces, newlines and tabs
-        ja_JP: Replace consecutive spaces, newlines and tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace consecutive spaces, newlines and tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       label:
         en_US: Replace Consecutive Spaces, Newlines and Tabs
-        ja_JP: Replace Consecutive Spaces, Newlines and Tabs
+        ja_JP: 連続のスペース、改行、まだはタブを置換する
         pt_BR: Replace Consecutive Spaces, Newlines and Tabs
         zh_Hans: 替换连续的空格、换行符和制表符
       llm_description: Replace consecutive spaces, newlines and tabs, the format

@@ -362,12 +362,12 @@ workflow:
       form: llm
       human_description:
         en_US: Delete all URLs and email addresses
-        ja_JP: Delete all URLs and email addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete all URLs and email addresses
         zh_Hans: 删除所有URL和电子邮件地址
       label:
         en_US: Delete All URLs and Email Addresses
-        ja_JP: Delete All URLs and Email Addresses
+        ja_JP: すべてのURLとメールアドレスを削除する
         pt_BR: Delete All URLs and Email Addresses
         zh_Hans: 删除所有URL和电子邮件地址
       llm_description: Delete all URLs and email addresses, the format of the
@@ -121,13 +121,13 @@ workflow:
       form: llm
       human_description:
         en_US: The text you want to chunk.
-        ja_JP: The text you want to chunk.
-        pt_BR: The text you want to chunk.
+        ja_JP: チャンク化したいテキスト。
+        pt_BR: O texto que você deseja dividir.
         zh_Hans: 你想要分块的文本。
       label:
         en_US: Input text
-        ja_JP: Input text
-        pt_BR: Input text
+        ja_JP: 入力テキスト
+        pt_BR: Texto de entrada
         zh_Hans: 输入文本
       llm_description: The text you want to chunk.
       max: null

@@ -145,12 +145,12 @@ workflow:
       form: llm
       human_description:
         en_US: Maximum length for chunking
-        ja_JP: Maximum length for chunking
+        ja_JP: チャンク分割の最大長
         pt_BR: Comprimento máximo para divisão
         zh_Hans: 用于分块的最大长度
       label:
         en_US: Maximum Length
-        ja_JP: Maximum Length
+        ja_JP: 最大長
         pt_BR: Comprimento Máximo
         zh_Hans: 最大长度
       llm_description: Maximum length allowed per chunk

@@ -172,12 +172,12 @@ workflow:
       form: llm
       human_description:
         en_US: Separator used for chunking
-        ja_JP: Separator used for chunking
+        ja_JP: チャンク分割に使用する区切り文字
         pt_BR: Separador usado para divisão
         zh_Hans: 用于分块的分隔符
       label:
         en_US: Chunk Separator
-        ja_JP: Chunk Separator
+        ja_JP: チャンク区切り文字
         pt_BR: Separador de Divisão
         zh_Hans: 分块分隔符
       llm_description: The separator used to split chunks

@@ -196,12 +196,12 @@ workflow:
       form: llm
       human_description:
         en_US: Maximum length for subchunking
-        ja_JP: Maximum length for subchunking
+        ja_JP: サブチャンク分割の最大長
         pt_BR: Comprimento máximo para subdivisão
         zh_Hans: 用于子分块的最大长度
       label:
         en_US: Subchunk Maximum Length
-        ja_JP: Subchunk Maximum Length
+        ja_JP: サブチャンク最大長
         pt_BR: Comprimento Máximo de Subdivisão
         zh_Hans: 子分块最大长度
       llm_description: Maximum length allowed per subchunk

@@ -220,12 +220,12 @@ workflow:
       form: llm
       human_description:
         en_US: Separator used for subchunking
-        ja_JP: Separator used for subchunking
+        ja_JP: サブチャンク分割に使用する区切り文字
         pt_BR: Separador usado para subdivisão
         zh_Hans: 用于子分块的分隔符
       label:
         en_US: Subchunk Separator
-        ja_JP: Subchunk Separator
+        ja_JP: サブチャンキング用セパレーター
         pt_BR: Separador de Subdivisão
         zh_Hans: 子分块分隔符
       llm_description: The separator used to split subchunks

@@ -246,16 +246,15 @@ workflow:
         en_US: Split text into paragraphs based on separator and maximum chunk
           length, using split text as parent block or entire document as parent
           block and directly retrieve.
-        ja_JP: Split text into paragraphs based on separator and maximum chunk
-          length, using split text as parent block or entire document as parent
-          block and directly retrieve.
+        ja_JP: セパレーターと最大チャンク長に基づいてテキストを段落に分割し、分割されたテキスト
+          を親ブロックとして使用するか、文書全体を親ブロックとして使用して直接取得します。
         pt_BR: Dividir texto em parágrafos com base no separador e no comprimento
           máximo do bloco, usando o texto dividido como bloco pai ou documento
           completo como bloco pai e diretamente recuperá-lo.
         zh_Hans: 根据分隔符和最大块长度将文本拆分为段落,使用拆分文本作为检索的父块或整个文档用作父块并直接检索。
       label:
         en_US: Parent Mode
-        ja_JP: Parent Mode
+        ja_JP: 親子モード
         pt_BR: Modo Pai
         zh_Hans: 父块模式
       llm_description: Split text into paragraphs based on separator and maximum

@@ -268,14 +267,14 @@ workflow:
       - icon: ''
         label:
           en_US: Paragraph
-          ja_JP: Paragraph
+          ja_JP: 段落
           pt_BR: Parágrafo
           zh_Hans: 段落
         value: paragraph
       - icon: ''
         label:
           en_US: Full Document
-          ja_JP: Full Document
+          ja_JP: 全文
           pt_BR: Documento Completo
           zh_Hans: 全文
         value: full_doc

@@ -290,12 +289,12 @@ workflow:
       form: llm
       human_description:
         en_US: Whether to remove extra spaces in the text
-        ja_JP: Whether to remove extra spaces in the text
+        ja_JP: テキスト内の余分なスペースを削除するかどうか
         pt_BR: Se deve remover espaços extras no texto
         zh_Hans: 是否移除文本中的多余空格
       label:
         en_US: Remove Extra Spaces
-        ja_JP: Remove Extra Spaces
+        ja_JP: 余分なスペースを削除
         pt_BR: Remover Espaços Extras
         zh_Hans: 移除多余空格
       llm_description: Whether to remove extra spaces in the text

@@ -314,12 +313,12 @@ workflow:
       form: llm
       human_description:
         en_US: Whether to remove URLs and emails in the text
-        ja_JP: Whether to remove URLs and emails in the text
+        ja_JP: テキスト内のURLやメールアドレスを削除するかどうか
         pt_BR: Se deve remover URLs e e-mails no texto
         zh_Hans: 是否移除文本中的URL和电子邮件地址
       label:
         en_US: Remove URLs and Emails
-        ja_JP: Remove URLs and Emails
+        ja_JP: URLとメールアドレスを削除
         pt_BR: Remover URLs e E-mails
         zh_Hans: 移除URL和电子邮件地址
       llm_description: Whether to remove URLs and emails in the text
@@ -1,6 +1,7 @@
 import json
 import logging
-from typing import Any, Union, cast
+from collections.abc import Mapping
+from typing import Any, Union

 from yarl import URL

@@ -39,7 +40,9 @@ class ToolTransformService:
         return str(url_prefix % {"tenant_id": tenant_id, "filename": filename})

     @classmethod
-    def get_tool_provider_icon_url(cls, provider_type: str, provider_name: str, icon: str | dict) -> Union[str, dict]:
+    def get_tool_provider_icon_url(
+        cls, provider_type: str, provider_name: str, icon: str | Mapping[str, str]
+    ) -> str | Mapping[str, str]:
         """
         get tool provider icon url
         """

@@ -52,7 +55,7 @@ class ToolTransformService:
         elif provider_type in {ToolProviderType.API.value, ToolProviderType.WORKFLOW.value}:
             try:
                 if isinstance(icon, str):
-                    return cast(dict, json.loads(icon))
+                    return json.loads(icon)
                 return icon
             except Exception:
                 return {"background": "#252525", "content": "\ud83d\ude01"}

@@ -119,7 +122,7 @@ class ToolTransformService:
             name=provider_controller.entity.identity.name,
             description=provider_controller.entity.identity.description,
             icon=provider_controller.entity.identity.icon,
-            icon_dark=provider_controller.entity.identity.icon_dark,
+            icon_dark=provider_controller.entity.identity.icon_dark or "",
             label=provider_controller.entity.identity.label,
             type=ToolProviderType.BUILT_IN,
             masked_credentials={},

@@ -141,9 +144,10 @@ class ToolTransformService:
             )
         }

+        masked_creds = {}
         for name in schema:
-            if result.masked_credentials:
-                result.masked_credentials[name] = ""
+            masked_creds[name] = ""
+        result.masked_credentials = masked_creds

         # check if the provider need credentials
         if not provider_controller.need_credentials:

@@ -221,7 +225,7 @@ class ToolTransformService:
             name=provider_controller.entity.identity.name,
             description=provider_controller.entity.identity.description,
             icon=provider_controller.entity.identity.icon,
-            icon_dark=provider_controller.entity.identity.icon_dark,
+            icon_dark=provider_controller.entity.identity.icon_dark or "",
             label=provider_controller.entity.identity.label,
             type=ToolProviderType.WORKFLOW,
             masked_credentials={},

@@ -334,7 +338,7 @@ class ToolTransformService:

     @staticmethod
     def convert_tool_entity_to_api_entity(
-        tool: Union[ApiToolBundle, WorkflowTool, Tool],
+        tool: ApiToolBundle | WorkflowTool | Tool,
         tenant_id: str,
         labels: list[str] | None = None,
     ) -> ToolApiEntity:

@@ -388,7 +392,7 @@ class ToolTransformService:
                 parameters=merged_parameters,
                 labels=labels or [],
             )
-        elif isinstance(tool, ApiToolBundle):
+        else:
             return ToolApiEntity(
                 author=tool.author,
                 name=tool.operation_id or "",

@@ -397,9 +401,6 @@ class ToolTransformService:
                 parameters=tool.parameters,
                 labels=labels or [],
             )
-        else:
-            # Handle WorkflowTool case
-            raise ValueError(f"Unsupported tool type: {type(tool)}")

     @staticmethod
     def convert_builtin_provider_to_credential_entity(
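The masked-credentials hunk above fixes a real bug: the old code only wrote placeholder keys into result.masked_credentials when that dict was already truthy, so a provider whose mask started out empty never received any per-key entries. The pattern in isolation, with the provider entities replaced by stand-ins; this is a simplified sketch, not the repo's code.

    # Simplified illustration of the masked_credentials fix; `Result` and
    # `schema` are stand-ins for the real provider entities in the diff.
    class Result:
        def __init__(self):
            self.masked_credentials = {}

    schema = ["api_key"]
    result = Result()

    # Old behaviour: the guard meant nothing was ever written when the
    # mask started out as {} (an empty dict is falsy).
    if result.masked_credentials:
        for name in schema:
            result.masked_credentials[name] = ""
    assert result.masked_credentials == {}  # bug: still empty

    # New behaviour: build the mask unconditionally, then assign it.
    result.masked_credentials = {name: "" for name in schema}
    assert result.masked_credentials == {"api_key": ""}

This is also why the test assertion near the end of this diff changes from `result.masked_credentials == {}` to `result.masked_credentials == {"api_key": ""}`.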
@@ -563,12 +563,12 @@ class WorkflowService:
         # This will prevent validation errors from breaking the workflow
         return []

-    def get_default_block_configs(self) -> list[dict]:
+    def get_default_block_configs(self) -> Sequence[Mapping[str, object]]:
         """
         Get default block configs
         """
         # return default block config
-        default_block_configs = []
+        default_block_configs: list[Mapping[str, object]] = []
         for node_class_mapping in NODE_TYPE_CLASSES_MAPPING.values():
             node_class = node_class_mapping[LATEST_VERSION]
             default_config = node_class.get_default_config()

@@ -577,7 +577,9 @@ class WorkflowService:

         return default_block_configs

-    def get_default_block_config(self, node_type: str, filters: dict | None = None) -> dict | None:
+    def get_default_block_config(
+        self, node_type: str, filters: Mapping[str, object] | None = None
+    ) -> Mapping[str, object]:
         """
         Get default config of node.
         :param node_type: node type

@@ -588,12 +590,12 @@ class WorkflowService:

         # return default block config
         if node_type_enum not in NODE_TYPE_CLASSES_MAPPING:
-            return None
+            return {}

         node_class = NODE_TYPE_CLASSES_MAPPING[node_type_enum][LATEST_VERSION]
         default_config = node_class.get_default_config(filters=filters)
         if not default_config:
-            return None
+            return {}

         return default_config

@@ -807,11 +809,13 @@ class WorkflowService:
                 WorkflowNodeExecutionStatus.EXCEPTION,
             )
             error = node_run_result.error if not run_succeeded else None

             return node, node_run_result, run_succeeded, error

         except WorkflowNodeRunFailedError as e:
-            return e._node, None, False, e._error  # type: ignore
+            node = e.node
+            run_succeeded = False
+            node_run_result = None
+            error = e.error
+            return node, node_run_result, run_succeeded, error

     def _apply_error_strategy(self, node: Node, node_run_result: NodeRunResult) -> NodeRunResult:
         """Apply error strategy when node execution fails."""
|
|||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"type": "code",
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
|
|
@ -135,6 +136,7 @@ def test_execute_code_output_validator(setup_code_executor_mock):
|
|||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"type": "code",
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "string",
|
||||
|
|
@ -180,6 +182,7 @@ def test_execute_code_output_validator_depth():
|
|||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"type": "code",
|
||||
"outputs": {
|
||||
"string_validator": {
|
||||
"type": "string",
|
||||
|
|
@ -298,6 +301,7 @@ def test_execute_code_output_object_list():
|
|||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"type": "code",
|
||||
"outputs": {
|
||||
"object_list": {
|
||||
"type": "array[object]",
|
||||
|
|
@ -358,7 +362,8 @@ def test_execute_code_output_object_list():
|
|||
node._transform_result(result, node._node_data.outputs)
|
||||
|
||||
|
||||
def test_execute_code_scientific_notation():
|
||||
@pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
|
||||
def test_execute_code_scientific_notation(setup_code_executor_mock):
|
||||
code = """
|
||||
def main():
|
||||
return {
|
||||
|
|
@ -370,6 +375,7 @@ def test_execute_code_scientific_notation():
|
|||
code_config = {
|
||||
"id": "code",
|
||||
"data": {
|
||||
"type": "code",
|
||||
"outputs": {
|
||||
"result": {
|
||||
"type": "number",
|
||||
|
|
|
|||
|
|
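Besides adding the missing "id" keys to the node configs, the hunk above also wires the scientific-notation test into the code-executor mock: it previously ran without the fixture, and the fix parametrizes setup_code_executor_mock with indirect=True so the fixture is both requested and fed its parameter. Roughly how indirect parametrization routes the value into a fixture; the fixture body here is a simplified assumption, not the project's actual fixture.

    # Simplified illustration of pytest's indirect parametrization: the
    # parametrize value is delivered to the fixture via request.param
    # instead of being passed straight to the test.
    import pytest

    @pytest.fixture
    def setup_code_executor_mock(request):
        modes = request.param          # receives ["none"] from parametrize
        # ... install the executor mock according to `modes` (elided) ...
        yield modes

    @pytest.mark.parametrize("setup_code_executor_mock", [["none"]], indirect=True)
    def test_uses_mock(setup_code_executor_mock):
        assert setup_code_executor_mock == ["none"]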
@@ -77,6 +77,7 @@ def test_get(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -110,6 +111,7 @@ def test_no_auth(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -139,6 +141,7 @@ def test_custom_authorization_header(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -231,6 +234,7 @@ def test_bearer_authorization_with_custom_header_ignored(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -271,6 +275,7 @@ def test_basic_authorization_with_custom_header_ignored(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -310,6 +315,7 @@ def test_custom_authorization_with_empty_api_key(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -343,6 +349,7 @@ def test_template(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -378,6 +385,7 @@ def test_json(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "post",

@@ -420,6 +428,7 @@ def test_x_www_form_urlencoded(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "post",

@@ -467,6 +476,7 @@ def test_form_data(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "post",

@@ -517,6 +527,7 @@ def test_none_data(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "post",

@@ -550,6 +561,7 @@ def test_mock_404(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -579,6 +591,7 @@ def test_multi_colons_parse(setup_http_mock):
         config={
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",

@@ -635,6 +648,7 @@ def test_nested_object_variable_selector(setup_http_mock):
         {
+            "id": "1",
             "data": {
                 "type": "http-request",
                 "title": "http",
                 "desc": "",
                 "method": "get",
@@ -20,6 +20,7 @@ def test_execute_code(setup_code_executor_mock):
    config = {
+        "id": "1",
        "data": {
            "type": "template-transform",
            "title": "123",
            "variables": [
                {
@@ -70,6 +70,7 @@ def test_tool_variable_invoke():
         config={
+            "id": "1",
             "data": {
                 "type": "tool",
                 "title": "a",
                 "desc": "a",
                 "provider_id": "time",

@@ -101,6 +102,7 @@ def test_tool_mixed_invoke():
         config={
+            "id": "1",
             "data": {
                 "type": "tool",
                 "title": "a",
                 "desc": "a",
                 "provider_id": "time",
@@ -454,7 +454,7 @@ class TestToolTransformService:
             name=fake.company(),
             description=I18nObject(en_US=fake.text(max_nb_chars=100)),
             icon='{"background": "#FF6B6B", "content": "🔧"}',
-            icon_dark=None,
+            icon_dark="",
             label=I18nObject(en_US=fake.company()),
             type=ToolProviderType.API,
             masked_credentials={},

@@ -473,8 +473,8 @@ class TestToolTransformService:
         assert provider.icon["background"] == "#FF6B6B"
         assert provider.icon["content"] == "🔧"

-        # Verify dark icon remains None
-        assert provider.icon_dark is None
+        # Verify dark icon remains empty string
+        assert provider.icon_dark == ""

     def test_builtin_provider_to_user_provider_success(
         self, db_session_with_containers, mock_external_service_dependencies

@@ -628,7 +628,7 @@ class TestToolTransformService:
         assert result is not None
         assert result.is_team_authorization is True
         assert result.allow_delete is False
-        assert result.masked_credentials == {}
+        assert result.masked_credentials == {"api_key": ""}

     def test_api_provider_to_controller_success(self, db_session_with_containers, mock_external_service_dependencies):
         """
Some files were not shown because too many files have changed in this diff.