diff --git a/.gitignore b/.gitignore index 7bd919f095..e0db6b0a19 100644 --- a/.gitignore +++ b/.gitignore @@ -209,6 +209,7 @@ api/.vscode .history .idea/ +web/migration/ # pnpm /.pnpm-store diff --git a/api/controllers/console/app/generator.py b/api/controllers/console/app/generator.py index b4fc44767a..b13b94f67d 100644 --- a/api/controllers/console/app/generator.py +++ b/api/controllers/console/app/generator.py @@ -55,6 +55,35 @@ class InstructionTemplatePayload(BaseModel): type: str = Field(..., description="Instruction template type") +class ContextGeneratePayload(BaseModel): + """Payload for generating extractor code node.""" + + workflow_id: str = Field(..., description="Workflow ID") + node_id: str = Field(..., description="Current tool/llm node ID") + parameter_name: str = Field(..., description="Parameter name to generate code for") + language: str = Field(default="python3", description="Code language (python3/javascript)") + prompt_messages: list[dict[str, Any]] = Field( + ..., description="Multi-turn conversation history, last message is the current instruction" + ) + model_config_data: dict[str, Any] = Field(..., alias="model_config", description="Model configuration") + + +class SuggestedQuestionsPayload(BaseModel): + """Payload for generating suggested questions.""" + + workflow_id: str = Field(..., description="Workflow ID") + node_id: str = Field(..., description="Current tool/llm node ID") + parameter_name: str = Field(..., description="Parameter name") + language: str = Field( + default="English", description="Language for generated questions (e.g. 
English, Chinese, Japanese)" + ) + model_config_data: dict[str, Any] | None = Field( + default=None, + alias="model_config", + description="Model configuration (optional, uses system default if not provided)", + ) + + def reg(cls: type[BaseModel]): console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) @@ -64,6 +93,8 @@ reg(RuleCodeGeneratePayload) reg(RuleStructuredOutputPayload) reg(InstructionGeneratePayload) reg(InstructionTemplatePayload) +reg(ContextGeneratePayload) +reg(SuggestedQuestionsPayload) @console_ns.route("/rule-generate") @@ -278,3 +309,74 @@ class InstructionGenerationTemplateApi(Resource): return {"data": INSTRUCTION_GENERATE_TEMPLATE_CODE} case _: raise ValueError(f"Invalid type: {args.type}") + + +@console_ns.route("/context-generate") +class ContextGenerateApi(Resource): + @console_ns.doc("generate_with_context") + @console_ns.doc(description="Generate with multi-turn conversation context") + @console_ns.expect(console_ns.models[ContextGeneratePayload.__name__]) + @console_ns.response(200, "Content generated successfully") + @console_ns.response(400, "Invalid request parameters or workflow not found") + @console_ns.response(402, "Provider quota exceeded") + @setup_required + @login_required + @account_initialization_required + def post(self): + from core.llm_generator.utils import deserialize_prompt_messages + + args = ContextGeneratePayload.model_validate(console_ns.payload) + _, current_tenant_id = current_account_with_tenant() + + prompt_messages = deserialize_prompt_messages(args.prompt_messages) + + try: + return LLMGenerator.generate_with_context( + tenant_id=current_tenant_id, + workflow_id=args.workflow_id, + node_id=args.node_id, + parameter_name=args.parameter_name, + language=args.language, + prompt_messages=prompt_messages, + model_config=args.model_config_data, + ) + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except 
QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) + + +@console_ns.route("/context-generate/suggested-questions") +class SuggestedQuestionsApi(Resource): + @console_ns.doc("generate_suggested_questions") + @console_ns.doc(description="Generate suggested questions for context generation") + @console_ns.expect(console_ns.models[SuggestedQuestionsPayload.__name__]) + @console_ns.response(200, "Questions generated successfully") + @setup_required + @login_required + @account_initialization_required + def post(self): + args = SuggestedQuestionsPayload.model_validate(console_ns.payload) + _, current_tenant_id = current_account_with_tenant() + + try: + return LLMGenerator.generate_suggested_questions( + tenant_id=current_tenant_id, + workflow_id=args.workflow_id, + node_id=args.node_id, + parameter_name=args.parameter_name, + language=args.language, + model_config=args.model_config_data, + ) + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + except QuotaExceededError: + raise ProviderQuotaExceededError() + except ModelCurrentlyNotSupportError: + raise ProviderModelCurrentlyNotSupportError() + except InvokeError as e: + raise CompletionRequestError(e.description) diff --git a/api/controllers/console/app/workflow.py b/api/controllers/console/app/workflow.py index b4f2ef0ba8..f3534b7e9a 100644 --- a/api/controllers/console/app/workflow.py +++ b/api/controllers/console/app/workflow.py @@ -46,6 +46,8 @@ from models.workflow import Workflow from services.app_generate_service import AppGenerateService from services.errors.app import WorkflowHashNotEqualError from services.errors.llm import InvokeRateLimitError +from services.workflow.entities import MentionGraphRequest, MentionParameterSchema +from services.workflow.mention_graph_service import MentionGraphService from 
services.workflow_service import DraftWorkflowDeletionError, WorkflowInUseError, WorkflowService logger = logging.getLogger(__name__) @@ -188,6 +190,15 @@ class DraftWorkflowTriggerRunAllPayload(BaseModel): node_ids: list[str] +class MentionGraphPayload(BaseModel): + """Request payload for generating mention graph.""" + + parent_node_id: str = Field(description="ID of the parent node that uses the extracted value") + parameter_key: str = Field(description="Key of the parameter being extracted") + context_source: list[str] = Field(description="Variable selector for the context source") + parameter_schema: dict[str, Any] = Field(description="Schema of the parameter to extract") + + def reg(cls: type[BaseModel]): console_ns.schema_model(cls.__name__, cls.model_json_schema(ref_template=DEFAULT_REF_TEMPLATE_SWAGGER_2_0)) @@ -205,6 +216,7 @@ reg(WorkflowListQuery) reg(WorkflowUpdatePayload) reg(DraftWorkflowTriggerRunPayload) reg(DraftWorkflowTriggerRunAllPayload) +reg(MentionGraphPayload) # TODO(QuantumGhost): Refactor existing node run API to handle file parameter parsing @@ -1166,3 +1178,54 @@ class DraftWorkflowTriggerRunAllApi(Resource): "status": "error", } ), 400 + + +@console_ns.route("/apps/<uuid:app_id>/workflows/draft/mention-graph") +class MentionGraphApi(Resource): + """ + API for generating Mention LLM node graph structures. + + This endpoint creates a complete graph structure containing an LLM node + configured to extract values from list[PromptMessage] variables.
+ """ + + @console_ns.doc("generate_mention_graph") + @console_ns.doc(description="Generate a Mention LLM node graph structure") + @console_ns.doc(params={"app_id": "Application ID"}) + @console_ns.expect(console_ns.models[MentionGraphPayload.__name__]) + @console_ns.response(200, "Mention graph generated successfully") + @console_ns.response(400, "Invalid request parameters") + @console_ns.response(403, "Permission denied") + @setup_required + @login_required + @account_initialization_required + @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) + @edit_permission_required + def post(self, app_model: App): + """ + Generate a Mention LLM node graph structure. + + Returns a complete graph structure containing a single LLM node + configured for extracting values from list[PromptMessage] context. + """ + + payload = MentionGraphPayload.model_validate(console_ns.payload or {}) + + parameter_schema = MentionParameterSchema( + name=payload.parameter_schema.get("name", payload.parameter_key), + type=payload.parameter_schema.get("type", "string"), + description=payload.parameter_schema.get("description", ""), + ) + + request = MentionGraphRequest( + parent_node_id=payload.parent_node_id, + parameter_key=payload.parameter_key, + context_source=payload.context_source, + parameter_schema=parameter_schema, + ) + + with Session(db.engine) as session: + service = MentionGraphService(session) + response = service.generate_mention_graph(tenant_id=app_model.tenant_id, request=request) + + return response.model_dump() diff --git a/api/controllers/console/app/workflow_draft_variable.py b/api/controllers/console/app/workflow_draft_variable.py index 3382b65acc..3ff388d330 100644 --- a/api/controllers/console/app/workflow_draft_variable.py +++ b/api/controllers/console/app/workflow_draft_variable.py @@ -17,7 +17,7 @@ from controllers.console.wraps import account_initialization_required, edit_perm from controllers.web.error import InvalidArgumentError, NotFoundError from 
core.file import helpers as file_helpers from core.variables.segment_group import SegmentGroup -from core.variables.segments import ArrayFileSegment, FileSegment, Segment +from core.variables.segments import ArrayFileSegment, ArrayPromptMessageSegment, FileSegment, Segment from core.variables.types import SegmentType from core.workflow.constants import CONVERSATION_VARIABLE_NODE_ID, SYSTEM_VARIABLE_NODE_ID from extensions.ext_database import db @@ -58,6 +58,8 @@ def _convert_values_to_json_serializable_object(value: Segment): return value.value.model_dump() elif isinstance(value, ArrayFileSegment): return [i.model_dump() for i in value.value] + elif isinstance(value, ArrayPromptMessageSegment): + return value.to_object() elif isinstance(value, SegmentGroup): return [_convert_values_to_json_serializable_object(i) for i in value.value] else: diff --git a/api/core/app/apps/advanced_chat/generate_response_converter.py b/api/core/app/apps/advanced_chat/generate_response_converter.py index 02ec96f209..2c3df9e910 100644 --- a/api/core/app/apps/advanced_chat/generate_response_converter.py +++ b/api/core/app/apps/advanced_chat/generate_response_converter.py @@ -82,7 +82,7 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk @classmethod @@ -110,7 +110,7 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): } if isinstance(sub_stream_response, MessageEndStreamResponse): - sub_stream_response_dict = sub_stream_response.model_dump(mode="json") + sub_stream_response_dict = sub_stream_response.model_dump(mode="json", exclude_none=True) metadata = sub_stream_response_dict.get("metadata", {}) sub_stream_response_dict["metadata"] = 
cls._get_simple_metadata(metadata) response_chunk.update(sub_stream_response_dict) @@ -120,6 +120,6 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): elif isinstance(sub_stream_response, NodeStartStreamResponse | NodeFinishStreamResponse): response_chunk.update(sub_stream_response.to_ignore_detail_dict()) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk diff --git a/api/core/app/apps/agent_chat/generate_response_converter.py b/api/core/app/apps/agent_chat/generate_response_converter.py index e35e9d9408..f328425fb7 100644 --- a/api/core/app/apps/agent_chat/generate_response_converter.py +++ b/api/core/app/apps/agent_chat/generate_response_converter.py @@ -81,7 +81,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk @classmethod @@ -109,7 +109,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): } if isinstance(sub_stream_response, MessageEndStreamResponse): - sub_stream_response_dict = sub_stream_response.model_dump(mode="json") + sub_stream_response_dict = sub_stream_response.model_dump(mode="json", exclude_none=True) metadata = sub_stream_response_dict.get("metadata", {}) sub_stream_response_dict["metadata"] = cls._get_simple_metadata(metadata) response_chunk.update(sub_stream_response_dict) @@ -117,6 +117,6 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + 
response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk diff --git a/api/core/app/apps/chat/generate_response_converter.py b/api/core/app/apps/chat/generate_response_converter.py index 3aa1161fd8..da02f6b750 100644 --- a/api/core/app/apps/chat/generate_response_converter.py +++ b/api/core/app/apps/chat/generate_response_converter.py @@ -81,7 +81,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk @classmethod @@ -109,7 +109,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): } if isinstance(sub_stream_response, MessageEndStreamResponse): - sub_stream_response_dict = sub_stream_response.model_dump(mode="json") + sub_stream_response_dict = sub_stream_response.model_dump(mode="json", exclude_none=True) metadata = sub_stream_response_dict.get("metadata", {}) sub_stream_response_dict["metadata"] = cls._get_simple_metadata(metadata) response_chunk.update(sub_stream_response_dict) @@ -117,6 +117,6 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 0f3f9972c3..2ed4ca1d8c 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -70,6 +70,8 @@ class _NodeSnapshot: """Empty string means the node is not executing 
inside an iteration.""" loop_id: str = "" """Empty string means the node is not executing inside a loop.""" + mention_parent_id: str = "" + """Empty string means the node is not an extractor node.""" class WorkflowResponseConverter: @@ -131,6 +133,7 @@ class WorkflowResponseConverter: start_at=event.start_at, iteration_id=event.in_iteration_id or "", loop_id=event.in_loop_id or "", + mention_parent_id=event.in_mention_parent_id or "", ) node_execution_id = NodeExecutionId(event.node_execution_id) self._node_snapshots[node_execution_id] = snapshot @@ -287,6 +290,7 @@ class WorkflowResponseConverter: created_at=int(snapshot.start_at.timestamp()), iteration_id=event.in_iteration_id, loop_id=event.in_loop_id, + mention_parent_id=event.in_mention_parent_id, agent_strategy=event.agent_strategy, ), ) @@ -373,6 +377,7 @@ class WorkflowResponseConverter: files=self.fetch_files_from_node_outputs(event.outputs or {}), iteration_id=event.in_iteration_id, loop_id=event.in_loop_id, + mention_parent_id=event.in_mention_parent_id, ), ) @@ -422,6 +427,7 @@ class WorkflowResponseConverter: files=self.fetch_files_from_node_outputs(event.outputs or {}), iteration_id=event.in_iteration_id, loop_id=event.in_loop_id, + mention_parent_id=event.in_mention_parent_id, retry_index=event.retry_index, ), ) diff --git a/api/core/app/apps/completion/generate_response_converter.py b/api/core/app/apps/completion/generate_response_converter.py index a4f574642d..cff0235b66 100644 --- a/api/core/app/apps/completion/generate_response_converter.py +++ b/api/core/app/apps/completion/generate_response_converter.py @@ -79,7 +79,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk @classmethod @@ -106,7 +106,7 
@@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): } if isinstance(sub_stream_response, MessageEndStreamResponse): - sub_stream_response_dict = sub_stream_response.model_dump(mode="json") + sub_stream_response_dict = sub_stream_response.model_dump(mode="json", exclude_none=True) metadata = sub_stream_response_dict.get("metadata", {}) if not isinstance(metadata, dict): metadata = {} @@ -116,6 +116,6 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk diff --git a/api/core/app/apps/pipeline/generate_response_converter.py b/api/core/app/apps/pipeline/generate_response_converter.py index cfacd8640d..d1aee51293 100644 --- a/api/core/app/apps/pipeline/generate_response_converter.py +++ b/api/core/app/apps/pipeline/generate_response_converter.py @@ -60,7 +60,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(cast(dict, data)) else: - response_chunk.update(sub_stream_response.model_dump()) + response_chunk.update(sub_stream_response.model_dump(exclude_none=True)) yield response_chunk @classmethod @@ -91,5 +91,5 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): elif isinstance(sub_stream_response, NodeStartStreamResponse | NodeFinishStreamResponse): response_chunk.update(cast(dict, sub_stream_response.to_ignore_detail_dict())) else: - response_chunk.update(sub_stream_response.model_dump()) + response_chunk.update(sub_stream_response.model_dump(exclude_none=True)) yield response_chunk diff --git a/api/core/app/apps/workflow/generate_response_converter.py b/api/core/app/apps/workflow/generate_response_converter.py 
index c64f44a603..6d774be6f7 100644 --- a/api/core/app/apps/workflow/generate_response_converter.py +++ b/api/core/app/apps/workflow/generate_response_converter.py @@ -60,7 +60,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): data = cls._error_to_stream_response(sub_stream_response.err) response_chunk.update(data) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk @classmethod @@ -91,5 +91,5 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): elif isinstance(sub_stream_response, NodeStartStreamResponse | NodeFinishStreamResponse): response_chunk.update(sub_stream_response.to_ignore_detail_dict()) else: - response_chunk.update(sub_stream_response.model_dump(mode="json")) + response_chunk.update(sub_stream_response.model_dump(mode="json", exclude_none=True)) yield response_chunk diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index b306a0376e..900d4dd657 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -385,6 +385,7 @@ class WorkflowBasedAppRunner: start_at=event.start_at, in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + in_mention_parent_id=event.in_mention_parent_id, inputs=inputs, process_data=process_data, outputs=outputs, @@ -405,6 +406,7 @@ class WorkflowBasedAppRunner: start_at=event.start_at, in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + in_mention_parent_id=event.in_mention_parent_id, agent_strategy=event.agent_strategy, provider_type=event.provider_type, provider_id=event.provider_id, @@ -428,6 +430,7 @@ class WorkflowBasedAppRunner: execution_metadata=execution_metadata, in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + in_mention_parent_id=event.in_mention_parent_id, ) ) elif isinstance(event, 
NodeRunFailedEvent): @@ -444,6 +447,7 @@ class WorkflowBasedAppRunner: execution_metadata=event.node_run_result.metadata, in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + in_mention_parent_id=event.in_mention_parent_id, ) ) elif isinstance(event, NodeRunExceptionEvent): @@ -460,6 +464,7 @@ class WorkflowBasedAppRunner: execution_metadata=event.node_run_result.metadata, in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + in_mention_parent_id=event.in_mention_parent_id, ) ) elif isinstance(event, NodeRunStreamChunkEvent): @@ -477,6 +482,7 @@ class WorkflowBasedAppRunner: chunk_type=QueueChunkType(event.chunk_type.value), tool_call=event.tool_call, tool_result=event.tool_result, + in_mention_parent_id=event.in_mention_parent_id, ) ) elif isinstance(event, NodeRunRetrieverResourceEvent): @@ -485,6 +491,7 @@ class WorkflowBasedAppRunner: retriever_resources=event.retriever_resources, in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + in_mention_parent_id=event.in_mention_parent_id, ) ) elif isinstance(event, NodeRunAgentLogEvent): diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index fdc4014caa..e3a21bac56 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -201,6 +201,8 @@ class QueueTextChunkEvent(AppQueueEvent): """iteration id if node is in iteration""" in_loop_id: str | None = None """loop id if node is in loop""" + in_mention_parent_id: str | None = None + """parent node id if this is an extractor node event""" # Extended fields for Agent/Tool streaming chunk_type: ChunkType = ChunkType.TEXT @@ -250,6 +252,8 @@ class QueueRetrieverResourcesEvent(AppQueueEvent): """iteration id if node is in iteration""" in_loop_id: str | None = None """loop id if node is in loop""" + in_mention_parent_id: str | None = None + """parent node id if this is an extractor node event""" class QueueAnnotationReplyEvent(AppQueueEvent): 
@@ -327,6 +331,8 @@ class QueueNodeStartedEvent(AppQueueEvent): node_run_index: int = 1 # FIXME(-LAN-): may not used in_iteration_id: str | None = None in_loop_id: str | None = None + in_mention_parent_id: str | None = None + """parent node id if this is an extractor node event""" start_at: datetime agent_strategy: AgentNodeStrategyInit | None = None @@ -349,6 +355,8 @@ class QueueNodeSucceededEvent(AppQueueEvent): """iteration id if node is in iteration""" in_loop_id: str | None = None """loop id if node is in loop""" + in_mention_parent_id: str | None = None + """parent node id if this is an extractor node event""" start_at: datetime inputs: Mapping[str, object] = Field(default_factory=dict) @@ -404,6 +412,8 @@ class QueueNodeExceptionEvent(AppQueueEvent): """iteration id if node is in iteration""" in_loop_id: str | None = None """loop id if node is in loop""" + in_mention_parent_id: str | None = None + """parent node id if this is an extractor node event""" start_at: datetime inputs: Mapping[str, object] = Field(default_factory=dict) @@ -428,6 +438,8 @@ class QueueNodeFailedEvent(AppQueueEvent): """iteration id if node is in iteration""" in_loop_id: str | None = None """loop id if node is in loop""" + in_mention_parent_id: str | None = None + """parent node id if this is an extractor node event""" start_at: datetime inputs: Mapping[str, object] = Field(default_factory=dict) diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 0998510b60..60af3256bd 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -294,6 +294,7 @@ class NodeStartStreamResponse(StreamResponse): extras: dict[str, object] = Field(default_factory=dict) iteration_id: str | None = None loop_id: str | None = None + mention_parent_id: str | None = None agent_strategy: AgentNodeStrategyInit | None = None event: StreamEvent = StreamEvent.NODE_STARTED @@ -317,6 +318,7 @@ class 
NodeStartStreamResponse(StreamResponse): "extras": {}, "iteration_id": self.data.iteration_id, "loop_id": self.data.loop_id, + "mention_parent_id": self.data.mention_parent_id, }, } @@ -352,6 +354,7 @@ class NodeFinishStreamResponse(StreamResponse): files: Sequence[Mapping[str, Any]] | None = [] iteration_id: str | None = None loop_id: str | None = None + mention_parent_id: str | None = None event: StreamEvent = StreamEvent.NODE_FINISHED workflow_run_id: str @@ -381,6 +384,7 @@ class NodeFinishStreamResponse(StreamResponse): "files": [], "iteration_id": self.data.iteration_id, "loop_id": self.data.loop_id, + "mention_parent_id": self.data.mention_parent_id, }, } @@ -416,6 +420,7 @@ class NodeRetryStreamResponse(StreamResponse): files: Sequence[Mapping[str, Any]] | None = [] iteration_id: str | None = None loop_id: str | None = None + mention_parent_id: str | None = None retry_index: int = 0 event: StreamEvent = StreamEvent.NODE_RETRY @@ -446,6 +451,7 @@ class NodeRetryStreamResponse(StreamResponse): "files": [], "iteration_id": self.data.iteration_id, "loop_id": self.data.loop_id, + "mention_parent_id": self.data.mention_parent_id, "retry_index": self.data.retry_index, }, } diff --git a/api/core/file/file_manager.py b/api/core/file/file_manager.py index 120fb73cdb..93c1a9be99 100644 --- a/api/core/file/file_manager.py +++ b/api/core/file/file_manager.py @@ -1,4 +1,5 @@ import base64 +import logging from collections.abc import Mapping from configs import dify_config @@ -10,7 +11,10 @@ from core.model_runtime.entities import ( TextPromptMessageContent, VideoPromptMessageContent, ) -from core.model_runtime.entities.message_entities import PromptMessageContentUnionTypes +from core.model_runtime.entities.message_entities import ( + MultiModalPromptMessageContent, + PromptMessageContentUnionTypes, +) from core.tools.signature import sign_tool_file from extensions.ext_storage import storage @@ -18,6 +22,8 @@ from . 
import helpers from .enums import FileAttribute from .models import File, FileTransferMethod, FileType +logger = logging.getLogger(__name__) + def get_attr(*, file: File, attr: FileAttribute): match attr: @@ -89,6 +95,8 @@ def to_prompt_message_content( "format": f.extension.removeprefix("."), "mime_type": f.mime_type, "filename": f.filename or "", + # Encoded file reference for context restoration: "transfer_method:related_id" or "remote:url" + "file_ref": _encode_file_ref(f), } if f.type == FileType.IMAGE: params["detail"] = image_detail_config or ImagePromptMessageContent.DETAIL.LOW @@ -96,6 +104,17 @@ def to_prompt_message_content( return prompt_class_map[f.type].model_validate(params) +def _encode_file_ref(f: File) -> str | None: + """Encode file reference as 'transfer_method:id_or_url' string.""" + if f.transfer_method == FileTransferMethod.REMOTE_URL: + return f"remote:{f.remote_url}" if f.remote_url else None + elif f.transfer_method == FileTransferMethod.LOCAL_FILE: + return f"local:{f.related_id}" if f.related_id else None + elif f.transfer_method == FileTransferMethod.TOOL_FILE: + return f"tool:{f.related_id}" if f.related_id else None + return None + + def download(f: File, /): if f.transfer_method in ( FileTransferMethod.TOOL_FILE, @@ -164,3 +183,128 @@ def _to_url(f: File, /): return sign_tool_file(tool_file_id=f.related_id, extension=f.extension) else: raise ValueError(f"Unsupported transfer method: {f.transfer_method}") + + +def restore_multimodal_content( + content: MultiModalPromptMessageContent, +) -> MultiModalPromptMessageContent: + """ + Restore base64_data or url for multimodal content from file_ref. 
+ + file_ref format: "transfer_method:id_or_url" (e.g., "local:abc123", "remote:https://...") + + Args: + content: MultiModalPromptMessageContent with file_ref field + + Returns: + MultiModalPromptMessageContent with restored base64_data or url + """ + # Skip if no file reference or content already has data + if not content.file_ref: + return content + if content.base64_data or content.url: + return content + + try: + file = _build_file_from_ref( + file_ref=content.file_ref, + file_format=content.format, + mime_type=content.mime_type, + filename=content.filename, + ) + if not file: + return content + + # Restore content based on config + if dify_config.MULTIMODAL_SEND_FORMAT == "base64": + restored_base64 = _get_encoded_string(file) + return content.model_copy(update={"base64_data": restored_base64}) + else: + restored_url = _to_url(file) + return content.model_copy(update={"url": restored_url}) + + except Exception as e: + logger.warning("Failed to restore multimodal content: %s", e) + return content + + +def _build_file_from_ref( + file_ref: str, + file_format: str | None, + mime_type: str | None, + filename: str | None, +) -> File | None: + """ + Build a File object from encoded file_ref string. 
+ + Args: + file_ref: Encoded reference "transfer_method:id_or_url" + file_format: The file format/extension (without dot) + mime_type: The mime type + filename: The filename + + Returns: + File object with storage_key loaded, or None if not found + """ + from sqlalchemy import select + from sqlalchemy.orm import Session + + from extensions.ext_database import db + from models.model import UploadFile + from models.tools import ToolFile + + # Parse file_ref: "method:value" + if ":" not in file_ref: + logger.warning("Invalid file_ref format: %s", file_ref) + return None + + method, value = file_ref.split(":", 1) + extension = f".{file_format}" if file_format else None + + if method == "remote": + return File( + tenant_id="", + type=FileType.IMAGE, + transfer_method=FileTransferMethod.REMOTE_URL, + remote_url=value, + extension=extension, + mime_type=mime_type, + filename=filename, + storage_key="", + ) + + # Query database for storage_key + with Session(db.engine) as session: + if method == "local": + stmt = select(UploadFile).where(UploadFile.id == value) + upload_file = session.scalar(stmt) + if upload_file: + return File( + tenant_id=upload_file.tenant_id, + type=FileType(upload_file.extension) + if hasattr(FileType, upload_file.extension.upper()) + else FileType.IMAGE, + transfer_method=FileTransferMethod.LOCAL_FILE, + related_id=value, + extension=extension or ("." 
+ upload_file.extension if upload_file.extension else None), + mime_type=mime_type or upload_file.mime_type, + filename=filename or upload_file.name, + storage_key=upload_file.key, + ) + elif method == "tool": + stmt = select(ToolFile).where(ToolFile.id == value) + tool_file = session.scalar(stmt) + if tool_file: + return File( + tenant_id=tool_file.tenant_id, + type=FileType.IMAGE, + transfer_method=FileTransferMethod.TOOL_FILE, + related_id=value, + extension=extension, + mime_type=mime_type or tool_file.mimetype, + filename=filename or tool_file.name, + storage_key=tool_file.file_key, + ) + + logger.warning("File not found for file_ref: %s", file_ref) + return None diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index be1e306d47..fd769f6a83 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -1,8 +1,8 @@ import json import logging import re -from collections.abc import Sequence -from typing import Protocol, cast +from collections.abc import Mapping, Sequence +from typing import Any, Protocol, cast import json_repair @@ -393,6 +393,488 @@ class LLMGenerator: logger.exception("Failed to invoke LLM model, model: %s", model_config.get("name")) return {"output": "", "error": f"An unexpected error occurred: {str(e)}"} + @classmethod + def generate_with_context( + cls, + tenant_id: str, + workflow_id: str, + node_id: str, + parameter_name: str, + language: str, + prompt_messages: list[PromptMessage], + model_config: dict, + ) -> dict: + """ + Generate extractor code node based on conversation context. 
+ + Args: + tenant_id: Tenant/workspace ID + workflow_id: Workflow ID + node_id: Current tool/llm node ID + parameter_name: Parameter name to generate code for + language: Code language (python3/javascript) + prompt_messages: Multi-turn conversation history (last message is instruction) + model_config: Model configuration (provider, name, completion_params) + + Returns: + dict with CodeNodeData format: + - variables: Input variable selectors + - code_language: Code language + - code: Generated code + - outputs: Output definitions + - message: Explanation + - error: Error message if any + """ + from sqlalchemy import select + from sqlalchemy.orm import Session + + from services.workflow_service import WorkflowService + + # Get workflow + with Session(db.engine) as session: + stmt = select(App).where(App.id == workflow_id) + app = session.scalar(stmt) + if not app: + return cls._error_response(f"App {workflow_id} not found") + + workflow = WorkflowService().get_draft_workflow(app_model=app) + if not workflow: + return cls._error_response(f"Workflow for app {workflow_id} not found") + + # Get upstream nodes via edge backtracking + upstream_nodes = cls._get_upstream_nodes(workflow.graph_dict, node_id) + + # Get current node info + current_node = cls._get_node_by_id(workflow.graph_dict, node_id) + if not current_node: + return cls._error_response(f"Node {node_id} not found") + + # Get parameter info + parameter_info = cls._get_parameter_info( + tenant_id=tenant_id, + node_data=current_node.get("data", {}), + parameter_name=parameter_name, + ) + + # Build system prompt + system_prompt = cls._build_extractor_system_prompt( + upstream_nodes=upstream_nodes, + current_node=current_node, + parameter_info=parameter_info, + language=language, + ) + + # Construct complete prompt_messages with system prompt + complete_messages: list[PromptMessage] = [ + SystemPromptMessage(content=system_prompt), + *prompt_messages, + ] + + from core.llm_generator.output_parser.structured_output 
import invoke_llm_with_structured_output + + # Get model instance and schema + provider = model_config.get("provider", "") + model_name = model_config.get("name", "") + model_instance = ModelManager().get_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM, + provider=provider, + model=model_name, + ) + + model_schema = model_instance.model_type_instance.get_model_schema(model_name, model_instance.credentials) + if not model_schema: + return cls._error_response(f"Model schema not found for {model_name}") + + model_parameters = model_config.get("completion_params", {}) + json_schema = cls._get_code_node_json_schema() + + try: + response = invoke_llm_with_structured_output( + provider=provider, + model_schema=model_schema, + model_instance=model_instance, + prompt_messages=complete_messages, + json_schema=json_schema, + model_parameters=model_parameters, + stream=False, + tenant_id=tenant_id, + ) + + return cls._parse_code_node_output( + response.structured_output, language, parameter_info.get("type", "string") + ) + + except InvokeError as e: + return cls._error_response(str(e)) + except Exception as e: + logger.exception("Failed to generate with context, model: %s", model_config.get("name")) + return cls._error_response(f"An unexpected error occurred: {str(e)}") + + @classmethod + def _error_response(cls, error: str) -> dict: + """Return error response in CodeNodeData format.""" + return { + "variables": [], + "code_language": "python3", + "code": "", + "outputs": {}, + "message": "", + "error": error, + } + + @classmethod + def generate_suggested_questions( + cls, + tenant_id: str, + workflow_id: str, + node_id: str, + parameter_name: str, + language: str, + model_config: dict | None = None, + ) -> dict: + """ + Generate suggested questions for context generation. + + Returns dict with questions array and error field. 
+ """ + from sqlalchemy import select + from sqlalchemy.orm import Session + + from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output + from services.workflow_service import WorkflowService + + # Get workflow context (reuse existing logic) + with Session(db.engine) as session: + stmt = select(App).where(App.id == workflow_id) + app = session.scalar(stmt) + if not app: + return {"questions": [], "error": f"App {workflow_id} not found"} + + workflow = WorkflowService().get_draft_workflow(app_model=app) + if not workflow: + return {"questions": [], "error": f"Workflow for app {workflow_id} not found"} + + upstream_nodes = cls._get_upstream_nodes(workflow.graph_dict, node_id) + current_node = cls._get_node_by_id(workflow.graph_dict, node_id) + if not current_node: + return {"questions": [], "error": f"Node {node_id} not found"} + + parameter_info = cls._get_parameter_info( + tenant_id=tenant_id, + node_data=current_node.get("data", {}), + parameter_name=parameter_name, + ) + + # Build prompt + system_prompt = cls._build_suggested_questions_prompt( + upstream_nodes=upstream_nodes, + current_node=current_node, + parameter_info=parameter_info, + language=language, + ) + + prompt_messages: list[PromptMessage] = [ + SystemPromptMessage(content=system_prompt), + ] + + # Get model instance - use default if model_config not provided + model_manager = ModelManager() + if model_config: + provider = model_config.get("provider", "") + model_name = model_config.get("name", "") + model_instance = model_manager.get_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM, + provider=provider, + model=model_name, + ) + else: + model_instance = model_manager.get_default_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM, + ) + model_name = model_instance.model + + model_schema = model_instance.model_type_instance.get_model_schema(model_name, model_instance.credentials) + if not model_schema: + return {"questions": [], 
"error": f"Model schema not found for {model_name}"} + + completion_params = model_config.get("completion_params", {}) if model_config else {} + model_parameters = {**completion_params, "max_tokens": 256} + json_schema = cls._get_suggested_questions_json_schema() + + try: + response = invoke_llm_with_structured_output( + provider=model_instance.provider, + model_schema=model_schema, + model_instance=model_instance, + prompt_messages=prompt_messages, + json_schema=json_schema, + model_parameters=model_parameters, + stream=False, + tenant_id=tenant_id, + ) + + questions = response.structured_output.get("questions", []) if response.structured_output else [] + return {"questions": questions, "error": ""} + + except InvokeError as e: + return {"questions": [], "error": str(e)} + except Exception as e: + logger.exception("Failed to generate suggested questions, model: %s", model_name) + return {"questions": [], "error": f"An unexpected error occurred: {str(e)}"} + + @classmethod + def _build_suggested_questions_prompt( + cls, + upstream_nodes: list[dict], + current_node: dict, + parameter_info: dict, + language: str = "English", + ) -> str: + """Build minimal prompt for suggested questions generation.""" + # Simplify upstream nodes to reduce tokens + sources = [f"{n['title']}({','.join(n.get('outputs', {}).keys())})" for n in upstream_nodes[:5]] + param_type = parameter_info.get("type", "string") + param_desc = parameter_info.get("description", "")[:100] + + return f"""Suggest 3 code generation questions for extracting data. 
+Sources: {", ".join(sources)} +Target: {parameter_info.get("name")}({param_type}) - {param_desc} +Output 3 short, practical questions in {language}.""" + + @classmethod + def _get_suggested_questions_json_schema(cls) -> dict: + """Return JSON Schema for suggested questions.""" + return { + "type": "object", + "properties": { + "questions": { + "type": "array", + "items": {"type": "string"}, + "minItems": 3, + "maxItems": 3, + "description": "3 suggested questions", + }, + }, + "required": ["questions"], + } + + @classmethod + def _get_code_node_json_schema(cls) -> dict: + """Return JSON Schema for structured output.""" + return { + "type": "object", + "properties": { + "variables": { + "type": "array", + "items": { + "type": "object", + "properties": { + "variable": {"type": "string", "description": "Variable name in code"}, + "value_selector": { + "type": "array", + "items": {"type": "string"}, + "description": "Path like [node_id, output_name]", + }, + }, + "required": ["variable", "value_selector"], + }, + }, + "code": {"type": "string", "description": "Generated code with main function"}, + "outputs": { + "type": "object", + "additionalProperties": { + "type": "object", + "properties": {"type": {"type": "string"}}, + }, + "description": "Output definitions, key is output name", + }, + "explanation": {"type": "string", "description": "Brief explanation of the code"}, + }, + "required": ["variables", "code", "outputs", "explanation"], + } + + @classmethod + def _get_upstream_nodes(cls, graph_dict: Mapping[str, Any], node_id: str) -> list[dict]: + """ + Get all upstream nodes via edge backtracking. + + Traverses the graph backwards from node_id to collect all reachable nodes. 
+        """
+        from collections import defaultdict, deque
+
+        nodes = {n["id"]: n for n in graph_dict.get("nodes", [])}
+        edges = graph_dict.get("edges", [])
+
+        # Build reverse adjacency list (target -> [sources]) for backward traversal
+        reverse_adj: dict[str, list[str]] = defaultdict(list)
+        for edge in edges:
+            reverse_adj[edge["target"]].append(edge["source"])
+
+        # BFS backwards from node_id; deque gives O(1) popleft where
+        # list.pop(0) is O(n) per dequeue.
+        visited: set[str] = set()
+        queue: deque[str] = deque([node_id])
+        upstream: list[dict] = []
+
+        while queue:
+            current = queue.popleft()
+            for source in reverse_adj.get(current, []):
+                if source not in visited:
+                    visited.add(source)
+                    queue.append(source)
+                    if source in nodes:
+                        upstream.append(cls._extract_node_info(nodes[source]))
+
+        return upstream
+
+    @classmethod
+    def _get_node_by_id(cls, graph_dict: Mapping[str, Any], node_id: str) -> dict | None:
+        """Get node by ID from graph."""
+        for node in graph_dict.get("nodes", []):
+            if node["id"] == node_id:
+                return node
+        return None
+
+    @classmethod
+    def _extract_node_info(cls, node: dict) -> dict:
+        """Extract minimal node info with outputs based on node type."""
+        node_type = node["data"]["type"]
+        node_data = node.get("data", {})
+
+        # Build outputs based on node type (only type, no description to reduce tokens)
+        outputs: dict[str, str] = {}
+        match node_type:
+            case "start":
+                for var in node_data.get("variables", []):
+                    name = var.get("variable", var.get("name", ""))
+                    outputs[name] = var.get("type", "string")
+            case "llm":
+                outputs["text"] = "string"
+            case "code":
+                for name, output in node_data.get("outputs", {}).items():
+                    outputs[name] = output.get("type", "string")
+            case "http-request":
+                outputs = {"body": "string", "status_code": "number", "headers": "object"}
+            case "knowledge-retrieval":
+                outputs["result"] = "array[object]"
+            case "tool":
+                outputs = {"text": "string", "json": "object"}
+            case _:
+                outputs["output"] = "string"
+
+        info: dict = {
+            "id": node["id"],
+            "title": node_data.get("title", node["id"]),
+            "outputs": outputs,
+        }
+        # Only include description if not empty
+        desc = node_data.get("desc", "")
+        if desc:
+            info["desc"] = desc
+
+        return info
+
+    @classmethod
+    def _get_parameter_info(cls, tenant_id: str, node_data: dict, parameter_name: str) -> dict:
+        """Get parameter info from tool schema using ToolManager."""
+        default_info = {"name": parameter_name, "type": "string", "description": ""}
+
+        if node_data.get("type") != "tool":
+            return default_info
+
+        try:
+            from core.app.entities.app_invoke_entities import InvokeFrom
+            from core.tools.entities.tool_entities import ToolProviderType
+            from core.tools.tool_manager import ToolManager
+
+            provider_type_str = node_data.get("provider_type", "")
+            provider_type = ToolProviderType(provider_type_str) if provider_type_str else ToolProviderType.BUILT_IN
+
+            tool_runtime = ToolManager.get_tool_runtime(
+                provider_type=provider_type,
+                provider_id=node_data.get("provider_id", ""),
+                tool_name=node_data.get("tool_name", ""),
+                tenant_id=tenant_id,
+                invoke_from=InvokeFrom.DEBUGGER,
+            )
+
+            parameters = tool_runtime.get_merged_runtime_parameters()
+            for param in parameters:
+                if param.name == parameter_name:
+                    return {
+                        "name": param.name,
+                        "type": param.type.value if hasattr(param.type, "value") else str(param.type),
+                        "description": param.llm_description
+                        or (param.human_description.en_US if param.human_description else ""),
+                        "required": param.required,
+                    }
+        except Exception as e:
+            # Best-effort lookup: fall back to a generic string parameter rather
+            # than failing generation when the tool schema cannot be resolved.
+            logger.debug("Failed to get parameter info from ToolManager: %s", e)
+
+        return default_info
+
+    @classmethod
+    def _build_extractor_system_prompt(
+        cls,
+        upstream_nodes: list[dict],
+        current_node: dict,
+        parameter_info: dict,
+        language: str,
+    ) -> str:
+        """Build system prompt for extractor code generation."""
+        upstream_json = json.dumps(upstream_nodes, indent=2, ensure_ascii=False)
+        param_type = parameter_info.get("type", "string")
+        return f"""You are a code generator for workflow automation.
+ +Generate {language} code to extract/transform upstream node outputs for the target parameter. + +## Upstream Nodes +{upstream_json} + +## Target +Node: {current_node["data"].get("title", current_node["id"])} +Parameter: {parameter_info.get("name")} ({param_type}) - {parameter_info.get("description", "")} + +## Requirements +- Write a main function that returns type: {param_type} +- Use value_selector format: ["node_id", "output_name"] +""" + + @classmethod + def _parse_code_node_output(cls, content: Mapping[str, Any] | None, language: str, parameter_type: str) -> dict: + """ + Parse structured output to CodeNodeData format. + + Args: + content: Structured output dict from invoke_llm_with_structured_output + language: Code language + parameter_type: Expected parameter type + + Returns dict with variables, code_language, code, outputs, message, error. + """ + if content is None: + return cls._error_response("Empty or invalid response from LLM") + + # Validate and normalize variables + variables = [ + {"variable": v.get("variable", ""), "value_selector": v.get("value_selector", [])} + for v in content.get("variables", []) + if isinstance(v, dict) + ] + + outputs = content.get("outputs", {"result": {"type": parameter_type}}) + + return { + "variables": variables, + "code_language": language, + "code": content.get("code", ""), + "outputs": outputs, + "message": content.get("explanation", ""), + "error": "", + } + @staticmethod def instruction_modify_legacy( tenant_id: str, flow_id: str, current: str, instruction: str, model_config: dict, ideal_output: str | None diff --git a/api/core/llm_generator/output_parser/file_ref.py b/api/core/llm_generator/output_parser/file_ref.py new file mode 100644 index 0000000000..83489e6a79 --- /dev/null +++ b/api/core/llm_generator/output_parser/file_ref.py @@ -0,0 +1,188 @@ +""" +File reference detection and conversion for structured output. + +This module provides utilities to: +1. 
Detect file reference fields in JSON Schema (format: "dify-file-ref")
+2. Convert file ID strings to File objects after LLM returns
+"""
+
+import uuid
+from collections.abc import Mapping
+from typing import Any
+
+from core.file import File
+from core.variables.segments import ArrayFileSegment, FileSegment
+from factories.file_factory import build_from_mapping
+
+FILE_REF_FORMAT = "dify-file-ref"
+
+
+def is_file_ref_property(schema: dict) -> bool:
+    """Check if a schema property is a file reference."""
+    return schema.get("type") == "string" and schema.get("format") == FILE_REF_FORMAT
+
+
+def detect_file_ref_fields(schema: Mapping[str, Any], path: str = "") -> list[str]:
+    """
+    Recursively detect file reference fields in schema.
+
+    Args:
+        schema: JSON Schema to analyze
+        path: Current path in the schema (used for recursion)
+
+    Returns:
+        List of JSON paths containing file refs, e.g., ["image_id", "files[*]"]
+    """
+    file_ref_paths: list[str] = []
+    schema_type = schema.get("type")
+
+    if schema_type == "object":
+        for prop_name, prop_schema in schema.get("properties", {}).items():
+            # Guard before calling is_file_ref_property: property schemas may be
+            # malformed (non-dict), which would otherwise raise AttributeError.
+            if not isinstance(prop_schema, dict):
+                continue
+            current_path = f"{path}.{prop_name}" if path else prop_name
+            if is_file_ref_property(prop_schema):
+                file_ref_paths.append(current_path)
+            else:
+                file_ref_paths.extend(detect_file_ref_fields(prop_schema, current_path))
+
+    elif schema_type == "array":
+        items_schema = schema.get("items", {})
+        array_path = f"{path}[*]" if path else "[*]"
+        # "items" may also be a list of schemas (JSON Schema tuple validation)
+        # or malformed; only a dict schema can describe a file reference.
+        if isinstance(items_schema, dict):
+            if is_file_ref_property(items_schema):
+                file_ref_paths.append(array_path)
+            else:
+                file_ref_paths.extend(detect_file_ref_fields(items_schema, array_path))
+
+    return file_ref_paths
+
+
+def convert_file_refs_in_output(
+    output: Mapping[str, Any],
+    json_schema: Mapping[str, Any],
+    tenant_id: str,
+) -> dict[str, Any]:
+    """
+    Convert file ID strings to File objects based on schema.
+
+    Args:
+        output: The structured_output from LLM result
+        json_schema: The original JSON schema (to detect file ref fields)
+        tenant_id: Tenant ID for file lookup
+
+    Returns:
+        Output with file references converted to File objects
+    """
+    file_ref_paths = detect_file_ref_fields(json_schema)
+    if not file_ref_paths:
+        return dict(output)
+
+    result = _deep_copy_dict(output)
+
+    for path in file_ref_paths:
+        _convert_path_in_place(result, path.split("."), tenant_id)
+
+    return result
+
+
+def _deep_copy_dict(obj: Mapping[str, Any]) -> dict[str, Any]:
+    """Deep copy a mapping to a mutable dict."""
+    result: dict[str, Any] = {}
+    for key, value in obj.items():
+        if isinstance(value, Mapping):
+            result[key] = _deep_copy_dict(value)
+        elif isinstance(value, list):
+            result[key] = [_deep_copy_dict(item) if isinstance(item, Mapping) else item for item in value]
+        else:
+            result[key] = value
+    return result
+
+
+def _convert_path_in_place(obj: dict, path_parts: list[str], tenant_id: str) -> None:
+    """Convert file refs at the given path in place, wrapping in Segment types."""
+    if not path_parts:
+        return
+
+    current = path_parts[0]
+    remaining = path_parts[1:]
+
+    # Handle array notation like "files[*]"
+    if current.endswith("[*]"):
+        key = current[:-3] if current != "[*]" else None
+        if key is None:
+            # A bare "[*]" cannot apply here: obj is always a dict at this
+            # level, so there is no top-level list to convert - nothing to do.
+            # (Previously the conversion work was done and silently discarded.)
+            return
+        target = obj.get(key)
+
+        if isinstance(target, list):
+            if remaining:
+                # Nested array with remaining path - recurse into each item
+                for item in target:
+                    if isinstance(item, dict):
+                        _convert_path_in_place(item, remaining, tenant_id)
+            else:
+                # Array of file IDs - convert all and wrap in ArrayFileSegment
+                files: list[File] = []
+                for item in target:
+                    file = _convert_file_id(item, tenant_id)
+                    if file is not None:
+                        files.append(file)
+                # Replace the array with ArrayFileSegment
+                obj[key] = ArrayFileSegment(value=files)
+        return
+
+    if not remaining:
+        # Leaf node - convert the value and wrap in FileSegment
+        if current in obj:
+            file = 
_convert_file_id(obj[current], tenant_id) + if file is not None: + obj[current] = FileSegment(value=file) + else: + obj[current] = None + else: + # Recurse into nested object + if current in obj and isinstance(obj[current], dict): + _convert_path_in_place(obj[current], remaining, tenant_id) + + +def _convert_file_id(file_id: Any, tenant_id: str) -> File | None: + """ + Convert a file ID string to a File object. + + Tries multiple file sources in order: + 1. ToolFile (files generated by tools/workflows) + 2. UploadFile (files uploaded by users) + """ + if not isinstance(file_id, str): + return None + + # Validate UUID format + try: + uuid.UUID(file_id) + except ValueError: + return None + + # Try ToolFile first (files generated by tools/workflows) + try: + return build_from_mapping( + mapping={ + "transfer_method": "tool_file", + "tool_file_id": file_id, + }, + tenant_id=tenant_id, + ) + except ValueError: + pass + + # Try UploadFile (files uploaded by users) + try: + return build_from_mapping( + mapping={ + "transfer_method": "local_file", + "upload_file_id": file_id, + }, + tenant_id=tenant_id, + ) + except ValueError: + pass + + # File not found in any source + return None diff --git a/api/core/llm_generator/output_parser/structured_output.py b/api/core/llm_generator/output_parser/structured_output.py index 686529c3ca..250acf14fd 100644 --- a/api/core/llm_generator/output_parser/structured_output.py +++ b/api/core/llm_generator/output_parser/structured_output.py @@ -8,6 +8,7 @@ import json_repair from pydantic import TypeAdapter, ValidationError from core.llm_generator.output_parser.errors import OutputParserError +from core.llm_generator.output_parser.file_ref import convert_file_refs_in_output from core.llm_generator.prompts import STRUCTURED_OUTPUT_PROMPT from core.model_manager import ModelInstance from core.model_runtime.callbacks.base_callback import Callback @@ -57,6 +58,7 @@ def invoke_llm_with_structured_output( stream: Literal[True], user: str | None = 
None, callbacks: list[Callback] | None = None, + tenant_id: str | None = None, ) -> Generator[LLMResultChunkWithStructuredOutput, None, None]: ... @overload def invoke_llm_with_structured_output( @@ -72,6 +74,7 @@ def invoke_llm_with_structured_output( stream: Literal[False], user: str | None = None, callbacks: list[Callback] | None = None, + tenant_id: str | None = None, ) -> LLMResultWithStructuredOutput: ... @overload def invoke_llm_with_structured_output( @@ -87,6 +90,7 @@ def invoke_llm_with_structured_output( stream: bool = True, user: str | None = None, callbacks: list[Callback] | None = None, + tenant_id: str | None = None, ) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: ... def invoke_llm_with_structured_output( *, @@ -101,20 +105,28 @@ def invoke_llm_with_structured_output( stream: bool = True, user: str | None = None, callbacks: list[Callback] | None = None, + tenant_id: str | None = None, ) -> LLMResultWithStructuredOutput | Generator[LLMResultChunkWithStructuredOutput, None, None]: """ - Invoke large language model with structured output - 1. This method invokes model_instance.invoke_llm with json_schema - 2. Try to parse the result as structured output + Invoke large language model with structured output. + This method invokes model_instance.invoke_llm with json_schema and parses + the result as structured output. + + :param provider: model provider name + :param model_schema: model schema entity + :param model_instance: model instance to invoke :param prompt_messages: prompt messages - :param json_schema: json schema + :param json_schema: json schema for structured output :param model_parameters: model parameters :param tools: tools for tool calling :param stop: stop words :param stream: is stream response :param user: unique user id :param callbacks: callbacks + :param tenant_id: tenant ID for file reference conversion. 
When provided and + json_schema contains file reference fields (format: "dify-file-ref"), + file IDs in the output will be automatically converted to File objects. :return: full response or stream response chunk generator result """ @@ -153,8 +165,18 @@ def invoke_llm_with_structured_output( f"Failed to parse structured output, LLM result is not a string: {llm_result.message.content}" ) + structured_output = _parse_structured_output(llm_result.message.content) + + # Convert file references if tenant_id is provided + if tenant_id is not None: + structured_output = convert_file_refs_in_output( + output=structured_output, + json_schema=json_schema, + tenant_id=tenant_id, + ) + return LLMResultWithStructuredOutput( - structured_output=_parse_structured_output(llm_result.message.content), + structured_output=structured_output, model=llm_result.model, message=llm_result.message, usage=llm_result.usage, @@ -186,8 +208,18 @@ def invoke_llm_with_structured_output( delta=event.delta, ) + structured_output = _parse_structured_output(result_text) + + # Convert file references if tenant_id is provided + if tenant_id is not None: + structured_output = convert_file_refs_in_output( + output=structured_output, + json_schema=json_schema, + tenant_id=tenant_id, + ) + yield LLMResultChunkWithStructuredOutput( - structured_output=_parse_structured_output(result_text), + structured_output=structured_output, model=model_schema.model, prompt_messages=prompt_messages, system_fingerprint=system_fingerprint, diff --git a/api/core/llm_generator/utils.py b/api/core/llm_generator/utils.py new file mode 100644 index 0000000000..86c9091dd4 --- /dev/null +++ b/api/core/llm_generator/utils.py @@ -0,0 +1,45 @@ +"""Utility functions for LLM generator.""" + +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + PromptMessage, + PromptMessageRole, + SystemPromptMessage, + ToolPromptMessage, + UserPromptMessage, +) + + +def deserialize_prompt_messages(messages: 
list[dict]) -> list[PromptMessage]:
+    """
+    Deserialize list of dicts to list[PromptMessage].
+
+    Expected format:
+    [
+        {"role": "user", "content": "..."},
+        {"role": "assistant", "content": "..."},
+    ]
+    """
+    result: list[PromptMessage] = []
+    for msg in messages:
+        role = PromptMessageRole.value_of(msg["role"])
+        content = msg.get("content", "")
+
+        match role:
+            case PromptMessageRole.USER:
+                result.append(UserPromptMessage(content=content))
+            case PromptMessageRole.ASSISTANT:
+                result.append(AssistantPromptMessage(content=content))
+            case PromptMessageRole.SYSTEM:
+                result.append(SystemPromptMessage(content=content))
+            case PromptMessageRole.TOOL:
+                result.append(ToolPromptMessage(content=content, tool_call_id=msg.get("tool_call_id", "")))
+
+    return result
+
+
+def serialize_prompt_messages(messages: list[PromptMessage]) -> list[dict]:
+    """
+    Serialize list[PromptMessage] to list of dicts.
+
+    Inverse of deserialize_prompt_messages: tool_call_id is preserved for tool
+    messages so a serialize -> deserialize round-trip is lossless.
+    """
+    result: list[dict] = []
+    for msg in messages:
+        item: dict = {"role": msg.role.value, "content": msg.content}
+        if isinstance(msg, ToolPromptMessage):
+            item["tool_call_id"] = msg.tool_call_id
+        result.append(item)
+    return result
diff --git a/api/core/memory/README.md b/api/core/memory/README.md
new file mode 100644
index 0000000000..055ce0fe3b
--- /dev/null
+++ b/api/core/memory/README.md
@@ -0,0 +1,267 @@
+# Memory Module
+
+This module provides memory management for LLM conversations, enabling context retention across dialogue turns.
+
+## Overview
+
+The memory module contains two types of memory implementations:
+
+1. **TokenBufferMemory** - Conversation-level memory (existing)
+2. **NodeTokenBufferMemory** - Node-level memory (**Chatflow only**)
+
+> **Note**: `NodeTokenBufferMemory` is only available in **Chatflow** (advanced-chat mode).
+> This is because it requires both `conversation_id` and `node_id`, which are only present in Chatflow.
+> Standard Workflow mode does not have `conversation_id` and therefore cannot use node-level memory.
+ +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ Memory Architecture │ +├─────────────────────────────────────────────────────────────────────────────┤ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────-┐ │ +│ │ TokenBufferMemory │ │ +│ │ Scope: Conversation │ │ +│ │ Storage: Database (Message table) │ │ +│ │ Key: conversation_id │ │ +│ └─────────────────────────────────────────────────────────────────────-┘ │ +│ │ +│ ┌─────────────────────────────────────────────────────────────────────-┐ │ +│ │ NodeTokenBufferMemory │ │ +│ │ Scope: Node within Conversation │ │ +│ │ Storage: WorkflowNodeExecutionModel.outputs["context"] │ │ +│ │ Key: (conversation_id, node_id, workflow_run_id) │ │ +│ └─────────────────────────────────────────────────────────────────────-┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +--- + +## TokenBufferMemory (Existing) + +### Purpose + +`TokenBufferMemory` retrieves conversation history from the `Message` table and converts it to `PromptMessage` objects for LLM context. + +### Key Features + +- **Conversation-scoped**: All messages within a conversation are candidates +- **Thread-aware**: Uses `parent_message_id` to extract only the current thread (supports regeneration scenarios) +- **Token-limited**: Truncates history to fit within `max_token_limit` +- **File support**: Handles `MessageFile` attachments (images, documents, etc.) + +### Data Flow + +``` +Message Table TokenBufferMemory LLM + │ │ │ + │ SELECT * FROM messages │ │ + │ WHERE conversation_id = ? 
│ │ + │ ORDER BY created_at DESC │ │ + ├─────────────────────────────────▶│ │ + │ │ │ + │ extract_thread_messages() │ + │ │ │ + │ build_prompt_message_with_files() │ + │ │ │ + │ truncate by max_token_limit │ + │ │ │ + │ │ Sequence[PromptMessage] + │ ├───────────────────────▶│ + │ │ │ +``` + +### Thread Extraction + +When a user regenerates a response, a new thread is created: + +``` +Message A (user) + └── Message A' (assistant) + └── Message B (user) + └── Message B' (assistant) + └── Message A'' (assistant, regenerated) ← New thread + └── Message C (user) + └── Message C' (assistant) +``` + +`extract_thread_messages()` traces back from the latest message using `parent_message_id` to get only the current thread: `[A, A'', C, C']` + +### Usage + +```python +from core.memory.token_buffer_memory import TokenBufferMemory + +memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance) +history = memory.get_history_prompt_messages(max_token_limit=2000, message_limit=100) +``` + +--- + +## NodeTokenBufferMemory + +### Purpose + +`NodeTokenBufferMemory` provides **node-scoped memory** within a conversation. Each LLM node in a workflow can maintain its own independent conversation history. + +### Use Cases + +1. **Multi-LLM Workflows**: Different LLM nodes need separate context +2. **Iterative Processing**: An LLM node in a loop needs to accumulate context across iterations +3. **Specialized Agents**: Each agent node maintains its own dialogue history + +### Design: Zero Extra Storage + +**Key insight**: LLM node already saves complete context in `outputs["context"]`. + +Each LLM node execution outputs: +```python +outputs = { + "text": clean_text, + "context": self._build_context(prompt_messages, clean_text), # Complete dialogue history! + ... 
+} +``` + +This `outputs["context"]` contains: +- All previous user/assistant messages (excluding system prompt) +- The current assistant response + +**No separate storage needed** - we just read from the last execution's `outputs["context"]`. + +### Benefits + +| Aspect | Old Design (Object Storage) | New Design (outputs["context"]) | +|--------|----------------------------|--------------------------------| +| Storage | Separate JSON file | Already in WorkflowNodeExecutionModel | +| Concurrency | Race condition risk | No issue (each execution is INSERT) | +| Cleanup | Need separate cleanup task | Follows node execution lifecycle | +| Migration | Required | None | +| Complexity | High | Low | + +### Data Flow + +``` +WorkflowNodeExecutionModel NodeTokenBufferMemory LLM Node + │ │ │ + │ │◀── get_history_prompt_messages() + │ │ │ + │ SELECT outputs FROM │ │ + │ workflow_node_executions │ │ + │ WHERE workflow_run_id = ? │ │ + │ AND node_id = ? │ │ + │◀─────────────────────────────────┤ │ + │ │ │ + │ outputs["context"] │ │ + ├─────────────────────────────────▶│ │ + │ │ │ + │ deserialize PromptMessages │ + │ │ │ + │ truncate by max_token_limit │ + │ │ │ + │ │ Sequence[PromptMessage] │ + │ ├──────────────────────────▶│ + │ │ │ +``` + +### Thread Tracking + +Thread extraction still uses `Message` table's `parent_message_id` structure: + +1. Query `Message` table for conversation → get thread's `workflow_run_ids` +2. Get the last completed `workflow_run_id` in the thread +3. Query `WorkflowNodeExecutionModel` for that execution's `outputs["context"]` + +### API + +```python +class NodeTokenBufferMemory: + def __init__( + self, + app_id: str, + conversation_id: str, + node_id: str, + tenant_id: str, + model_instance: ModelInstance, + ): + """Initialize node-level memory.""" + ... 
+ + def get_history_prompt_messages( + self, + *, + max_token_limit: int = 2000, + message_limit: int | None = None, + ) -> Sequence[PromptMessage]: + """ + Retrieve history as PromptMessage sequence. + + Reads from last completed execution's outputs["context"]. + """ + ... + + # Legacy methods (no-op, kept for compatibility) + def add_messages(self, *args, **kwargs) -> None: pass + def flush(self) -> None: pass + def clear(self) -> None: pass +``` + +### Configuration + +Add to `MemoryConfig` in `core/workflow/nodes/llm/entities.py`: + +```python +class MemoryMode(StrEnum): + CONVERSATION = "conversation" # Use TokenBufferMemory (default) + NODE = "node" # Use NodeTokenBufferMemory (Chatflow only) + +class MemoryConfig(BaseModel): + role_prefix: RolePrefix | None = None + window: MemoryWindowConfig | None = None + query_prompt_template: str | None = None + mode: MemoryMode = MemoryMode.CONVERSATION +``` + +**Mode Behavior:** + +| Mode | Memory Class | Scope | Availability | +| -------------- | --------------------- | ------------------------ | ------------- | +| `conversation` | TokenBufferMemory | Entire conversation | All app modes | +| `node` | NodeTokenBufferMemory | Per-node in conversation | Chatflow only | + +> When `mode=node` is used in a non-Chatflow context (no conversation_id), it falls back to no memory. + +--- + +## Comparison + +| Feature | TokenBufferMemory | NodeTokenBufferMemory | +| -------------- | ------------------------ | ---------------------------------- | +| Scope | Conversation | Node within Conversation | +| Storage | Database (Message table) | WorkflowNodeExecutionModel.outputs | +| Thread Support | Yes | Yes | +| File Support | Yes (via MessageFile) | Yes (via context serialization) | +| Token Limit | Yes | Yes | +| Use Case | Standard chat apps | Complex workflows | + +--- + +## Extending to Other Nodes + +Currently only **LLM Node** outputs `context` in its outputs. To enable node memory for other nodes: + +1. 
Add `outputs["context"] = self._build_context(prompt_messages, response)` in the node +2. The `NodeTokenBufferMemory` will automatically pick it up + +Nodes that could potentially support this: +- `question_classifier` +- `parameter_extractor` +- `agent` + +--- + +## Future Considerations + +1. **Cleanup**: Node memory lifecycle follows `WorkflowNodeExecutionModel`, which already has cleanup mechanisms +2. **Compression**: For very long conversations, consider summarization strategies +3. **Extension**: Other nodes may benefit from node-level memory diff --git a/api/core/memory/__init__.py b/api/core/memory/__init__.py new file mode 100644 index 0000000000..d0e2babde2 --- /dev/null +++ b/api/core/memory/__init__.py @@ -0,0 +1,11 @@ +from core.memory.base import BaseMemory +from core.memory.node_token_buffer_memory import ( + NodeTokenBufferMemory, +) +from core.memory.token_buffer_memory import TokenBufferMemory + +__all__ = [ + "BaseMemory", + "NodeTokenBufferMemory", + "TokenBufferMemory", +] diff --git a/api/core/memory/base.py b/api/core/memory/base.py new file mode 100644 index 0000000000..af6e8eeda3 --- /dev/null +++ b/api/core/memory/base.py @@ -0,0 +1,83 @@ +""" +Base memory interfaces and types. + +This module defines the common protocol for memory implementations. +""" + +from abc import ABC, abstractmethod +from collections.abc import Sequence + +from core.model_runtime.entities import ImagePromptMessageContent, PromptMessage + + +class BaseMemory(ABC): + """ + Abstract base class for memory implementations. + + Provides a common interface for both conversation-level and node-level memory. + """ + + @abstractmethod + def get_history_prompt_messages( + self, + *, + max_token_limit: int = 2000, + message_limit: int | None = None, + ) -> Sequence[PromptMessage]: + """ + Get history prompt messages. 
+ + :param max_token_limit: Maximum tokens for history + :param message_limit: Maximum number of messages + :return: Sequence of PromptMessage for LLM context + """ + pass + + def get_history_prompt_text( + self, + human_prefix: str = "Human", + ai_prefix: str = "Assistant", + max_token_limit: int = 2000, + message_limit: int | None = None, + ) -> str: + """ + Get history prompt as formatted text. + + :param human_prefix: Prefix for human messages + :param ai_prefix: Prefix for assistant messages + :param max_token_limit: Maximum tokens for history + :param message_limit: Maximum number of messages + :return: Formatted history text + """ + from core.model_runtime.entities import ( + PromptMessageRole, + TextPromptMessageContent, + ) + + prompt_messages = self.get_history_prompt_messages( + max_token_limit=max_token_limit, + message_limit=message_limit, + ) + + string_messages = [] + for m in prompt_messages: + if m.role == PromptMessageRole.USER: + role = human_prefix + elif m.role == PromptMessageRole.ASSISTANT: + role = ai_prefix + else: + continue + + if isinstance(m.content, list): + inner_msg = "" + for content in m.content: + if isinstance(content, TextPromptMessageContent): + inner_msg += f"{content.data}\n" + elif isinstance(content, ImagePromptMessageContent): + inner_msg += "[image]\n" + string_messages.append(f"{role}: {inner_msg.strip()}") + else: + message = f"{role}: {m.content}" + string_messages.append(message) + + return "\n".join(string_messages) diff --git a/api/core/memory/node_token_buffer_memory.py b/api/core/memory/node_token_buffer_memory.py new file mode 100644 index 0000000000..ec6b04b13e --- /dev/null +++ b/api/core/memory/node_token_buffer_memory.py @@ -0,0 +1,197 @@ +""" +Node-level Token Buffer Memory for Chatflow. + +This module provides node-scoped memory within a conversation. +Each LLM node in a workflow can maintain its own independent conversation history. 
+ +Note: This is only available in Chatflow (advanced-chat mode) because it requires +both conversation_id and node_id. + +Design: +- History is read directly from WorkflowNodeExecutionModel.outputs["context"] +- No separate storage needed - the context is already saved during node execution +- Thread tracking leverages Message table's parent_message_id structure +""" + +import logging +from collections.abc import Sequence +from typing import cast + +from sqlalchemy import select +from sqlalchemy.orm import Session + +from core.file import file_manager +from core.memory.base import BaseMemory +from core.model_manager import ModelInstance +from core.model_runtime.entities import ( + AssistantPromptMessage, + MultiModalPromptMessageContent, + PromptMessage, + PromptMessageRole, + SystemPromptMessage, + ToolPromptMessage, + UserPromptMessage, +) +from core.model_runtime.entities.message_entities import PromptMessageContentUnionTypes +from core.prompt.utils.extract_thread_messages import extract_thread_messages +from extensions.ext_database import db +from models.model import Message +from models.workflow import WorkflowNodeExecutionModel + +logger = logging.getLogger(__name__) + + +class NodeTokenBufferMemory(BaseMemory): + """ + Node-level Token Buffer Memory. + + Provides node-scoped memory within a conversation. Each LLM node can maintain + its own independent conversation history. + + Key design: History is read directly from WorkflowNodeExecutionModel.outputs["context"], + which is already saved during node execution. No separate storage needed. 
+ """ + + def __init__( + self, + app_id: str, + conversation_id: str, + node_id: str, + tenant_id: str, + model_instance: ModelInstance, + ): + self.app_id = app_id + self.conversation_id = conversation_id + self.node_id = node_id + self.tenant_id = tenant_id + self.model_instance = model_instance + + def _get_thread_workflow_run_ids(self) -> list[str]: + """ + Get workflow_run_ids for the current thread by querying Message table. + Returns workflow_run_ids in chronological order (oldest first). + """ + with Session(db.engine, expire_on_commit=False) as session: + stmt = ( + select(Message) + .where(Message.conversation_id == self.conversation_id) + .order_by(Message.created_at.desc()) + .limit(500) + ) + messages = list(session.scalars(stmt).all()) + + if not messages: + return [] + + # Extract thread messages using existing logic + thread_messages = extract_thread_messages(messages) + + # For newly created message, its answer is temporarily empty, skip it + if thread_messages and not thread_messages[0].answer and thread_messages[0].answer_tokens == 0: + thread_messages.pop(0) + + # Reverse to get chronological order, extract workflow_run_ids + return [msg.workflow_run_id for msg in reversed(thread_messages) if msg.workflow_run_id] + + def _deserialize_prompt_message(self, msg_dict: dict) -> PromptMessage: + """Deserialize a dict to PromptMessage based on role.""" + role = msg_dict.get("role") + if role in (PromptMessageRole.USER, "user"): + return UserPromptMessage.model_validate(msg_dict) + elif role in (PromptMessageRole.ASSISTANT, "assistant"): + return AssistantPromptMessage.model_validate(msg_dict) + elif role in (PromptMessageRole.SYSTEM, "system"): + return SystemPromptMessage.model_validate(msg_dict) + elif role in (PromptMessageRole.TOOL, "tool"): + return ToolPromptMessage.model_validate(msg_dict) + else: + return PromptMessage.model_validate(msg_dict) + + def _deserialize_context(self, context_data: list[dict]) -> list[PromptMessage]: + """Deserialize 
context data from outputs to list of PromptMessage.""" + messages = [] + for msg_dict in context_data: + try: + msg = self._deserialize_prompt_message(msg_dict) + msg = self._restore_multimodal_content(msg) + messages.append(msg) + except Exception as e: + logger.warning("Failed to deserialize prompt message: %s", e) + return messages + + def _restore_multimodal_content(self, message: PromptMessage) -> PromptMessage: + """ + Restore multimodal content (base64 or url) from file_ref. + + When context is saved, base64_data is cleared to save storage space. + This method restores the content by parsing file_ref (format: "method:id_or_url"). + """ + content = message.content + if content is None or isinstance(content, str): + return message + + # Process list content, restoring multimodal data from file references + restored_content: list[PromptMessageContentUnionTypes] = [] + for item in content: + if isinstance(item, MultiModalPromptMessageContent): + # restore_multimodal_content preserves the concrete subclass type + restored_item = file_manager.restore_multimodal_content(item) + restored_content.append(cast(PromptMessageContentUnionTypes, restored_item)) + else: + restored_content.append(item) + + return message.model_copy(update={"content": restored_content}) + + def get_history_prompt_messages( + self, + *, + max_token_limit: int = 2000, + message_limit: int | None = None, + ) -> Sequence[PromptMessage]: + """ + Retrieve history as PromptMessage sequence. + History is read directly from the last completed node execution's outputs["context"]. 
+ """ + _ = message_limit # unused, kept for interface compatibility + + thread_workflow_run_ids = self._get_thread_workflow_run_ids() + if not thread_workflow_run_ids: + return [] + + # Get the last completed workflow_run_id (contains accumulated context) + last_run_id = thread_workflow_run_ids[-1] + + with Session(db.engine, expire_on_commit=False) as session: + stmt = select(WorkflowNodeExecutionModel).where( + WorkflowNodeExecutionModel.workflow_run_id == last_run_id, + WorkflowNodeExecutionModel.node_id == self.node_id, + WorkflowNodeExecutionModel.status == "succeeded", + ) + execution = session.scalars(stmt).first() + + if not execution: + return [] + + outputs = execution.outputs_dict + if not outputs: + return [] + + context_data = outputs.get("context") + + if not context_data or not isinstance(context_data, list): + return [] + + prompt_messages = self._deserialize_context(context_data) + if not prompt_messages: + return [] + + # Truncate by token limit + try: + current_tokens = self.model_instance.get_llm_num_tokens(prompt_messages) + while current_tokens > max_token_limit and len(prompt_messages) > 1: + prompt_messages.pop(0) + current_tokens = self.model_instance.get_llm_num_tokens(prompt_messages) + except Exception as e: + logger.warning("Failed to count tokens for truncation: %s", e) + + return prompt_messages diff --git a/api/core/memory/token_buffer_memory.py b/api/core/memory/token_buffer_memory.py index 3ebbb60f85..58ffe04240 100644 --- a/api/core/memory/token_buffer_memory.py +++ b/api/core/memory/token_buffer_memory.py @@ -5,12 +5,12 @@ from sqlalchemy.orm import sessionmaker from core.app.app_config.features.file_upload.manager import FileUploadConfigManager from core.file import file_manager +from core.memory.base import BaseMemory from core.model_manager import ModelInstance from core.model_runtime.entities import ( AssistantPromptMessage, ImagePromptMessageContent, PromptMessage, - PromptMessageRole, TextPromptMessageContent, 
UserPromptMessage, ) @@ -24,7 +24,7 @@ from repositories.api_workflow_run_repository import APIWorkflowRunRepository from repositories.factory import DifyAPIRepositoryFactory -class TokenBufferMemory: +class TokenBufferMemory(BaseMemory): def __init__( self, conversation: Conversation, @@ -115,10 +115,14 @@ class TokenBufferMemory: return AssistantPromptMessage(content=prompt_message_contents) def get_history_prompt_messages( - self, max_token_limit: int = 2000, message_limit: int | None = None + self, + *, + max_token_limit: int = 2000, + message_limit: int | None = None, ) -> Sequence[PromptMessage]: """ Get history prompt messages. + :param max_token_limit: max token limit :param message_limit: message limit """ @@ -200,44 +204,3 @@ class TokenBufferMemory: curr_message_tokens = self.model_instance.get_llm_num_tokens(prompt_messages) return prompt_messages - - def get_history_prompt_text( - self, - human_prefix: str = "Human", - ai_prefix: str = "Assistant", - max_token_limit: int = 2000, - message_limit: int | None = None, - ) -> str: - """ - Get history prompt text. 
- :param human_prefix: human prefix - :param ai_prefix: ai prefix - :param max_token_limit: max token limit - :param message_limit: message limit - :return: - """ - prompt_messages = self.get_history_prompt_messages(max_token_limit=max_token_limit, message_limit=message_limit) - - string_messages = [] - for m in prompt_messages: - if m.role == PromptMessageRole.USER: - role = human_prefix - elif m.role == PromptMessageRole.ASSISTANT: - role = ai_prefix - else: - continue - - if isinstance(m.content, list): - inner_msg = "" - for content in m.content: - if isinstance(content, TextPromptMessageContent): - inner_msg += f"{content.data}\n" - elif isinstance(content, ImagePromptMessageContent): - inner_msg += "[image]\n" - - string_messages.append(f"{role}: {inner_msg.strip()}") - else: - message = f"{role}: {m.content}" - string_messages.append(message) - - return "\n".join(string_messages) diff --git a/api/core/model_runtime/entities/message_entities.py b/api/core/model_runtime/entities/message_entities.py index 9e46d72893..284f4dba01 100644 --- a/api/core/model_runtime/entities/message_entities.py +++ b/api/core/model_runtime/entities/message_entities.py @@ -91,6 +91,9 @@ class MultiModalPromptMessageContent(PromptMessageContent): mime_type: str = Field(default=..., description="the mime type of multi-modal file") filename: str = Field(default="", description="the filename of multi-modal file") + # File reference for context restoration, format: "transfer_method:related_id" or "remote:url" + file_ref: str | None = Field(default=None, description="Encoded file reference for restoration") + @property def data(self): return self.url or f"data:{self.mime_type};base64,{self.base64_data}" @@ -276,7 +279,5 @@ class ToolPromptMessage(PromptMessage): :return: True if prompt message is empty, False otherwise """ - if not super().is_empty() and not self.tool_call_id: - return False - - return True + # ToolPromptMessage is not empty if it has content OR has a tool_call_id + 
return super().is_empty() and not self.tool_call_id diff --git a/api/core/prompt/advanced_prompt_transform.py b/api/core/prompt/advanced_prompt_transform.py index d74b2bddf5..ffc2bb0083 100644 --- a/api/core/prompt/advanced_prompt_transform.py +++ b/api/core/prompt/advanced_prompt_transform.py @@ -5,7 +5,7 @@ from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEnti from core.file import file_manager from core.file.models import File from core.helper.code_executor.jinja2.jinja2_formatter import Jinja2Formatter -from core.memory.token_buffer_memory import TokenBufferMemory +from core.memory.base import BaseMemory from core.model_runtime.entities import ( AssistantPromptMessage, PromptMessage, @@ -43,7 +43,7 @@ class AdvancedPromptTransform(PromptTransform): files: Sequence[File], context: str | None, memory_config: MemoryConfig | None, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, model_config: ModelConfigWithCredentialsEntity, image_detail_config: ImagePromptMessageContent.DETAIL | None = None, ) -> list[PromptMessage]: @@ -84,7 +84,7 @@ class AdvancedPromptTransform(PromptTransform): files: Sequence[File], context: str | None, memory_config: MemoryConfig | None, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, model_config: ModelConfigWithCredentialsEntity, image_detail_config: ImagePromptMessageContent.DETAIL | None = None, ) -> list[PromptMessage]: @@ -145,7 +145,7 @@ class AdvancedPromptTransform(PromptTransform): files: Sequence[File], context: str | None, memory_config: MemoryConfig | None, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, model_config: ModelConfigWithCredentialsEntity, image_detail_config: ImagePromptMessageContent.DETAIL | None = None, ) -> list[PromptMessage]: @@ -270,7 +270,7 @@ class AdvancedPromptTransform(PromptTransform): def _set_histories_variable( self, - memory: TokenBufferMemory, + memory: BaseMemory, memory_config: MemoryConfig, raw_prompt: str, 
role_prefix: MemoryConfig.RolePrefix, diff --git a/api/core/prompt/entities/advanced_prompt_entities.py b/api/core/prompt/entities/advanced_prompt_entities.py index 7094633093..457800bad2 100644 --- a/api/core/prompt/entities/advanced_prompt_entities.py +++ b/api/core/prompt/entities/advanced_prompt_entities.py @@ -1,3 +1,4 @@ +from enum import StrEnum from typing import Literal from pydantic import BaseModel @@ -5,6 +6,13 @@ from pydantic import BaseModel from core.model_runtime.entities.message_entities import PromptMessageRole +class MemoryMode(StrEnum): + """Memory mode for LLM nodes.""" + + CONVERSATION = "conversation" # Use TokenBufferMemory (default, existing behavior) + NODE = "node" # Use NodeTokenBufferMemory (Chatflow only) + + class ChatModelMessage(BaseModel): """ Chat Message. @@ -48,3 +56,4 @@ class MemoryConfig(BaseModel): role_prefix: RolePrefix | None = None window: WindowConfig query_prompt_template: str | None = None + mode: MemoryMode = MemoryMode.CONVERSATION diff --git a/api/core/prompt/prompt_transform.py b/api/core/prompt/prompt_transform.py index a6e873d587..c0031de6bf 100644 --- a/api/core/prompt/prompt_transform.py +++ b/api/core/prompt/prompt_transform.py @@ -1,7 +1,7 @@ from typing import Any from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity -from core.memory.token_buffer_memory import TokenBufferMemory +from core.memory.base import BaseMemory from core.model_manager import ModelInstance from core.model_runtime.entities.message_entities import PromptMessage from core.model_runtime.entities.model_entities import ModelPropertyKey @@ -11,7 +11,7 @@ from core.prompt.entities.advanced_prompt_entities import MemoryConfig class PromptTransform: def _append_chat_histories( self, - memory: TokenBufferMemory, + memory: BaseMemory, memory_config: MemoryConfig, prompt_messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity, @@ -52,7 +52,7 @@ class PromptTransform: def 
_get_history_messages_from_memory( self, - memory: TokenBufferMemory, + memory: BaseMemory, memory_config: MemoryConfig, max_token_limit: int, human_prefix: str | None = None, @@ -73,7 +73,7 @@ class PromptTransform: return memory.get_history_prompt_text(**kwargs) def _get_history_messages_list_from_memory( - self, memory: TokenBufferMemory, memory_config: MemoryConfig, max_token_limit: int + self, memory: BaseMemory, memory_config: MemoryConfig, max_token_limit: int ) -> list[PromptMessage]: """Get memory messages.""" return list( diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index f8213d9fd7..4d29f419d1 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -1047,6 +1047,8 @@ class ToolManager: continue tool_input = ToolNodeData.ToolInput.model_validate(tool_configurations.get(parameter.name, {})) if tool_input.type == "variable": + if not isinstance(tool_input.value, list): + raise ToolParameterError(f"Invalid variable selector for {parameter.name}") variable = variable_pool.get(tool_input.value) if variable is None: raise ToolParameterError(f"Variable {tool_input.value} does not exist") @@ -1056,6 +1058,11 @@ class ToolManager: elif tool_input.type == "mixed": segment_group = variable_pool.convert_template(str(tool_input.value)) parameter_value = segment_group.text + elif tool_input.type == "mention": + # Mention type not supported in agent mode + raise ToolParameterError( + f"Mention type not supported in agent for parameter '{parameter.name}'" + ) else: raise ToolParameterError(f"Unknown tool input type '{tool_input.type}'") runtime_parameters[parameter.name] = parameter_value diff --git a/api/core/variables/__init__.py b/api/core/variables/__init__.py index 7498224923..e8f3a6d17b 100644 --- a/api/core/variables/__init__.py +++ b/api/core/variables/__init__.py @@ -4,6 +4,7 @@ from .segments import ( ArrayFileSegment, ArrayNumberSegment, ArrayObjectSegment, + ArrayPromptMessageSegment, ArraySegment, 
ArrayStringSegment, FileSegment, @@ -20,6 +21,7 @@ from .variables import ( ArrayFileVariable, ArrayNumberVariable, ArrayObjectVariable, + ArrayPromptMessageVariable, ArrayStringVariable, ArrayVariable, FileVariable, @@ -42,6 +44,8 @@ __all__ = [ "ArrayNumberVariable", "ArrayObjectSegment", "ArrayObjectVariable", + "ArrayPromptMessageSegment", + "ArrayPromptMessageVariable", "ArraySegment", "ArrayStringSegment", "ArrayStringVariable", diff --git a/api/core/variables/segments.py b/api/core/variables/segments.py index 8330f1fe19..81d7fb15ca 100644 --- a/api/core/variables/segments.py +++ b/api/core/variables/segments.py @@ -6,6 +6,7 @@ from typing import Annotated, Any, TypeAlias from pydantic import BaseModel, ConfigDict, Discriminator, Tag, field_validator from core.file import File +from core.model_runtime.entities import PromptMessage from .types import SegmentType @@ -208,6 +209,15 @@ class ArrayBooleanSegment(ArraySegment): value: Sequence[bool] +class ArrayPromptMessageSegment(ArraySegment): + value_type: SegmentType = SegmentType.ARRAY_PROMPT_MESSAGE + value: Sequence[PromptMessage] + + def to_object(self): + """Convert to JSON-serializable format for database storage and frontend.""" + return [msg.model_dump() for msg in self.value] + + def get_segment_discriminator(v: Any) -> SegmentType | None: if isinstance(v, Segment): return v.value_type @@ -248,6 +258,7 @@ SegmentUnion: TypeAlias = Annotated[ | Annotated[ArrayObjectSegment, Tag(SegmentType.ARRAY_OBJECT)] | Annotated[ArrayFileSegment, Tag(SegmentType.ARRAY_FILE)] | Annotated[ArrayBooleanSegment, Tag(SegmentType.ARRAY_BOOLEAN)] + | Annotated[ArrayPromptMessageSegment, Tag(SegmentType.ARRAY_PROMPT_MESSAGE)] ), Discriminator(get_segment_discriminator), ] diff --git a/api/core/variables/types.py b/api/core/variables/types.py index 13b926c978..ac055ae232 100644 --- a/api/core/variables/types.py +++ b/api/core/variables/types.py @@ -45,6 +45,7 @@ class SegmentType(StrEnum): ARRAY_OBJECT = "array[object]" 
ARRAY_FILE = "array[file]" ARRAY_BOOLEAN = "array[boolean]" + ARRAY_PROMPT_MESSAGE = "array[message]" NONE = "none" diff --git a/api/core/variables/utils.py b/api/core/variables/utils.py index 8e738f8fd5..799a923084 100644 --- a/api/core/variables/utils.py +++ b/api/core/variables/utils.py @@ -3,8 +3,10 @@ from typing import Any import orjson +from core.model_runtime.entities import PromptMessage + from .segment_group import SegmentGroup -from .segments import ArrayFileSegment, FileSegment, Segment +from .segments import ArrayFileSegment, ArrayPromptMessageSegment, FileSegment, Segment def to_selector(node_id: str, name: str, paths: Iterable[str] = ()) -> Sequence[str]: @@ -16,7 +18,7 @@ def to_selector(node_id: str, name: str, paths: Iterable[str] = ()) -> Sequence[ def segment_orjson_default(o: Any): """Default function for orjson serialization of Segment types""" - if isinstance(o, ArrayFileSegment): + if isinstance(o, (ArrayFileSegment, ArrayPromptMessageSegment)): return [v.model_dump() for v in o.value] elif isinstance(o, FileSegment): return o.value.model_dump() @@ -24,6 +26,8 @@ def segment_orjson_default(o: Any): return [segment_orjson_default(seg) for seg in o.value] elif isinstance(o, Segment): return o.value + elif isinstance(o, PromptMessage): + return o.model_dump() raise TypeError(f"Object of type {type(o).__name__} is not JSON serializable") diff --git a/api/core/variables/variables.py b/api/core/variables/variables.py index a19c53918d..f941594800 100644 --- a/api/core/variables/variables.py +++ b/api/core/variables/variables.py @@ -12,6 +12,7 @@ from .segments import ( ArrayFileSegment, ArrayNumberSegment, ArrayObjectSegment, + ArrayPromptMessageSegment, ArraySegment, ArrayStringSegment, BooleanSegment, @@ -110,6 +111,10 @@ class ArrayBooleanVariable(ArrayBooleanSegment, ArrayVariable): pass +class ArrayPromptMessageVariable(ArrayPromptMessageSegment, ArrayVariable): + pass + + class RAGPipelineVariable(BaseModel): belong_to_node_id: str = 
Field(description="belong to which node id, shared means public") type: str = Field(description="variable type, text-input, paragraph, select, number, file, file-list") @@ -160,6 +165,7 @@ Variable: TypeAlias = Annotated[ | Annotated[ArrayObjectVariable, Tag(SegmentType.ARRAY_OBJECT)] | Annotated[ArrayFileVariable, Tag(SegmentType.ARRAY_FILE)] | Annotated[ArrayBooleanVariable, Tag(SegmentType.ARRAY_BOOLEAN)] + | Annotated[ArrayPromptMessageVariable, Tag(SegmentType.ARRAY_PROMPT_MESSAGE)] | Annotated[SecretVariable, Tag(SegmentType.SECRET)] ), Discriminator(get_segment_discriminator), diff --git a/api/core/workflow/docs/variable_extraction_design.md b/api/core/workflow/docs/variable_extraction_design.md new file mode 100644 index 0000000000..8022d94766 --- /dev/null +++ b/api/core/workflow/docs/variable_extraction_design.md @@ -0,0 +1,1418 @@ +# Variable Extraction Design + +从 `list[PromptMessage]` 类型变量中通过 LLM 调用提取参数值的功能设计。 + +--- + +## 1. 概述 + +### 1.1 背景 + +目前 LLM 节点会输出 `context`,它是 `list[dict]` 类型,保存了当前对话的 prompt messages(不含 system message)。 + +```python +# LLM Node outputs +outputs = { + "text": "LLM response text", + "context": [ + {"role": "user", "text": "user input", "files": []}, + {"role": "assistant", "text": "assistant response", "files": []}, + ], + # ... +} +``` + +### 1.2 需求 + +允许其他节点(如工具节点)通过特殊语法引用 LLM 节点的 `context`,并附带一个 prompt,再次调用 LLM 来提取所需的参数值。 + +**使用场景示例**: + +``` +工具节点参数 = "@llm1.context | 提取关键词" + +执行流程: +1. 获取 llm1 节点的 context(对话历史) +2. 将 context + 提取 prompt 组合成新的 prompt messages +3. 调用 LLM 获取提取结果 +4. 
将结果作为工具节点的参数值 +``` + +### 1.3 节点组概念 + +当 Tool 节点使用了 `@llm1.context` 时,Tool 节点变成一个"节点组": + +``` +┌─────────────────────────────────────────────────────┐ +│ Tool 节点组 (tool1) │ +│ │ +│ ┌───────────────────────────────────────────────┐ │ +│ │ Extraction 子节点 (tool1_ext_1) │ │ +│ │ - 有独立的 node_id │ │ +│ │ - 有独立的日志和流式输出 │ │ +│ │ - 输出存入 variable_pool │ │ +│ └───────────────────────────────────────────────┘ │ +│ │ │ +│ ▼ │ +│ ┌───────────────────────────────────────────────┐ │ +│ │ Tool 主节点 (tool1) │ │ +│ │ - 使用 extraction 的输出作为参数 │ │ +│ │ - 有自己的日志和输出 │ │ +│ └───────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────┘ +``` + +--- + +## 2. 现有调用链分析 + +### 2.1 Graph Engine 执行流程 + +``` +GraphEngine.run() + │ + ▼ +┌───────────────────────────────────────────────────────────────────┐ +│ WorkerPool │ +│ ┌─────────────────────────────────────────────────────────────┐ │ +│ │ Worker Thread │ │ +│ │ │ │ +│ │ Worker._execute_node(node) │ │ +│ │ │ │ │ +│ │ ├─ node.run() │ │ +│ │ │ │ │ │ +│ │ │ ├─ yield NodeRunStartedEvent │ │ +│ │ │ ├─ yield NodeRunStreamChunkEvent (多次) │ │ +│ │ │ └─ yield NodeRunSucceededEvent │ │ +│ │ │ │ │ +│ │ └─ for event in node.run(): │ │ +│ │ event_queue.put(event) ──────────────────────┐ │ │ +│ │ │ │ │ +│ └───────────────────────────────────────────────────────────│─┘ │ +└──────────────────────────────────────────────────────────────│────┘ + │ + ┌──────────────────────────────────────────────────────┘ + │ + ▼ +┌───────────────────────────────────────────────────────────────────┐ +│ Dispatcher Thread │ +│ │ +│ _dispatcher_loop(): │ +│ while True: │ +│ event = event_queue.get() │ +│ event_handler.dispatch(event) │ +│ │ +└───────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌───────────────────────────────────────────────────────────────────┐ +│ EventHandler.dispatch(event) │ +│ │ +│ ┌─ NodeRunStartedEvent ─────────────────────────────────────┐ │ +│ │ → event_collector.collect(event) │ │ +│ 
└───────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ NodeRunStreamChunkEvent ─────────────────────────────────┐ │ +│ │ → response_coordinator.intercept_event(event) │ │ +│ │ → event_collector.collect(stream_events) │ │ +│ └───────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌─ NodeRunSucceededEvent ───────────────────────────────────┐ │ +│ │ → _store_node_outputs(node_id, outputs) │ │ +│ │ └─ variable_pool.add((node_id, var_name), value) │ │ +│ │ → response_coordinator.intercept_event(event) │ │ +│ │ → edge_processor.process_node_success(node_id) │ │ +│ │ └─ ready_queue.put(next_nodes) │ │ +│ └───────────────────────────────────────────────────────────┘ │ +│ │ +└───────────────────────────────────────────────────────────────────┘ +``` + +### 2.2 关键点 + +1. **事件驱动**:节点通过 yield 事件与引擎通信 +2. **Variable Pool 写入时机**:在 `NodeRunSucceededEvent` 处理时,outputs 被写入 variable_pool +3. **事件收集**:所有事件都通过 `event_collector.collect()` 收集,最终返回给调用方 + +--- + +## 3. 节点内嵌子节点设计 + +### 3.1 设计原则 + +**核心思想**:虚拟节点本质上就是一个完整的节点(如 LLM 节点),应该用完整的节点配置来定义,而不是把配置塞到其他地方。 + +**方案**:在节点配置中添加 `virtual_nodes` 字段,定义该节点依赖的子节点列表。子节点是完整的节点定义,执行时先执行子节点,再执行主节点。 + +### 3.2 DSL 设计 + +```yaml +nodes: + - id: tool1 + type: tool + data: + # 虚拟子节点列表 - 完整的节点定义 + virtual_nodes: + - id: ext_1 # 局部 ID,实际会变成 tool1.ext_1 + type: llm # 就是一个完整的 LLM 节点! 
+ data: + title: "提取关键词" + model: + provider: openai + name: gpt-4o-mini + mode: chat + prompt_template: + - role: user + text: "{{#llm1.context#}}" # 引用上游 context + - role: user + text: "请提取关键词,只返回关键词本身" + + # 主节点参数引用子节点输出 + tool_parameters: + query: + type: variable + value: [ext_1, text] # 引用子节点输出 +``` + +### 3.3 完整示例 + +```yaml +nodes: + # 上游 LLM 节点 + - id: llm1 + type: llm + data: + model: + provider: openai + name: gpt-4 + prompt_template: + - role: user + text: "{{#start.query#}}" + + # Tool 节点 - 包含虚拟子节点 + - id: tool1 + type: tool + data: + # 子节点列表 + virtual_nodes: + - id: ext_1 + type: llm + data: + title: "提取搜索关键词" + model: + provider: openai + name: gpt-4o-mini + prompt_template: + - role: user + text: "{{#llm1.context#}}" + - role: user + text: "请从对话中提取用户想要搜索的关键词" + + - id: ext_2 + type: llm + data: + title: "提取搜索范围" + model: + provider: openai + name: gpt-4o-mini + prompt_template: + - role: user + text: "{{#llm1.context#}}" + - role: user + text: "请提取用户想要的搜索范围(如:最近一周)" + + # 主节点配置 + tool_name: google_search + tool_parameters: + query: + type: variable + value: [ext_1, text] # 引用子节点 ext_1 的输出 + time_range: + type: variable + value: [ext_2, text] # 引用子节点 ext_2 的输出 + limit: + type: constant + value: 10 +``` + +### 3.4 子节点 ID 规则 + +子节点的局部 ID 会被转换为全局 ID: + +| 局部 ID | 父节点 ID | 全局 ID | +|---------|-----------|---------| +| `ext_1` | `tool1` | `tool1.ext_1` | +| `ext_2` | `tool1` | `tool1.ext_2` | + +子节点引用使用局部 ID:`[ext_1, text]` + +### 3.5 实体定义 + +```python +# core/workflow/entities/virtual_node.py + +from pydantic import BaseModel +from typing import Any + + +class VirtualNodeConfig(BaseModel): + """Configuration for a virtual sub-node""" + + # Local ID within parent node (e.g., "ext_1") + id: str + + # Node type (e.g., "llm", "code") + type: str + + # Full node data configuration + data: dict[str, Any] + + +# core/workflow/nodes/base/entities.py + +class BaseNodeData(BaseModel): + """Base class for all node data""" + + title: str + desc: str | None = None + 
# ... existing fields ... + + # Virtual sub-nodes + virtual_nodes: list[VirtualNodeConfig] = [] +``` + +### 3.6 支持的节点类型 + +以下节点需要输出 `context` 变量以支持 extraction: + +| 节点类型 | NodeType | context 来源 | 模型配置位置 | +| ------------------- | ------------------------------ | ----------------------- | ---------------------------------- | +| LLM | `NodeType.LLM` | 已实现 `_build_context` | `node_data.model` | +| Agent | `NodeType.AGENT` | 需要添加 | `agent_parameters` 中的 model 参数 | +| Question Classify | `NodeType.QUESTION_CLASSIFIER` | 需要添加 | `node_data.model` | +| Parameter Extractor | `NodeType.PARAMETER_EXTRACTOR` | 需要添加 | `node_data.model` | + +**context 结构**(统一格式): + +```python +context = [ + {"role": "user", "text": "用户输入", "files": []}, + {"role": "assistant", "text": "模型回复", "files": []}, +] +``` + +--- + +## 4. 执行流程 + +### 4.1 节点内嵌子节点执行流程 + +``` +Tool 节点组执行 + │ + ├─ node.run() 被调用 + │ + ├─ Step 1: 执行虚拟子节点 + │ │ + │ │ 遍历 node_data.virtual_nodes + │ │ + │ │ ┌─────────────────────────────────────────────────────────┐ + │ │ │ 虚拟节点 (tool1.ext_1) │ + │ │ │ + │ │ yield NodeRunStartedEvent (tool1_ext_1, type=LLM) │ + │ │ yield NodeRunStreamChunkEvent (tool1_ext_1, chunk) │ + │ │ yield NodeRunSucceededEvent (tool1_ext_1, outputs) │ + │ │ │ + │ │ → variable_pool.add((tool1_ext_1, "text"), result) │ + │ └─────────────────────────────────────────────────────────┘ + │ + ├─ Tool 参数解析:使用 {{#tool1_ext_1.text#}} 替代原 @llm1.context + │ + │ ┌─────────────────────────────────────────────────────────┐ + │ │ Tool 主节点 (tool1) │ + │ │ │ + │ │ yield NodeRunStartedEvent (tool1) │ + │ │ yield NodeRunStreamChunkEvent (tool1, tool output) │ + │ │ yield NodeRunSucceededEvent (tool1, outputs) │ + │ └─────────────────────────────────────────────────────────┘ + │ + └─ 完成 +``` + +**优点**: + +- 虚拟节点有独立的 node_id,有独立的日志 +- 虚拟节点的 outputs 存入 variable_pool,可被其他节点引用 +- UI 可以清晰展示两个独立的执行过程 + +**缺点**: + +- 实现稍复杂 +- 需要处理虚拟节点的 ID 生成和关联 + +### 4.2 推荐方案:思路 B + +采用虚拟节点方案,因为: + +1. 符合你说的"节点组"概念 +2. 两个调用都有独立的日志和输出 +3. 
更清晰的执行边界 + +### 4.3 执行位置选择 + +在节点 \_run() 方法开始时(推荐) + +```python +# tool_node.py +def _run(self) -> Generator[NodeEventBase, None, None]: + # Step 1: 预处理 - 执行所有 extraction + extraction_results = yield from self._process_extractions() + + # Step 2: 使用 extraction 结果生成参数 + parameters = self._generate_parameters(extraction_results) + + # Step 3: 执行 tool 调用 + ... +``` + +**优点**: + +- 可以 yield 事件 +- 在节点控制范围内 +- 清晰的执行顺序 + +## 5. 详细执行流程 + +### 5.1 完整调用链 + +用户定义的 Tool 节点参数(结构化配置): + +```yaml +# Tool 节点配置 +- id: tool1 + type: tool + data: + tool_name: google_search + inputs: + # extraction 类型输入 + - name: query + type: extraction + value: + source_node_id: llm1 + source_variable: context + extraction_prompt: "提取关键词" + # model 不指定,自动继承 llm1 的模型配置 +``` + +执行流程: + +``` +┌─────────────────────────────────────────────────────────────────────────────┐ +│ Worker Thread │ +│ │ +│ Worker._execute_node(tool_node) │ +│ │ │ +│ └─ for event in tool_node.run(): │ +│ event_queue.put(event) │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ ToolNode.run() │ +│ │ +│ ┌───────────────────────────────────────────────────────────────────────┐ │ +│ │ Step 1: 预处理 - 发现并执行 extractions │ │ +│ │ │ │ +│ │ yield from self._process_extractions() │ │ +│ │ │ │ │ +│ │ ├─ 解析参数,发现 type=extraction 的 input │ │ +│ │ │ │ │ +│ │ ├─ 创建虚拟节点 ID: "tool1_ext_1" │ │ +│ │ │ │ │ +│ │ ├─ yield NodeRunStartedEvent( │ │ +│ │ │ node_id="tool1_ext_1", │ │ +│ │ │ node_type=NodeType.LLM, │ │ +│ │ │ node_title="Extraction: 提取关键词" │ │ +│ │ │ ) │ │ +│ │ │ │ │ +│ │ ├─ 获取 llm1.context 并构建 prompt_messages │ │ +│ │ │ │ │ +│ │ ├─ 调用 LLM (流式) │ │ +│ │ │ for chunk in llm_invoke(): │ │ +│ │ │ yield NodeRunStreamChunkEvent( │ │ +│ │ │ node_id="tool1_ext_1", │ │ +│ │ │ selector=["tool1_ext_1", "text"], │ │ +│ │ │ chunk=chunk │ │ +│ │ │ ) │ │ +│ │ │ │ │ +│ │ ├─ yield NodeRunSucceededEvent( │ │ +│ │ │ 
node_id="tool1_ext_1", │ │ +│ │ │ outputs={"text": "关键词A, 关键词B"} │ │ +│ │ │ ) │ │ +│ │ │ │ │ +│ │ └─ 返回 extraction_results = {"tool1_ext_1": "关键词A, 关键词B"} │ │ +│ │ │ │ +│ └───────────────────────────────────────────────────────────────────────┘ │ +│ │ +│ ┌───────────────────────────────────────────────────────────────────────┐ │ +│ │ Step 2: 主节点执行 │ │ +│ │ │ │ +│ │ yield NodeRunStartedEvent( │ │ +│ │ node_id="tool1", │ │ +│ │ node_type=NodeType.TOOL │ │ +│ │ ) │ │ +│ │ │ │ +│ │ parameters = _generate_parameters(extraction_results) │ │ +│ │ # param = "关键词A, 关键词B" │ │ +│ │ │ │ +│ │ tool.invoke(parameters) │ │ +│ │ for chunk in tool_output: │ │ +│ │ yield NodeRunStreamChunkEvent( │ │ +│ │ node_id="tool1", │ │ +│ │ selector=["tool1", "text"], │ │ +│ │ chunk=chunk │ │ +│ │ ) │ │ +│ │ │ │ +│ │ yield NodeRunSucceededEvent( │ │ +│ │ node_id="tool1", │ │ +│ │ outputs={"text": "tool output..."} │ │ +│ │ ) │ │ +│ │ │ │ +│ └───────────────────────────────────────────────────────────────────────┘ │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ + │ + ▼ +┌─────────────────────────────────────────────────────────────────────────────┐ +│ Dispatcher Thread │ +│ │ +│ 收到事件序列: │ +│ │ +│ 1. NodeRunStartedEvent(node_id="tool1_ext_1") │ +│ → event_collector.collect() │ +│ │ +│ 2. NodeRunStreamChunkEvent(node_id="tool1_ext_1", chunk="关键词") │ +│ → response_coordinator → event_collector.collect() │ +│ │ +│ 3. NodeRunSucceededEvent(node_id="tool1_ext_1", outputs={...}) │ +│ → _store_node_outputs("tool1_ext_1", outputs) │ +│ └─ variable_pool.add(("tool1_ext_1", "text"), "关键词A, 关键词B") │ +│ → event_collector.collect() │ +│ 注意:不触发 edge_processor,因为这是虚拟节点 │ +│ │ +│ 4. NodeRunStartedEvent(node_id="tool1") │ +│ → event_collector.collect() │ +│ │ +│ 5. NodeRunStreamChunkEvent(node_id="tool1", chunk="tool output") │ +│ → response_coordinator → event_collector.collect() │ +│ │ +│ 6. 
NodeRunSucceededEvent(node_id="tool1", outputs={...}) │ +│ → _store_node_outputs("tool1", outputs) │ +│ → edge_processor.process_node_success("tool1") │ +│ └─ ready_queue.put(next_nodes) │ +│ │ +└─────────────────────────────────────────────────────────────────────────────┘ +``` + +### 5.2 关键问题:虚拟节点的事件处理 + +虚拟节点(如 `tool1_ext_1`)的事件需要特殊处理: + +```python +# EventHandler 需要区分虚拟节点和真实节点 +def _(self, event: NodeRunSucceededEvent) -> None: + # 存储输出到 variable_pool(虚拟节点也需要) + self._store_node_outputs(event.node_id, event.node_run_result.outputs) + + # 检查是否是虚拟节点(通过 node_id 格式判断:包含 _ext_) + if self._is_virtual_node(event.node_id): + # 虚拟节点不触发边处理,只收集事件 + self._event_collector.collect(event) + return + + # 真实节点:触发边处理,推进工作流 + ready_nodes = self._edge_processor.process_node_success(event.node_id) + ... + +def _is_virtual_node(self, node_id: str) -> bool: + """Check if node_id represents a virtual extraction node.""" + return "_ext_" in node_id +``` + +### 5.3 虚拟节点 ID 命名规则 + +```python +def _generate_extraction_node_id( + parent_node_id: str, + extraction_index: int, +) -> str: + """ + Generate unique ID for extraction virtual node. + + Format: {parent_node_id}_ext_{index} + Example: tool1_ext_1, tool1_ext_2 + """ + return f"{parent_node_id}_ext_{extraction_index}" +``` + +### 5.4 ExtractionExecutor 详细设计 + +**设计原则**: + +1. **直接实例化并运行 LLMNode**:创建真正的 LLMNode 实例并调用 `run()` +2. **完全复用节点逻辑**:LLMNode 的 `_run()`、Node 基类的 `run()` 和异常处理全部复用 +3. **通过重新实例化实现重试**:失败时重新创建 LLMNode 实例并再次运行 +4. **自动获得所有能力**:token 统计、流式输出、完整的 NodeRunResult 格式 + +```python +# core/workflow/nodes/base/extraction_executor.py + +class ExtractionExecutor: + """ + Executes LLM calls for extracting values from PromptMessage-type variables. + + This executor directly instantiates LLMNode instances, fully reusing: + - LLMNode's _run() logic + - Node base class's run() method and exception handling + - All events and token statistics + + Retry is implemented at this level by re-instantiating and re-running the node. 
+ """ + + def __init__( + self, + *, + variable_pool: VariablePool, + graph_config: Mapping[str, Any], + graph_init_params: GraphInitParams, + graph_runtime_state: GraphRuntimeState, + parent_node_id: str, + parent_retry_config: RetryConfig | None = None, + ): + # Store graph context for creating LLMNode instances + self._graph_init_params = graph_init_params + self._graph_runtime_state = graph_runtime_state + # ... + + def _execute_single_extraction( + self, + spec: VariableExtractionSpec, + ext_node_id: str, + ) -> Generator[GraphNodeEventBase, None, str]: + """ + Execute a single extraction by instantiating and running a real LLMNode. + """ + # Create LLMNode instance with minimal config + llm_node = self._create_llm_node( + ext_node_id=ext_node_id, + context=context, + extraction_prompt=spec.extraction_prompt, + model_config=model_config, + spec=spec, + ) + + # Run the node and collect events - FULLY REUSES LLMNode's logic! + for event in llm_node.run(): + # Mark events as virtual + event = self._mark_event_as_virtual(event, spec) + yield event + + if isinstance(event, NodeRunSucceededEvent): + result_text = event.node_run_result.outputs.get("text", "") + elif isinstance(event, NodeRunFailedEvent): + raise LLMInvocationError(Exception(event.error)) + + return result_text + + def _create_llm_node(self, ...) -> LLMNode: + """ + Create a real LLMNode instance for extraction. + Constructs minimal required configuration. + """ + # Build prompt template from context + extraction prompt + prompt_template = [...] # LLMNodeChatModelMessage list + + # Create LLMNode with full graph context + llm_node = LLMNode( + id=ext_node_id, + config=node_config, + graph_init_params=self._graph_init_params, + graph_runtime_state=self._graph_runtime_state, + ) + return llm_node + + def _execute_with_retry(self, spec, ext_node_id) -> Generator[...]: + """ + Retry by re-instantiating and re-running the LLMNode. 
+ """ + for attempt in range(retry_config.max_retries + 1): + try: + return (yield from self._execute_single_extraction(spec, ext_node_id)) + except Exception as e: + if attempt < retry_config.max_retries: + yield NodeRunRetryEvent(...) + time.sleep(retry_config.retry_interval_seconds) + continue + raise +``` + +--- + +## 6. 事件设计 + +### 6.1 复用现有事件类型 + +采用虚拟节点方案后,**不需要新增事件类型**。虚拟节点直接使用现有的: + +- `NodeRunStartedEvent` +- `NodeRunStreamChunkEvent` +- `NodeRunSucceededEvent` +- `NodeRunFailedEvent` + +**区分虚拟节点的方式**:在 `NodeRunStartedEvent` 中添加可选字段: + +```python +# core/workflow/graph_events/node.py + +class NodeRunStartedEvent(GraphNodeEventBase): + node_title: str + predecessor_node_id: str | None = None + agent_strategy: AgentNodeStrategyInit | None = None + start_at: datetime = Field(..., description="node start time") + + # Existing fields for ToolNode + provider_type: str = "" + provider_id: str = "" + + # NEW: Virtual node fields for extraction + is_virtual: bool = False + parent_node_id: str | None = None + extraction_source: str | None = None # e.g., "llm1.context" + extraction_prompt: str | None = None +``` + +**字段说明**: + +| 字段 | 类型 | 说明 | +| ------------------- | ------------- | ------------------------------ | +| `is_virtual` | `bool` | 是否为虚拟节点,默认 `False` | +| `parent_node_id` | `str \| None` | 父节点 ID,如 `"tool1"` | +| `extraction_source` | `str \| None` | 提取来源,如 `"llm1.context"` | +| `extraction_prompt` | `str \| None` | 提取 prompt,如 `"提取关键词"` | + +### 6.2 事件序列示例 + +前端收到的事件序列: + +``` +1. NodeRunStartedEvent + - node_id: "tool1_ext_1" + - node_type: NodeType.LLM + - node_title: "Extraction: 提取关键词" + - is_virtual: true + - parent_node_id: "tool1" + - extraction_source: "llm1.context" + - extraction_prompt: "提取关键词" + +2. NodeRunStreamChunkEvent + - node_id: "tool1_ext_1" + - selector: ["tool1_ext_1", "text"] + - chunk: "关键词" + +3. NodeRunSucceededEvent + - node_id: "tool1_ext_1" + - outputs: {"text": "关键词A, 关键词B"} + +4. 
NodeRunStartedEvent + - node_id: "tool1" + - node_type: NodeType.TOOL + - node_title: "Search Tool" + - is_virtual: false + +5. NodeRunStreamChunkEvent + - node_id: "tool1" + - selector: ["tool1", "text"] + - chunk: "search result..." + +6. NodeRunSucceededEvent + - node_id: "tool1" + - outputs: {"text": "..."} +``` + +### 6.3 前端展示建议 + +前端可以根据 `is_virtual` 和 `parent_node_id` 字段: + +1. **嵌套展示**:将虚拟节点的输出显示在父节点内部 +2. **分开展示**:作为独立的节点展示,但用 UI 标识关联关系 +3. **折叠展示**:默认折叠虚拟节点,可展开查看详情 + +--- + +## 7. 日志与记录 + +### 7.1 虚拟节点的 NodeRunResult + +虚拟节点有独立的 `NodeRunResult`,结构与普通 LLM 节点一致: + +```python +NodeRunResult( + status=WorkflowNodeExecutionStatus.SUCCEEDED, + inputs={ + "context_source": "llm1.context", + "extraction_prompt": "提取关键词", + }, + process_data={ + "source": "llm1.context", + "prompt": "提取关键词", + "model_mode": "chat", + "prompts": [ + {"role": "user", "text": "原始用户输入"}, + {"role": "assistant", "text": "原始助手回复"}, + {"role": "user", "text": "提取关键词"}, + ], + "usage": { + "prompt_tokens": 100, + "completion_tokens": 20, + "total_tokens": 120, + }, + }, + outputs={ + "text": "关键词A, 关键词B", + }, + metadata={ + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: 120, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: 0.0001, + WorkflowNodeExecutionMetadataKey.CURRENCY: "USD", + }, + llm_usage=LLMUsage( + prompt_tokens=100, + completion_tokens=20, + total_tokens=120, + ), +) +``` + +### 7.2 父节点的 process_data + +父节点(如 ToolNode)可以在 `process_data` 中记录关联的虚拟节点: + +```python +process_data = { + # ... 
existing fields + "extraction_nodes": ["tool1_ext_1", "tool1_ext_2"], +} +``` + +### 7.3 数据库记录 + +虚拟节点的执行记录会被保存到 `workflow_node_executions` 表: + +| 字段 | 值 | +| ----------- | ----------------------------------------- | +| `node_id` | `"tool1_ext_1"` | +| `node_type` | `"llm"` | +| `title` | `"Extraction: 提取关键词..."` | +| `inputs` | `{"context_source": "llm1.context", ...}` | +| `outputs` | `{"text": "关键词A, 关键词B"}` | +| `status` | `"succeeded"` | + +前端可以通过 `node_id` 中的 `_ext_` 识别虚拟节点,并关联到父节点。 + +--- + +## 8. 集成示例 + +### 8.1 ToolNode 集成 + +```python +# core/workflow/nodes/tool/tool_node.py + +from core.workflow.nodes.base.extraction_executor import ExtractionExecutor + + +class ToolNode(Node[ToolNodeData]): + + def _run(self) -> Generator[NodeEventBase, None, None]: + # Step 1: 创建 ExtractionExecutor(传入父节点的 retry_config) + extraction_executor = ExtractionExecutor( + variable_pool=self.graph_runtime_state.variable_pool, + graph_config=self.graph_config, + graph_init_params=self._graph_init_params, + graph_runtime_state=self.graph_runtime_state, + parent_node_id=self._node_id, + parent_retry_config=self.retry_config, # 继承父节点的重试配置 + ) + + # Step 2: 查找所有 extraction 类型的 inputs + specs = extraction_executor.find_extractions(self.node_data.model_dump()) + + # Step 3: 执行 extractions(yield 虚拟节点事件,包括重试事件) + extraction_results: dict[str, str] = {} + if specs: + try: + extraction_results = yield from extraction_executor.process_extractions(specs) + except ExtractionError as e: + # ExtractionExecutor 已 yield 了 NodeRunFailedEvent + # 根据父节点的 error_strategy 决定如何处理 + if self.error_strategy == ErrorStrategy.DEFAULT_VALUE: + extraction_results = self._get_default_extraction_values(specs) + else: + raise + + # Step 4: 生成参数(使用 extraction 结果作为对应 input 的值) + parameters = self._generate_parameters_with_extractions( + tool_parameters=tool_parameters, + extraction_results=extraction_results, + ) + + # Step 5: 继续正常的 tool 调用流程... + ... 
+ + def _generate_parameters_with_extractions( + self, + *, + tool_parameters: Sequence[ToolParameter], + extraction_results: dict[str, str], # input_name -> extracted_value + ) -> dict[str, Any]: + """Generate parameters, using extraction results for extraction-type inputs.""" + result: dict[str, Any] = {} + + for parameter_name, tool_input in self.node_data.tool_parameters.items(): + # Check if this input is an extraction type (result already in extraction_results) + if parameter_name in extraction_results: + result[parameter_name] = extraction_results[parameter_name] + + elif tool_input.type in {"mixed", "constant"}: + template = str(tool_input.value) + resolved = self.graph_runtime_state.variable_pool.convert_template(template).text + result[parameter_name] = resolved + + elif tool_input.type == "variable": + variable = self.graph_runtime_state.variable_pool.get(tool_input.value) + result[parameter_name] = variable.value if variable else None + + return result + + def _get_default_extraction_values( + self, + specs: list[VariableExtractionSpec], + ) -> dict[str, str]: + """Return default values for failed extractions.""" + return {spec.input_name: "" for spec in specs} +``` + +### 8.2 通用基类集成(可选方案) + +如果多个节点类型都需要支持 extraction,可以在基类中统一处理: + +```python +# core/workflow/nodes/base/node.py + +class Node(Generic[NodeDataT]): + + def run(self) -> Generator[GraphNodeEventBase, None, None]: + # Step 1: 预处理 extractions(如果有) + extraction_results = yield from self._preprocess_extractions() + + # Step 2: 正常执行 + execution_id = self.ensure_execution_id() + # ...existing logic... + + def _preprocess_extractions(self) -> Generator[GraphNodeEventBase, None, dict[str, str]]: + """ + Override in subclasses that support extraction. + Default implementation returns empty dict. 
+ """ + return {} + + def _supports_extraction(self) -> bool: + """Override to return True if node supports extraction.""" + return False +``` + +### 8.3 为其他节点添加 context 输出 + +以下节点需要在 outputs 中添加 `context`: + +```python +# core/workflow/nodes/question_classifier/question_classifier_node.py + +def _run(self) -> NodeRunResult: + # ...existing logic... + + outputs = { + "class_name": result.class_name, + # NEW: Add context for extraction support + "context": self._build_context(prompt_messages, result.text), + } + + return NodeRunResult( + status=WorkflowNodeExecutionStatus.SUCCEEDED, + outputs=outputs, + ) +``` + +```python +# core/workflow/nodes/parameter_extractor/parameter_extractor_node.py + +def _run(self) -> NodeRunResult: + # ...existing logic... + + outputs = { + **extracted_parameters, + # NEW: Add context for extraction support + "context": self._build_context(prompt_messages, assistant_response), + } +``` + +**注意**:`_build_context` 方法可以从 `LLMNode` 中提取为公共函数,或者直接复用: + +```python +# core/workflow/nodes/llm/llm_utils.py + +def build_context( + prompt_messages: Sequence[PromptMessage], + assistant_response: str, + model_mode: str, +) -> list[dict[str, Any]]: + """ + Build context from prompt messages and assistant response. + Excludes system messages and includes the current LLM response. + """ + context_messages = [m for m in prompt_messages if m.role != PromptMessageRole.SYSTEM] + context_messages.append(AssistantPromptMessage(content=assistant_response)) + return PromptMessageUtil.prompt_messages_to_prompt_for_saving( + model_mode=model_mode, prompt_messages=context_messages + ) +``` + +--- + +## 9. 
配置选项 + +### 9.1 模型配置策略 + +提取调用使用的模型,按优先级: + +| 优先级 | 来源 | 说明 | +| ------ | ---------- | ------------------------------------ | +| 1 | 显式指定 | `extraction.value.model` 配置 | +| 2 | 源节点配置 | 继承 `source_node_id` 节点的模型配置 | + +### 9.2 ExtractionModelConfig 使用 + +```python +# 在 ExtractionExecutor 中获取模型配置 + +def _get_model_config(self, spec: VariableExtractionSpec) -> dict: + # 如果显式指定了 model,使用它 + if spec.model: + return { + "provider": spec.model.provider, + "name": spec.model.name, + "mode": spec.model.mode.value, + "completion_params": spec.model.completion_params, + } + + # 否则继承源节点的模型配置 + source_model_config = self._get_source_node_model_config(spec.source_node_id) + if source_model_config is None: + raise ModelConfigNotFoundError(spec.source_node_id, spec.source_variable) + + return source_model_config +``` + +### 9.3 模型配置示例 + +**场景 1:继承源节点配置(推荐)** + +```yaml +# 节点配置 +inputs: + - name: query + type: extraction + value: + source_node_id: llm1 + source_variable: context + extraction_prompt: "提取关键词" + # 不指定 model,自动继承 llm1 的模型配置 + +# llm1 节点配置 +data: + model: + provider: openai + name: gpt-4 + mode: chat + completion_params: + temperature: 0.7 +# 结果:使用 openai/gpt-4 +``` + +**场景 2:显式指定模型** + +```yaml +# 节点配置 +inputs: + - name: query + type: extraction + value: + source_node_id: llm1 + source_variable: context + extraction_prompt: "提取关键词" + model: + provider: openai + name: gpt-4o-mini + mode: chat + completion_params: + temperature: 0.3 +# 结果:使用 openai/gpt-4o-mini(忽略源节点配置) +``` + +--- + +## 10. 
错误处理与重试机制 + +### 10.1 设计考量 + +**重要说明**:虚拟节点(Extraction 节点)的重试机制**无法**直接复用现有的节点级别重试机制。 + +原因分析: + +- Worker 从 `ready_queue` 取节点时,通过 `graph.nodes[node_id]` 获取节点实例 +- 虚拟节点不在 `graph.nodes` 中 +- `ErrorHandler._handle_retry()` 无法找到虚拟节点进行重试 + +因此,**ExtractionExecutor 需要在内部实现重试逻辑**。 + +### 10.2 错误类型 + +```python +# core/workflow/nodes/base/extraction_errors.py + +class ExtractionError(Exception): + """Base exception for extraction operations""" + pass + + +class VariableNotFoundError(ExtractionError): + """Source variable not found in variable pool""" + + def __init__(self, selector: list[str]): + self.selector = selector + super().__init__(f"Variable {'.'.join(selector)} not found in variable pool") + + +class InvalidVariableTypeError(ExtractionError): + """Source variable is not a valid context type (list[dict])""" + + def __init__(self, selector: list[str], actual_type: type): + self.selector = selector + self.actual_type = actual_type + super().__init__( + f"Variable {'.'.join(selector)} is not a list type, got {actual_type.__name__}" + ) + + +class SourceNodeNotFoundError(ExtractionError): + """Source node not found in graph config""" + + def __init__(self, node_id: str): + self.node_id = node_id + super().__init__(f"Source node {node_id} not found in graph config") + + +class LLMInvocationError(ExtractionError): + """LLM invocation failed during extraction""" + + def __init__(self, original_error: Exception): + self.original_error = original_error + super().__init__(f"LLM invocation failed: {original_error}") +``` + +### 10.3 内部重试机制 + +虚拟节点的重试在 `ExtractionExecutor` 内部处理,继承父节点的 `retry_config`: + +```python +# ExtractionExecutor 的重试实现 + +def _execute_single_extraction_with_retry( + self, + spec: VariableExtractionSpec, + ext_node_id: str, +) -> Generator[..., None, tuple[str, LLMUsage]]: + """ + Execute extraction with internal retry support. + + Retry config is inherited from parent node. 
+ """ + retry_config = self._parent_retry_config + last_error: Exception | None = None + + for attempt in range(retry_config.max_retries + 1): + try: + return (yield from self._execute_single_extraction(spec, ext_node_id)) + except LLMInvocationError as e: + last_error = e + + if attempt < retry_config.max_retries: + # Yield retry event for frontend display + yield NodeRunRetryEvent( + id=str(uuid4()), + node_id=ext_node_id, + node_type=NodeType.LLM, + node_title=f"Extraction: {spec.extraction_prompt[:30]}...", + start_at=self._start_time, + error=str(e), + retry_index=attempt + 1, + ) + + # Wait for retry interval + time.sleep(retry_config.retry_interval_seconds) + continue + + # Max retries exceeded, raise + raise + + # Should not reach here, but for type safety + raise last_error or LLMInvocationError(Exception("Unknown error")) +``` + +### 10.4 错误传播 + +```python +# ToolNode 中的错误处理示例 + +def _run(self) -> Generator[NodeEventBase, None, None]: + try: + # 执行 extractions(内部已处理重试) + extraction_results = yield from extraction_executor.process_extractions(specs) + except ExtractionError as e: + # 虚拟节点已 yield 了 NodeRunFailedEvent + # 异常传播到父节点,由父节点的 error_strategy 决定后续处理 + if self.error_strategy == ErrorStrategy.DEFAULT_VALUE: + extraction_results = self._get_default_extraction_values(specs) + else: + raise # 终止执行 + + # 继续执行... +``` + +### 10.5 为什么不能复用节点级别重试 + +节点级别的重试流程: + +``` +Worker 执行节点 + → 失败 → NodeRunFailedEvent + → Dispatcher → EventHandler + → ErrorHandler._handle_retry() + → 检查 graph.nodes[node_id] ← 虚拟节点不存在! + → 重新入队 ready_queue +``` + +虚拟节点不在 `graph.nodes` 中,无法进入此流程。因此重试必须在 ExtractionExecutor 内部完成。 + +--- + +## 11. 
设计决策 + +### 11.1 模型配置 + +**决定:使用结构化配置,可选显式指定模型** + +**配置方式**: + +```yaml +# 继承源节点模型(推荐) +- name: query + type: extraction + value: + source_node_id: llm1 + source_variable: context + extraction_prompt: "提取关键词" + +# 显式指定模型 +- name: summary + type: extraction + value: + source_node_id: agent1 + source_variable: context + extraction_prompt: "总结对话" + model: + provider: openai + name: gpt-4o-mini +``` + +**优先级**: + +1. 如果 `extraction.value.model` 存在,使用指定的模型 +2. 否则,继承源节点的模型配置 + +**模型配置字段**: + +| 字段 | 说明 | 来源 | +| ------------------- | ---------- | ------------------------- | +| `provider` | 模型提供商 | 显式指定 或 源节点配置 | +| `name` | 模型名称 | 显式指定 或 源节点配置 | +| `mode` | LLM 模式 | 默认 `chat` 或 源节点配置 | +| `completion_params` | 推理参数 | 显式指定 或 源节点配置 | + +### 11.2 Token 计费 + +**决定:A - 虚拟节点独立计费** + +虚拟节点有独立的 `NodeRunResult`,token 消耗记录在虚拟节点的 `metadata` 中。 + +### 11.3 context 变量类型 + +**决定:C - 暂不新增类型** + +当前 `context` 使用 `list[dict]` 格式(`ArrayAnySegment`),先这样实现,后续视需要再考虑新增 `PromptMessagesSegment` 类型。 + +### 11.4 支持范围 + +**决定:A - 支持所有使用 LLM 的节点** + +包括: + +- LLM 节点 +- Agent 节点 +- Question Classify 节点 +- Parameter Extractor 节点 + +这些节点都需要输出 `context` 变量。 + +### 11.5 重试机制 + +**决定:A - 内部实现重试** + +虚拟节点在 `ExtractionExecutor` 内部实现重试机制,而非复用节点级别的重试流程。 + +**原因**: + +- 节点级别的重试需要节点在 `graph.nodes` 中,虚拟节点不满足此条件 +- `ErrorHandler._handle_retry()` 无法找到虚拟节点 + +**实现方式**: + +- 继承父节点的 `retry_config`(max_retries, retry_interval_seconds) +- 在 `ExtractionExecutor._execute_with_retry()` 中实现重试循环 +- 每次重试 yield `NodeRunRetryEvent` 供前端展示 + +### 11.6 复用 LLMNode 逻辑 + +**决定:使用 LLMNode 静态方法** + +ExtractionExecutor 复用 `LLMNode.invoke_llm()` 和 `LLMNode.handle_invoke_result()` 静态方法: + +**优点**: + +- 获得完整的 streaming 处理能力 +- 获得完整的 token 统计(`LLMUsage`) +- 获得文件处理能力(multimodal) +- 返回格式与真正的 LLM 节点一致 + +**NodeRunResult 包含**: + +- `outputs`: `{"text": "..."}` +- `llm_usage`: `LLMUsage` 对象 +- `metadata`: token 计费信息(TOTAL_TOKENS, TOTAL_PRICE, CURRENCY) + +--- + +## 12. 
实现计划 + +### Phase 1: 基础设施 + +| Task | 文件 | 说明 | +| ---- | ----------------------------------------------- | ------------------------------------------------------ | +| 1.1 | `core/workflow/entities/variable_extraction.py` | 定义 `VariableExtractionSpec`、`ExtractionModelConfig` | +| 1.2 | `core/workflow/graph_events/node.py` | 在 `NodeRunStartedEvent` 添加虚拟节点字段 | +| 1.3 | `core/workflow/nodes/llm/llm_utils.py` | 提取 `build_context` 为公共函数 | + +### Phase 2: 核心执行器 + +| Task | 文件 | 说明 | +| ---- | --------------------------------------------------------------- | ----------------------------------------------------------- | +| 2.1 | `core/workflow/nodes/base/extraction_errors.py` | 定义错误类型 | +| 2.2 | `core/workflow/nodes/base/extraction_executor.py` | 实现 `ExtractionExecutor` | +| 2.3 | `core/workflow/graph_engine/event_management/event_handlers.py` | 修改 `_is_virtual_node` 判断,虚拟节点不触发 edge_processor | + +### Phase 3: 节点 context 输出 + +| Task | 文件 | 说明 | +| ---- | ------------------------------------------ | ------------------- | +| 3.1 | `core/workflow/nodes/agent/agent_node.py` | 添加 `context` 输出 | +| 3.2 | `core/workflow/nodes/question_classifier/` | 添加 `context` 输出 | +| 3.3 | `core/workflow/nodes/parameter_extractor/` | 添加 `context` 输出 | + +### Phase 4: 节点集成 + +| Task | 文件 | 说明 | +| ---- | ----------------------------------------- | ------------------------- | +| 4.1 | `core/workflow/nodes/tool/tool_node.py` | 集成 `ExtractionExecutor` | +| 4.2 | `core/workflow/nodes/agent/agent_node.py` | 集成 `ExtractionExecutor` | +| 4.3 | 其他节点 | 按需集成 | + +### Phase 5: 测试 + +| Task | 说明 | +| ---- | ---------------------------------- | +| 5.1 | 单元测试:结构化配置解析 | +| 5.2 | 单元测试:ExtractionExecutor | +| 5.3 | 集成测试:ToolNode with extraction | +| 5.4 | 集成测试:多个 extraction 场景 | + +--- + +## 13. 
附录 + +### 13.1 相关代码位置 + +| 模块 | 路径 | 说明 | +| ------------- | --------------------------------------------------------------- | --------------------------------- | +| LLM Node | `core/workflow/nodes/llm/node.py` | `_build_context` 方法(line 600) | +| Tool Node | `core/workflow/nodes/tool/tool_node.py` | `_generate_parameters` 方法 | +| Agent Node | `core/workflow/nodes/agent/agent_node.py` | 需要添加 context 输出 | +| Variable Pool | `core/workflow/runtime/variable_pool.py` | 变量存取和模板解析 | +| Graph Events | `core/workflow/graph_events/node.py` | 节点事件定义 | +| Event Handler | `core/workflow/graph_engine/event_management/event_handlers.py` | 事件处理和变量存储 | +| Worker | `core/workflow/graph_engine/worker.py` | 节点执行和事件队列 | + +### 13.2 参考实现 + +| 功能 | 参考代码 | 说明 | +| ------------- | ------------------------ | ------------------------------------------------------ | +| 模板解析 | `VariableTemplateParser` | `core/workflow/nodes/base/variable_template_parser.py` | +| 历史消息处理 | `TokenBufferMemory` | `core/memory/token_buffer_memory.py` | +| LLM 流式调用 | `LLMNode.invoke_llm` | `core/workflow/nodes/llm/node.py` line 386 | +| 事件 dispatch | `Node._dispatch` | `core/workflow/nodes/base/node.py` line 559 | + +### 13.3 新增文件 + +实现本功能需要新增以下文件: + +``` +core/workflow/ +├── entities/ +│ └── variable_extraction.py # NEW: VariableExtractionSpec 定义 +└── nodes/ + └── base/ + ├── extraction_errors.py # NEW: 错误类型定义 + └── extraction_executor.py # NEW: ExtractionExecutor 实现 +``` + +### 13.4 修改文件清单 + +| 文件 | 修改内容 | +| --------------------------------------------------------------- | ------------------------------------------- | +| `core/workflow/graph_events/node.py` | 添加 `is_virtual`, `parent_node_id` 等字段 | +| `core/workflow/graph_engine/event_management/event_handlers.py` | 添加 `_is_virtual_node` 判断 | +| `core/workflow/nodes/llm/llm_utils.py` | 提取 `build_context` 公共函数 | +| `core/workflow/nodes/tool/tool_node.py` | 集成 ExtractionExecutor | +| `core/workflow/nodes/agent/agent_node.py` | 添加 context 输出 + 集成 ExtractionExecutor 
| +| `core/workflow/nodes/question_classifier/*.py` | 添加 context 输出 | +| `core/workflow/nodes/parameter_extractor/*.py` | 添加 context 输出 | diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py index d9cb9c227e..2d36bfb198 100644 --- a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ -64,6 +64,7 @@ class NodeType(StrEnum): TRIGGER_PLUGIN = "trigger-plugin" HUMAN_INPUT = "human-input" COMMAND = "command" + GROUP = "group" @property def is_trigger_node(self) -> bool: @@ -255,6 +256,7 @@ class WorkflowNodeExecutionMetadataKey(StrEnum): LLM_CONTENT_SEQUENCE = "llm_content_sequence" LLM_TRACE = "llm_trace" COMPLETED_REASON = "completed_reason" # completed reason for loop node + MENTION_PARENT_ID = "mention_parent_id" # parent node id for extractor nodes class WorkflowNodeExecutionStatus(StrEnum): diff --git a/api/core/workflow/graph/graph.py b/api/core/workflow/graph/graph.py index 7be94c2426..bd2326e84f 100644 --- a/api/core/workflow/graph/graph.py +++ b/api/core/workflow/graph/graph.py @@ -307,7 +307,14 @@ class Graph: if not node_configs: raise ValueError("Graph must have at least one node") - node_configs = [node_config for node_config in node_configs if node_config.get("type", "") != "custom-note"] + # Filter out UI-only node types: + # - custom-note: top-level type (node_config.type == "custom-note") + # - group: data-level type (node_config.data.type == "group") + node_configs = [ + node_config + for node_config in node_configs + if node_config.get("type", "") != "custom-note" and node_config.get("data", {}).get("type", "") != "group" + ] # Parse node configurations node_configs_map = cls._parse_node_configs(node_configs) diff --git a/api/core/workflow/graph_engine/event_management/event_handlers.py b/api/core/workflow/graph_engine/event_management/event_handlers.py index 5b0f56e59d..9f2d8bcff4 100644 --- a/api/core/workflow/graph_engine/event_management/event_handlers.py +++ 
b/api/core/workflow/graph_engine/event_management/event_handlers.py @@ -93,8 +93,8 @@ class EventHandler: Args: event: The event to handle """ - # Events in loops or iterations are always collected - if event.in_loop_id or event.in_iteration_id: + # Events in loops, iterations, or extractor groups are always collected + if event.in_loop_id or event.in_iteration_id or event.in_mention_parent_id: self._event_collector.collect(event) return return self._dispatch(event) @@ -125,6 +125,11 @@ class EventHandler: Args: event: The node started event """ + # Check if this is an extractor node (has parent_node_id) + if self._is_extractor_node(event.node_id): + self._handle_extractor_node_started(event) + return + # Track execution in domain model node_execution = self._graph_execution.get_or_create_node_execution(event.node_id) is_initial_attempt = node_execution.retry_count == 0 @@ -164,6 +169,11 @@ class EventHandler: Args: event: The node succeeded event """ + # Check if this is an extractor node (has parent_node_id) + if self._is_extractor_node(event.node_id): + self._handle_extractor_node_success(event) + return + # Update domain model node_execution = self._graph_execution.get_or_create_node_execution(event.node_id) node_execution.mark_taken() @@ -226,6 +236,11 @@ class EventHandler: Args: event: The node failed event """ + # Check if this is an extractor node (has parent_node_id) + if self._is_extractor_node(event.node_id): + self._handle_extractor_node_failed(event) + return + # Update domain model node_execution = self._graph_execution.get_or_create_node_execution(event.node_id) node_execution.mark_failed(event.error) @@ -345,3 +360,57 @@ class EventHandler: self._graph_runtime_state.set_output("answer", value) else: self._graph_runtime_state.set_output(key, value) + + def _is_extractor_node(self, node_id: str) -> bool: + """ + Check if node_id represents an extractor node (has parent_node_id). 
+ + Extractor nodes extract values from list[PromptMessage] for their parent node. + They have a parent_node_id field pointing to their parent node. + """ + node = self._graph.nodes.get(node_id) + if node is None: + return False + return node.node_data.is_extractor_node + + def _handle_extractor_node_started(self, event: NodeRunStartedEvent) -> None: + """ + Handle extractor node started event. + + Extractor nodes don't need full execution tracking, just collect the event. + """ + # Track in response coordinator for stream ordering + self._response_coordinator.track_node_execution(event.node_id, event.id) + + # Collect the event + self._event_collector.collect(event) + + def _handle_extractor_node_success(self, event: NodeRunSucceededEvent) -> None: + """ + Handle extractor node success event. + + Extractor nodes need special handling: + - Store outputs in variable pool (for reference by other nodes) + - Accumulate token usage + - Collect the event for logging + - Do NOT process edges or enqueue next nodes (parent node handles that) + """ + self._accumulate_node_usage(event.node_run_result.llm_usage) + + # Store outputs in variable pool + self._store_node_outputs(event.node_id, event.node_run_result.outputs) + + # Collect the event + self._event_collector.collect(event) + + def _handle_extractor_node_failed(self, event: NodeRunFailedEvent) -> None: + """ + Handle extractor node failed event. + + Extractor node failures are collected for logging, + but the parent node is responsible for handling the error. 
+ """ + self._accumulate_node_usage(event.node_run_result.llm_usage) + + # Collect the event for logging + self._event_collector.collect(event) diff --git a/api/core/workflow/graph_engine/layers/persistence.py b/api/core/workflow/graph_engine/layers/persistence.py index e81df4f3b7..6f7c76defe 100644 --- a/api/core/workflow/graph_engine/layers/persistence.py +++ b/api/core/workflow/graph_engine/layers/persistence.py @@ -68,6 +68,7 @@ class _NodeRuntimeSnapshot: predecessor_node_id: str | None iteration_id: str | None loop_id: str | None + mention_parent_id: str | None created_at: datetime @@ -230,6 +231,7 @@ class WorkflowPersistenceLayer(GraphEngineLayer): metadata = { WorkflowNodeExecutionMetadataKey.ITERATION_ID: event.in_iteration_id, WorkflowNodeExecutionMetadataKey.LOOP_ID: event.in_loop_id, + WorkflowNodeExecutionMetadataKey.MENTION_PARENT_ID: event.in_mention_parent_id, } domain_execution = WorkflowNodeExecution( @@ -256,6 +258,7 @@ class WorkflowPersistenceLayer(GraphEngineLayer): predecessor_node_id=event.predecessor_node_id, iteration_id=event.in_iteration_id, loop_id=event.in_loop_id, + mention_parent_id=event.in_mention_parent_id, created_at=event.start_at, ) self._node_snapshots[event.id] = snapshot diff --git a/api/core/workflow/graph_events/base.py b/api/core/workflow/graph_events/base.py index 3714679201..16dd49c7ad 100644 --- a/api/core/workflow/graph_events/base.py +++ b/api/core/workflow/graph_events/base.py @@ -21,6 +21,12 @@ class GraphNodeEventBase(GraphEngineEvent): """iteration id if node is in iteration""" in_loop_id: str | None = None """loop id if node is in loop""" + in_mention_parent_id: str | None = None + """Parent node id if this is an extractor node event. + + When set, indicates this event belongs to an extractor node that + is extracting values for the specified parent node. + """ # The version of the node, or "1" if not specified. 
node_version: str = "1" diff --git a/api/core/workflow/nodes/agent/agent_node.py b/api/core/workflow/nodes/agent/agent_node.py index 234651ce96..c527c50280 100644 --- a/api/core/workflow/nodes/agent/agent_node.py +++ b/api/core/workflow/nodes/agent/agent_node.py @@ -12,11 +12,20 @@ from sqlalchemy.orm import Session from core.agent.entities import AgentToolEntity from core.agent.plugin_entities import AgentStrategyParameter from core.file import File, FileTransferMethod +from core.memory.base import BaseMemory +from core.memory.node_token_buffer_memory import NodeTokenBufferMemory from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelInstance, ModelManager from core.model_runtime.entities.llm_entities import LLMUsage, LLMUsageMetadata +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + PromptMessage, + ToolPromptMessage, + UserPromptMessage, +) from core.model_runtime.entities.model_entities import AIModelEntity, ModelType from core.model_runtime.utils.encoders import jsonable_encoder +from core.prompt.entities.advanced_prompt_entities import MemoryMode from core.provider_manager import ProviderManager from core.tools.entities.tool_entities import ( ToolIdentity, @@ -136,6 +145,9 @@ class AgentNode(Node[AgentNodeData]): ) return + # Fetch memory for node memory saving + memory = self._fetch_memory_for_save() + try: yield from self._transform_message( messages=message_stream, @@ -149,6 +161,7 @@ class AgentNode(Node[AgentNodeData]): node_type=self.node_type, node_id=self._node_id, node_execution_id=self.id, + memory=memory, ) except PluginDaemonClientSideError as e: transform_error = AgentMessageTransformError( @@ -395,8 +408,20 @@ class AgentNode(Node[AgentNodeData]): icon = None return icon - def _fetch_memory(self, model_instance: ModelInstance) -> TokenBufferMemory | None: - # get conversation id + def _fetch_memory(self, model_instance: ModelInstance) -> BaseMemory | None: + """ + 
Fetch memory based on configuration mode. + + Returns TokenBufferMemory for conversation mode (default), + or NodeTokenBufferMemory for node mode (Chatflow only). + """ + node_data = self.node_data + memory_config = node_data.memory + + if not memory_config: + return None + + # get conversation id (required for both modes in Chatflow) conversation_id_variable = self.graph_runtime_state.variable_pool.get( ["sys", SystemVariableKey.CONVERSATION_ID] ) @@ -404,16 +429,26 @@ class AgentNode(Node[AgentNodeData]): return None conversation_id = conversation_id_variable.value - with Session(db.engine, expire_on_commit=False) as session: - stmt = select(Conversation).where(Conversation.app_id == self.app_id, Conversation.id == conversation_id) - conversation = session.scalar(stmt) - - if not conversation: - return None - - memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance) - - return memory + # Return appropriate memory type based on mode + if memory_config.mode == MemoryMode.NODE: + # Node-level memory (Chatflow only) + return NodeTokenBufferMemory( + app_id=self.app_id, + conversation_id=conversation_id, + node_id=self._node_id, + tenant_id=self.tenant_id, + model_instance=model_instance, + ) + else: + # Conversation-level memory (default) + with Session(db.engine, expire_on_commit=False) as session: + stmt = select(Conversation).where( + Conversation.app_id == self.app_id, Conversation.id == conversation_id + ) + conversation = session.scalar(stmt) + if not conversation: + return None + return TokenBufferMemory(conversation=conversation, model_instance=model_instance) def _fetch_model(self, value: dict[str, Any]) -> tuple[ModelInstance, AIModelEntity | None]: provider_manager = ProviderManager() @@ -457,6 +492,136 @@ class AgentNode(Node[AgentNodeData]): else: return [tool for tool in tools if tool.get("type") != ToolProviderType.MCP] + def _fetch_memory_for_save(self) -> BaseMemory | None: + """ + Fetch memory instance for saving node 
memory. + This is a simplified version that doesn't require model_instance. + """ + from core.model_manager import ModelManager + from core.model_runtime.entities.model_entities import ModelType + + node_data = self.node_data + if not node_data.memory: + return None + + # Get conversation_id + conversation_id_var = self.graph_runtime_state.variable_pool.get(["sys", SystemVariableKey.CONVERSATION_ID]) + if not isinstance(conversation_id_var, StringSegment): + return None + conversation_id = conversation_id_var.value + + # Return appropriate memory type based on mode + if node_data.memory.mode == MemoryMode.NODE: + # For node memory, we need a model_instance for token counting + # Use a simple default model for this purpose + try: + model_instance = ModelManager().get_default_model_instance( + tenant_id=self.tenant_id, + model_type=ModelType.LLM, + ) + except Exception: + return None + + return NodeTokenBufferMemory( + app_id=self.app_id, + conversation_id=conversation_id, + node_id=self._node_id, + tenant_id=self.tenant_id, + model_instance=model_instance, + ) + else: + # Conversation-level memory doesn't need saving here + return None + + def _build_context( + self, + parameters_for_log: dict[str, Any], + user_query: str, + assistant_response: str, + agent_logs: list[AgentLogEvent], + ) -> list[PromptMessage]: + """ + Build context from user query, tool calls, and assistant response. 
+ Format: user -> assistant(with tool_calls) -> tool -> assistant + + The context includes: + - Current user query (always present, may be empty) + - Assistant message with tool_calls (if tools were called) + - Tool results + - Assistant's final response + """ + context_messages: list[PromptMessage] = [] + + # Always add user query (even if empty, to maintain conversation structure) + context_messages.append(UserPromptMessage(content=user_query or "")) + + # Extract actual tool calls from agent logs + # Only include logs with label starting with "CALL " - these are real tool invocations + tool_calls: list[AssistantPromptMessage.ToolCall] = [] + tool_results: list[tuple[str, str, str]] = [] # (tool_call_id, tool_name, result) + + for log in agent_logs: + if log.status == "success" and log.label and log.label.startswith("CALL "): + # Extract tool name from label (format: "CALL tool_name") + tool_name = log.label[5:] # Remove "CALL " prefix + tool_call_id = log.message_id + + # Parse tool response from data + data = log.data or {} + tool_response = "" + + # Try to extract the actual tool response + if "tool_response" in data: + tool_response = data["tool_response"] + elif "output" in data: + tool_response = data["output"] + elif "result" in data: + tool_response = data["result"] + + if isinstance(tool_response, dict): + tool_response = str(tool_response) + + # Get tool input for arguments + tool_input = data.get("tool_call_input", {}) or data.get("input", {}) + if isinstance(tool_input, dict): + import json + + tool_input_str = json.dumps(tool_input, ensure_ascii=False) + else: + tool_input_str = str(tool_input) if tool_input else "" + + if tool_response: + tool_calls.append( + AssistantPromptMessage.ToolCall( + id=tool_call_id, + type="function", + function=AssistantPromptMessage.ToolCall.ToolCallFunction( + name=tool_name, + arguments=tool_input_str, + ), + ) + ) + tool_results.append((tool_call_id, tool_name, str(tool_response))) + + # Add assistant message with 
tool_calls if there were tool calls + if tool_calls: + context_messages.append(AssistantPromptMessage(content="", tool_calls=tool_calls)) + + # Add tool result messages + for tool_call_id, tool_name, result in tool_results: + context_messages.append( + ToolPromptMessage( + content=result, + tool_call_id=tool_call_id, + name=tool_name, + ) + ) + + # Add final assistant response + context_messages.append(AssistantPromptMessage(content=assistant_response)) + + return context_messages + def _transform_message( self, messages: Generator[ToolInvokeMessage, None, None], @@ -467,6 +632,7 @@ class AgentNode(Node[AgentNodeData]): node_type: NodeType, node_id: str, node_execution_id: str, + memory: BaseMemory | None = None, ) -> Generator[NodeEventBase, None, None]: """ Convert ToolInvokeMessages into tuple[plain_text, files] @@ -711,6 +877,12 @@ class AgentNode(Node[AgentNodeData]): is_final=True, ) + # Get user query from parameters for building context + user_query = parameters_for_log.get("query", "") + + # Build context from history, user query, tool calls and assistant response + context = self._build_context(parameters_for_log, user_query, text, agent_logs) + yield StreamCompletedEvent( node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, @@ -719,6 +891,7 @@ class AgentNode(Node[AgentNodeData]): "usage": jsonable_encoder(llm_usage), "files": ArrayFileSegment(value=files), "json": json_output, + "context": context, **variables, }, metadata={ diff --git a/api/core/workflow/nodes/base/__init__.py b/api/core/workflow/nodes/base/__init__.py index f83df0e323..87fd6c5b32 100644 --- a/api/core/workflow/nodes/base/__init__.py +++ b/api/core/workflow/nodes/base/__init__.py @@ -1,4 +1,10 @@ -from .entities import BaseIterationNodeData, BaseIterationState, BaseLoopNodeData, BaseLoopState, BaseNodeData +from .entities import ( + BaseIterationNodeData, + BaseIterationState, + BaseLoopNodeData, + BaseLoopState, + BaseNodeData, +) from .usage_tracking_mixin 
import LLMUsageTrackingMixin __all__ = [ diff --git a/api/core/workflow/nodes/base/entities.py b/api/core/workflow/nodes/base/entities.py index e5a20c8e91..fa8673db5f 100644 --- a/api/core/workflow/nodes/base/entities.py +++ b/api/core/workflow/nodes/base/entities.py @@ -175,6 +175,16 @@ class BaseNodeData(ABC, BaseModel): default_value: list[DefaultValue] | None = None retry_config: RetryConfig = RetryConfig() + # Parent node ID when this node is used as an extractor. + # If set, this node is an "attached" extractor node that extracts values + # from list[PromptMessage] for the parent node's parameters. + parent_node_id: str | None = None + + @property + def is_extractor_node(self) -> bool: + """Check if this node is an extractor node (has parent_node_id).""" + return self.parent_node_id is not None + @property def default_value_dict(self) -> dict[str, Any]: if self.default_value: diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index 06e4c0440d..d80f872486 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -273,10 +273,87 @@ class Node(Generic[NodeDataT]): """Check if execution should be stopped.""" return self.graph_runtime_state.stop_event.is_set() + def _find_extractor_node_configs(self) -> list[dict[str, Any]]: + """ + Find all extractor node configurations that have parent_node_id == self._node_id. + + Returns: + List of node configuration dicts for extractor nodes + """ + nodes = self.graph_config.get("nodes", []) + extractor_configs = [] + for node_config in nodes: + node_data = node_config.get("data", {}) + if node_data.get("parent_node_id") == self._node_id: + extractor_configs.append(node_config) + return extractor_configs + + def _execute_mention_nodes(self) -> Generator[GraphNodeEventBase, None, None]: + """ + Execute all extractor nodes associated with this node. + + Extractor nodes are nodes with parent_node_id == self._node_id. 
+ They are executed before the main node to extract values from list[PromptMessage]. + """ + from core.workflow.nodes.node_mapping import LATEST_VERSION, NODE_TYPE_CLASSES_MAPPING + + extractor_configs = self._find_extractor_node_configs() + logger.debug("[Extractor] Found %d extractor nodes for parent '%s'", len(extractor_configs), self._node_id) + if not extractor_configs: + return + + for config in extractor_configs: + node_id = config.get("id") + node_data = config.get("data", {}) + node_type_str = node_data.get("type") + + if not node_id or not node_type_str: + continue + + # Get node class + try: + node_type = NodeType(node_type_str) + except ValueError: + continue + + node_mapping = NODE_TYPE_CLASSES_MAPPING.get(node_type) + if not node_mapping: + continue + + node_version = str(node_data.get("version", "1")) + node_cls = node_mapping.get(node_version) or node_mapping.get(LATEST_VERSION) + if not node_cls: + continue + + # Instantiate and execute the extractor node + extractor_node = node_cls( + id=node_id, + config=config, + graph_init_params=self._graph_init_params, + graph_runtime_state=self.graph_runtime_state, + ) + + # Execute and process extractor node events + for event in extractor_node.run(): + # Tag event with parent node id for stream ordering and history tracking + if isinstance(event, GraphNodeEventBase): + event.in_mention_parent_id = self._node_id + + if isinstance(event, NodeRunSucceededEvent): + # Store extractor node outputs in variable pool + outputs: Mapping[str, Any] = event.node_run_result.outputs + for variable_name, variable_value in outputs.items(): + self.graph_runtime_state.variable_pool.add((node_id, variable_name), variable_value) + if not isinstance(event, NodeRunStreamChunkEvent): + yield event + def run(self) -> Generator[GraphNodeEventBase, None, None]: execution_id = self.ensure_execution_id() self._start_at = naive_utc_now() + # Step 1: Execute associated extractor nodes before main node execution + yield from 
self._execute_mention_nodes() + # Create and push start event with required fields start_event = NodeRunStartedEvent( id=execution_id, diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index a04da8b6e0..b21311086b 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -1,6 +1,6 @@ import re from collections.abc import Mapping, Sequence -from typing import Any, Literal +from typing import Annotated, Any, Literal, TypeAlias from pydantic import BaseModel, ConfigDict, Field, field_validator @@ -327,9 +327,28 @@ class ToolLogPayload(BaseModel): ) +class PromptMessageContext(BaseModel): + """Context variable reference in prompt template. + + YAML/JSON format: { "$context": ["node_id", "variable_name"] } + This will be expanded to list[PromptMessage] at runtime. + """ + + model_config = ConfigDict(populate_by_name=True) + + value_selector: Sequence[str] = Field(alias="$context") + + +# Union type for prompt template items (static message or context variable reference) +PromptTemplateItem: TypeAlias = Annotated[ + LLMNodeChatModelMessage | PromptMessageContext, + Field(discriminator=None), +] + + class LLMNodeData(BaseNodeData): model: ModelConfig - prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate + prompt_template: Sequence[PromptTemplateItem] | LLMNodeCompletionModelPromptTemplate prompt_config: PromptConfig = Field(default_factory=PromptConfig) memory: MemoryConfig | None = None context: ContextConfig diff --git a/api/core/workflow/nodes/llm/llm_utils.py b/api/core/workflow/nodes/llm/llm_utils.py index 01e25cbf5c..966c34a0d7 100644 --- a/api/core/workflow/nodes/llm/llm_utils.py +++ b/api/core/workflow/nodes/llm/llm_utils.py @@ -8,12 +8,20 @@ from configs import dify_config from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.entities.provider_entities import ProviderQuotaType, QuotaUnit from 
core.file.models import File -from core.memory.token_buffer_memory import TokenBufferMemory +from core.memory import NodeTokenBufferMemory, TokenBufferMemory +from core.memory.base import BaseMemory from core.model_manager import ModelInstance, ModelManager from core.model_runtime.entities.llm_entities import LLMUsage +from core.model_runtime.entities.message_entities import ( + AssistantPromptMessage, + MultiModalPromptMessageContent, + PromptMessage, + PromptMessageContentUnionTypes, + PromptMessageRole, +) from core.model_runtime.entities.model_entities import ModelType from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel -from core.prompt.entities.advanced_prompt_entities import MemoryConfig +from core.prompt.entities.advanced_prompt_entities import MemoryConfig, MemoryMode from core.variables.segments import ArrayAnySegment, ArrayFileSegment, FileSegment, NoneSegment, StringSegment from core.workflow.enums import SystemVariableKey from core.workflow.nodes.llm.entities import ModelConfig @@ -86,25 +94,56 @@ def fetch_files(variable_pool: VariablePool, selector: Sequence[str]) -> Sequenc def fetch_memory( - variable_pool: VariablePool, app_id: str, node_data_memory: MemoryConfig | None, model_instance: ModelInstance -) -> TokenBufferMemory | None: + variable_pool: VariablePool, + app_id: str, + tenant_id: str, + node_data_memory: MemoryConfig | None, + model_instance: ModelInstance, + node_id: str = "", +) -> BaseMemory | None: + """ + Fetch memory based on configuration mode. + + Returns TokenBufferMemory for conversation mode (default), + or NodeTokenBufferMemory for node mode (Chatflow only). 
+ + :param variable_pool: Variable pool containing system variables + :param app_id: Application ID + :param tenant_id: Tenant ID + :param node_data_memory: Memory configuration + :param model_instance: Model instance for token counting + :param node_id: Node ID in the workflow (required for node mode) + :return: Memory instance or None if not applicable + """ if not node_data_memory: return None - # get conversation id + # Get conversation_id from variable pool (required for both modes in Chatflow) conversation_id_variable = variable_pool.get(["sys", SystemVariableKey.CONVERSATION_ID]) if not isinstance(conversation_id_variable, StringSegment): return None conversation_id = conversation_id_variable.value - with Session(db.engine, expire_on_commit=False) as session: - stmt = select(Conversation).where(Conversation.app_id == app_id, Conversation.id == conversation_id) - conversation = session.scalar(stmt) - if not conversation: + # Return appropriate memory type based on mode + if node_data_memory.mode == MemoryMode.NODE: + # Node-level memory (Chatflow only) + if not node_id: return None - - memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance) - return memory + return NodeTokenBufferMemory( + app_id=app_id, + conversation_id=conversation_id, + node_id=node_id, + tenant_id=tenant_id, + model_instance=model_instance, + ) + else: + # Conversation-level memory (default) + with Session(db.engine, expire_on_commit=False) as session: + stmt = select(Conversation).where(Conversation.app_id == app_id, Conversation.id == conversation_id) + conversation = session.scalar(stmt) + if not conversation: + return None + return TokenBufferMemory(conversation=conversation, model_instance=model_instance) def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUsage): @@ -170,3 +209,87 @@ def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUs ) session.execute(stmt) session.commit() + + +def build_context( 
+ prompt_messages: Sequence[PromptMessage], + assistant_response: str, +) -> list[PromptMessage]: + """ + Build context from prompt messages and assistant response. + Excludes system messages and includes the current LLM response. + Returns list[PromptMessage] for use with ArrayPromptMessageSegment. + + Note: Multi-modal content base64 data is truncated to avoid storing large data in context. + """ + context_messages: list[PromptMessage] = [ + _truncate_multimodal_content(m) for m in prompt_messages if m.role != PromptMessageRole.SYSTEM + ] + context_messages.append(AssistantPromptMessage(content=assistant_response)) + return context_messages + + +def _truncate_multimodal_content(message: PromptMessage) -> PromptMessage: + """ + Truncate multi-modal content base64 data in a message to avoid storing large data. + Preserves the PromptMessage structure for ArrayPromptMessageSegment compatibility. + + If file_ref is present, clears base64_data and url (they can be restored later). + Otherwise, truncates base64_data as fallback for legacy data. + """ + content = message.content + if content is None or isinstance(content, str): + return message + + # Process list content, handling multi-modal data based on file_ref availability + new_content: list[PromptMessageContentUnionTypes] = [] + for item in content: + if isinstance(item, MultiModalPromptMessageContent): + if item.file_ref: + # Clear base64 and url, keep file_ref for later restoration + new_content.append(item.model_copy(update={"base64_data": "", "url": ""})) + else: + # Fallback: truncate base64_data if no file_ref (legacy data) + truncated_base64 = "" + if item.base64_data: + truncated_base64 = item.base64_data[:10] + "...[TRUNCATED]..." 
+ item.base64_data[-10:] + new_content.append(item.model_copy(update={"base64_data": truncated_base64})) + else: + new_content.append(item) + + return message.model_copy(update={"content": new_content}) + + +def restore_multimodal_content_in_messages(messages: Sequence[PromptMessage]) -> list[PromptMessage]: + """ + Restore multimodal content (base64 or url) in a list of PromptMessages. + + When context is saved, base64_data is cleared to save storage space. + This function restores the content by parsing file_ref in each MultiModalPromptMessageContent. + + Args: + messages: List of PromptMessages that may contain truncated multimodal content + + Returns: + List of PromptMessages with restored multimodal content + """ + from core.file import file_manager + + return [_restore_message_content(msg, file_manager) for msg in messages] + + +def _restore_message_content(message: PromptMessage, file_manager) -> PromptMessage: + """Restore multimodal content in a single PromptMessage.""" + content = message.content + if content is None or isinstance(content, str): + return message + + restored_content: list[PromptMessageContentUnionTypes] = [] + for item in content: + if isinstance(item, MultiModalPromptMessageContent): + restored_item = file_manager.restore_multimodal_content(item) + restored_content.append(cast(PromptMessageContentUnionTypes, restored_item)) + else: + restored_content.append(item) + + return message.model_copy(update={"content": restored_content}) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 36a0378fbf..1853739650 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -7,7 +7,7 @@ import logging import re import time from collections.abc import Generator, Mapping, Sequence -from typing import TYPE_CHECKING, Any, Literal +from typing import TYPE_CHECKING, Any, Literal, cast from sqlalchemy import select @@ -18,7 +18,7 @@ from core.file import File, FileTransferMethod, 
FileType, file_manager from core.helper.code_executor import CodeExecutor, CodeLanguage from core.llm_generator.output_parser.errors import OutputParserError from core.llm_generator.output_parser.structured_output import invoke_llm_with_structured_output -from core.memory.token_buffer_memory import TokenBufferMemory +from core.memory.base import BaseMemory from core.model_manager import ModelInstance, ModelManager from core.model_runtime.entities import ( ImagePromptMessageContent, @@ -56,6 +56,7 @@ from core.tools.signature import sign_upload_file from core.tools.tool_manager import ToolManager from core.variables import ( ArrayFileSegment, + ArrayPromptMessageSegment, ArraySegment, FileSegment, NoneSegment, @@ -103,6 +104,7 @@ from .entities import ( LLMTraceSegment, ModelConfig, ModelTraceSegment, + PromptMessageContext, StreamBuffers, ThinkTagStreamParser, ToolLogPayload, @@ -181,8 +183,9 @@ class LLMNode(Node[LLMNodeData]): variable_pool = self.graph_runtime_state.variable_pool try: - # init messages template - self.node_data.prompt_template = self._transform_chat_messages(self.node_data.prompt_template) + # Parse prompt template to separate static messages and context references + prompt_template = self.node_data.prompt_template + static_messages, context_refs, template_order = self._parse_prompt_template() # fetch variables and fetch values from variable pool inputs = self._fetch_inputs(node_data=self.node_data) @@ -230,8 +233,10 @@ class LLMNode(Node[LLMNodeData]): memory = llm_utils.fetch_memory( variable_pool=variable_pool, app_id=self.app_id, + tenant_id=self.tenant_id, node_data_memory=self.node_data.memory, model_instance=model_instance, + node_id=self._node_id, ) query: str | None = None @@ -242,21 +247,40 @@ class LLMNode(Node[LLMNodeData]): ): query = query_variable.text - prompt_messages, stop = LLMNode.fetch_prompt_messages( - sys_query=query, - sys_files=files, - context=context, - memory=memory, - model_config=model_config, - 
prompt_template=self.node_data.prompt_template, - memory_config=self.node_data.memory, - vision_enabled=self.node_data.vision.enabled, - vision_detail=self.node_data.vision.configs.detail, - variable_pool=variable_pool, - jinja2_variables=self.node_data.prompt_config.jinja2_variables, - tenant_id=self.tenant_id, - context_files=context_files, - ) + # Get prompt messages + prompt_messages: Sequence[PromptMessage] + stop: Sequence[str] | None + if isinstance(prompt_template, list) and context_refs: + prompt_messages, stop = self._build_prompt_messages_with_context( + context_refs=context_refs, + template_order=template_order, + static_messages=static_messages, + query=query, + files=files, + context=context, + memory=memory, + model_config=model_config, + context_files=context_files, + ) + else: + prompt_messages, stop = LLMNode.fetch_prompt_messages( + sys_query=query, + sys_files=files, + context=context, + memory=memory, + model_config=model_config, + prompt_template=cast( + Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, + self.node_data.prompt_template, + ), + memory_config=self.node_data.memory, + vision_enabled=self.node_data.vision.enabled, + vision_detail=self.node_data.vision.configs.detail, + variable_pool=variable_pool, + jinja2_variables=self.node_data.prompt_config.jinja2_variables, + tenant_id=self.tenant_id, + context_files=context_files, + ) # Variables for outputs generation_data: LLMGenerationData | None = None @@ -300,6 +324,7 @@ class LLMNode(Node[LLMNodeData]): node_id=self._node_id, node_type=self.node_type, reasoning_format=self._node_data.reasoning_format, + tenant_id=self.tenant_id, ) ( @@ -348,6 +373,7 @@ class LLMNode(Node[LLMNodeData]): "reasoning_content": reasoning_content, "usage": jsonable_encoder(usage), "finish_reason": finish_reason, + "context": llm_utils.build_context(prompt_messages, clean_text), } # Build generation field @@ -463,6 +489,7 @@ class LLMNode(Node[LLMNodeData]): node_id: str, node_type: 
NodeType, reasoning_format: Literal["separated", "tagged"] = "tagged", + tenant_id: str | None = None, ) -> Generator[NodeEventBase | LLMStructuredOutput, None, None]: model_schema = model_instance.model_type_instance.get_model_schema( node_data_model.name, model_instance.credentials @@ -486,6 +513,7 @@ class LLMNode(Node[LLMNodeData]): stop=list(stop or []), stream=True, user=user_id, + tenant_id=tenant_id, ) else: request_start_time = time.perf_counter() @@ -743,6 +771,212 @@ class LLMNode(Node[LLMNodeData]): return messages + def _parse_prompt_template( + self, + ) -> tuple[list[LLMNodeChatModelMessage], list[PromptMessageContext], list[tuple[int, str]]]: + """ + Parse prompt_template to separate static messages and context references. + + Returns: + Tuple of (static_messages, context_refs, template_order) + - static_messages: list of LLMNodeChatModelMessage + - context_refs: list of PromptMessageContext + - template_order: list of (index, type) tuples preserving original order + """ + prompt_template = self.node_data.prompt_template + static_messages: list[LLMNodeChatModelMessage] = [] + context_refs: list[PromptMessageContext] = [] + template_order: list[tuple[int, str]] = [] + + if isinstance(prompt_template, list): + for idx, item in enumerate(prompt_template): + if isinstance(item, PromptMessageContext): + context_refs.append(item) + template_order.append((idx, "context")) + else: + static_messages.append(item) + template_order.append((idx, "static")) + # Transform static messages for jinja2 + if static_messages: + self.node_data.prompt_template = self._transform_chat_messages(static_messages) + + return static_messages, context_refs, template_order + + def _build_prompt_messages_with_context( + self, + *, + context_refs: list[PromptMessageContext], + template_order: list[tuple[int, str]], + static_messages: list[LLMNodeChatModelMessage], + query: str | None, + files: Sequence[File], + context: str | None, + memory: BaseMemory | None, + model_config: 
ModelConfigWithCredentialsEntity, + context_files: list[File], + ) -> tuple[list[PromptMessage], Sequence[str] | None]: + """ + Build prompt messages by combining static messages and context references in DSL order. + + Returns: + Tuple of (prompt_messages, stop_sequences) + """ + variable_pool = self.graph_runtime_state.variable_pool + + # Process messages in DSL order: iterate once and handle each type directly + combined_messages: list[PromptMessage] = [] + context_idx = 0 + static_idx = 0 + + for _, type_ in template_order: + if type_ == "context": + # Handle context reference + ctx_ref = context_refs[context_idx] + ctx_var = variable_pool.get(ctx_ref.value_selector) + if ctx_var is None: + raise VariableNotFoundError(f"Variable {'.'.join(ctx_ref.value_selector)} not found") + if not isinstance(ctx_var, ArrayPromptMessageSegment): + raise InvalidVariableTypeError(f"Variable {'.'.join(ctx_ref.value_selector)} is not array[message]") + # Restore multimodal content (base64/url) that was truncated when saving context + restored_messages = llm_utils.restore_multimodal_content_in_messages(ctx_var.value) + combined_messages.extend(restored_messages) + context_idx += 1 + else: + # Handle static message + static_msg = static_messages[static_idx] + processed_msgs = LLMNode.handle_list_messages( + messages=[static_msg], + context=context, + jinja2_variables=self.node_data.prompt_config.jinja2_variables or [], + variable_pool=variable_pool, + vision_detail_config=self.node_data.vision.configs.detail, + ) + combined_messages.extend(processed_msgs) + static_idx += 1 + + # Append memory messages + memory_messages = _handle_memory_chat_mode( + memory=memory, + memory_config=self.node_data.memory, + model_config=model_config, + ) + combined_messages.extend(memory_messages) + + # Append current query if provided + if query: + query_message = LLMNodeChatModelMessage( + text=query, + role=PromptMessageRole.USER, + edition_type="basic", + ) + query_msgs = 
LLMNode.handle_list_messages( + messages=[query_message], + context="", + jinja2_variables=[], + variable_pool=variable_pool, + vision_detail_config=self.node_data.vision.configs.detail, + ) + combined_messages.extend(query_msgs) + + # Handle files (sys_files and context_files) + combined_messages = self._append_files_to_messages( + messages=combined_messages, + sys_files=files, + context_files=context_files, + model_config=model_config, + ) + + # Filter empty messages and get stop sequences + combined_messages = self._filter_messages(combined_messages, model_config) + stop = self._get_stop_sequences(model_config) + + return combined_messages, stop + + def _append_files_to_messages( + self, + *, + messages: list[PromptMessage], + sys_files: Sequence[File], + context_files: list[File], + model_config: ModelConfigWithCredentialsEntity, + ) -> list[PromptMessage]: + """Append sys_files and context_files to messages.""" + vision_enabled = self.node_data.vision.enabled + vision_detail = self.node_data.vision.configs.detail + + # Handle sys_files (will be deprecated later) + if vision_enabled and sys_files: + file_prompts = [ + file_manager.to_prompt_message_content(file, image_detail_config=vision_detail) for file in sys_files + ] + if messages and isinstance(messages[-1], UserPromptMessage) and isinstance(messages[-1].content, list): + messages[-1] = UserPromptMessage(content=file_prompts + messages[-1].content) + else: + messages.append(UserPromptMessage(content=file_prompts)) + + # Handle context_files + if vision_enabled and context_files: + file_prompts = [ + file_manager.to_prompt_message_content(file, image_detail_config=vision_detail) + for file in context_files + ] + if messages and isinstance(messages[-1], UserPromptMessage) and isinstance(messages[-1].content, list): + messages[-1] = UserPromptMessage(content=file_prompts + messages[-1].content) + else: + messages.append(UserPromptMessage(content=file_prompts)) + + return messages + + def _filter_messages( + 
self, messages: list[PromptMessage], model_config: ModelConfigWithCredentialsEntity + ) -> list[PromptMessage]: + """Filter empty messages and unsupported content types.""" + filtered_messages: list[PromptMessage] = [] + + for message in messages: + if isinstance(message.content, list): + filtered_content: list[PromptMessageContentUnionTypes] = [] + for content_item in message.content: + # Skip non-text content if features are not defined + if not model_config.model_schema.features: + if content_item.type != PromptMessageContentType.TEXT: + continue + filtered_content.append(content_item) + continue + + # Skip content if corresponding feature is not supported + feature_map = { + PromptMessageContentType.IMAGE: ModelFeature.VISION, + PromptMessageContentType.DOCUMENT: ModelFeature.DOCUMENT, + PromptMessageContentType.VIDEO: ModelFeature.VIDEO, + PromptMessageContentType.AUDIO: ModelFeature.AUDIO, + } + required_feature = feature_map.get(content_item.type) + if required_feature and required_feature not in model_config.model_schema.features: + continue + filtered_content.append(content_item) + + # Simplify single text content + if len(filtered_content) == 1 and filtered_content[0].type == PromptMessageContentType.TEXT: + message.content = filtered_content[0].data + else: + message.content = filtered_content + + if not message.is_empty(): + filtered_messages.append(message) + + if not filtered_messages: + raise NoPromptFoundError( + "No prompt found in the LLM configuration. " + "Please ensure a prompt is properly configured before proceeding." 
+ ) + + return filtered_messages + + def _get_stop_sequences(self, model_config: ModelConfigWithCredentialsEntity) -> Sequence[str] | None: + """Get stop sequences from model config.""" + return model_config.stop + def _fetch_jinja_inputs(self, node_data: LLMNodeData) -> dict[str, str]: variables: dict[str, Any] = {} @@ -940,7 +1174,7 @@ class LLMNode(Node[LLMNodeData]): sys_query: str | None = None, sys_files: Sequence[File], context: str | None = None, - memory: TokenBufferMemory | None = None, + memory: BaseMemory | None = None, model_config: ModelConfigWithCredentialsEntity, prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate, memory_config: MemoryConfig | None = None, @@ -2287,7 +2521,7 @@ def _calculate_rest_token( def _handle_memory_chat_mode( *, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, memory_config: MemoryConfig | None, model_config: ModelConfigWithCredentialsEntity, ) -> Sequence[PromptMessage]: @@ -2304,7 +2538,7 @@ def _handle_memory_chat_mode( def _handle_memory_completion_mode( *, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, memory_config: MemoryConfig | None, model_config: ModelConfigWithCredentialsEntity, ) -> str: diff --git a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py index 08e0542d61..f78aa0cc3e 100644 --- a/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py +++ b/api/core/workflow/nodes/parameter_extractor/parameter_extractor_node.py @@ -7,7 +7,7 @@ from typing import Any, cast from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.file import File -from core.memory.token_buffer_memory import TokenBufferMemory +from core.memory.base import BaseMemory from core.model_manager import ModelInstance from core.model_runtime.entities import ImagePromptMessageContent from core.model_runtime.entities.llm_entities 
import LLMUsage @@ -145,8 +145,10 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): memory = llm_utils.fetch_memory( variable_pool=variable_pool, app_id=self.app_id, + tenant_id=self.tenant_id, node_data_memory=node_data.memory, model_instance=model_instance, + node_id=self._node_id, ) if ( @@ -244,6 +246,10 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): # transform result into standard format result = self._transform_result(data=node_data, result=result or {}) + # Build context from prompt messages and response + assistant_response = json.dumps(result, ensure_ascii=False) + context = llm_utils.build_context(prompt_messages, assistant_response) + return NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=inputs, @@ -252,6 +258,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): "__is_success": 1 if not error else 0, "__reason": error, "__usage": jsonable_encoder(usage), + "context": context, **result, }, metadata={ @@ -299,7 +306,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): query: str, variable_pool: VariablePool, model_config: ModelConfigWithCredentialsEntity, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, files: Sequence[File], vision_detail: ImagePromptMessageContent.DETAIL | None = None, ) -> tuple[list[PromptMessage], list[PromptMessageTool]]: @@ -381,7 +388,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): query: str, variable_pool: VariablePool, model_config: ModelConfigWithCredentialsEntity, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, files: Sequence[File], vision_detail: ImagePromptMessageContent.DETAIL | None = None, ) -> list[PromptMessage]: @@ -419,7 +426,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): query: str, variable_pool: VariablePool, model_config: ModelConfigWithCredentialsEntity, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, files: Sequence[File], 
vision_detail: ImagePromptMessageContent.DETAIL | None = None, ) -> list[PromptMessage]: @@ -453,7 +460,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): query: str, variable_pool: VariablePool, model_config: ModelConfigWithCredentialsEntity, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, files: Sequence[File], vision_detail: ImagePromptMessageContent.DETAIL | None = None, ) -> list[PromptMessage]: @@ -681,7 +688,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): node_data: ParameterExtractorNodeData, query: str, variable_pool: VariablePool, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, max_token_limit: int = 2000, ) -> list[ChatModelMessage]: model_mode = ModelMode(node_data.model.mode) @@ -708,7 +715,7 @@ class ParameterExtractorNode(Node[ParameterExtractorNodeData]): node_data: ParameterExtractorNodeData, query: str, variable_pool: VariablePool, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, max_token_limit: int = 2000, ): model_mode = ModelMode(node_data.model.mode) diff --git a/api/core/workflow/nodes/question_classifier/question_classifier_node.py b/api/core/workflow/nodes/question_classifier/question_classifier_node.py index 4a3e8e56f8..564e548e9f 100644 --- a/api/core/workflow/nodes/question_classifier/question_classifier_node.py +++ b/api/core/workflow/nodes/question_classifier/question_classifier_node.py @@ -4,7 +4,7 @@ from collections.abc import Mapping, Sequence from typing import TYPE_CHECKING, Any from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity -from core.memory.token_buffer_memory import TokenBufferMemory +from core.memory.base import BaseMemory from core.model_manager import ModelInstance from core.model_runtime.entities import LLMUsage, ModelPropertyKey, PromptMessageRole from core.model_runtime.utils.encoders import jsonable_encoder @@ -96,8 +96,10 @@ class QuestionClassifierNode(Node[QuestionClassifierNodeData]): memory 
= llm_utils.fetch_memory( variable_pool=variable_pool, app_id=self.app_id, + tenant_id=self.tenant_id, node_data_memory=node_data.memory, model_instance=model_instance, + node_id=self._node_id, ) # fetch instruction node_data.instruction = node_data.instruction or "" @@ -197,10 +199,15 @@ class QuestionClassifierNode(Node[QuestionClassifierNodeData]): "model_provider": model_config.provider, "model_name": model_config.model, } + # Build context from prompt messages and response + assistant_response = f"class_name: {category_name}, class_id: {category_id}" + context = llm_utils.build_context(prompt_messages, assistant_response) + outputs = { "class_name": category_name, "class_id": category_id, "usage": jsonable_encoder(usage), + "context": context, } return NodeRunResult( @@ -312,7 +319,7 @@ class QuestionClassifierNode(Node[QuestionClassifierNodeData]): self, node_data: QuestionClassifierNodeData, query: str, - memory: TokenBufferMemory | None, + memory: BaseMemory | None, max_token_limit: int = 2000, ): model_mode = ModelMode(node_data.model.mode) diff --git a/api/core/workflow/nodes/tool/entities.py b/api/core/workflow/nodes/tool/entities.py index c1cfbb1edc..30bca3b7f2 100644 --- a/api/core/workflow/nodes/tool/entities.py +++ b/api/core/workflow/nodes/tool/entities.py @@ -1,11 +1,63 @@ -from typing import Any, Literal, Union +import re +from collections.abc import Sequence +from typing import Any, Literal, Self, Union -from pydantic import BaseModel, field_validator +from pydantic import BaseModel, field_validator, model_validator from pydantic_core.core_schema import ValidationInfo from core.tools.entities.tool_entities import ToolProviderType from core.workflow.nodes.base.entities import BaseNodeData +# Pattern to match mention value format: {{@node.context@}}instruction +# The placeholder {{@node.context@}} must appear at the beginning +# Format: {{@agent_node_id.context@}} where agent_node_id is dynamic, context is fixed +MENTION_VALUE_PATTERN = 
re.compile(r"^\{\{@([a-zA-Z0-9_]+)\.context@\}\}(.*)$", re.DOTALL) + + +def parse_mention_value(value: str) -> tuple[str, str]: + """Parse mention value into (node_id, instruction). + + Args: + value: The mention value string like "{{@llm.context@}}extract keywords" + + Returns: + Tuple of (node_id, instruction) + + Raises: + ValueError: If value format is invalid + """ + match = MENTION_VALUE_PATTERN.match(value) + if not match: + raise ValueError( + "For mention type, value must start with {{@node.context@}} placeholder, " + "e.g., '{{@llm.context@}}extract keywords'" + ) + return match.group(1), match.group(2) + + +class MentionConfig(BaseModel): + """Configuration for extracting value from context variable. + + Used when a tool parameter needs to be extracted from list[PromptMessage] + context using an extractor LLM node. + + Note: instruction is embedded in the value field as "{{@node.context@}}instruction" + """ + + # ID of the extractor LLM node + extractor_node_id: str + + # Output variable selector from extractor node + # e.g., ["text"], ["structured_output", "query"] + output_selector: Sequence[str] + + # Strategy when output is None + null_strategy: Literal["raise_error", "use_default"] = "raise_error" + + # Default value when null_strategy is "use_default" + # Type should match the parameter's expected type + default_value: Any = None + class ToolEntity(BaseModel): provider_id: str @@ -35,7 +87,9 @@ class ToolNodeData(BaseNodeData, ToolEntity): class ToolInput(BaseModel): # TODO: check this type value: Union[Any, list[str]] - type: Literal["mixed", "variable", "constant"] + type: Literal["mixed", "variable", "constant", "mention"] + # Required config for mention type, extracting value from context variable + mention_config: MentionConfig | None = None @field_validator("type", mode="before") @classmethod @@ -48,6 +102,9 @@ class ToolNodeData(BaseNodeData, ToolEntity): if typ == "mixed" and not isinstance(value, str): raise ValueError("value must be a 
string") + elif typ == "mention": + # Skip here, will be validated in model_validator + pass elif typ == "variable": if not isinstance(value, list): raise ValueError("value must be a list") @@ -58,6 +115,26 @@ class ToolNodeData(BaseNodeData, ToolEntity): raise ValueError("value must be a string, int, float, bool or dict") return typ + @model_validator(mode="after") + def check_mention_type(self) -> Self: + """Validate mention type with mention_config.""" + if self.type != "mention": + return self + + value = self.value + if value is None: + return self + + if not isinstance(value, str): + raise ValueError("value must be a string for mention type") + # For mention type, value must match format: {{@node.context@}}instruction + # This will raise ValueError if format is invalid + parse_mention_value(value) + # mention_config is required for mention type + if self.mention_config is None: + raise ValueError("mention_config is required for mention type") + return self + tool_parameters: dict[str, ToolInput] # The version of the tool parameter. # If this value is None, it indicates this is a previous version diff --git a/api/core/workflow/nodes/tool/tool_node.py b/api/core/workflow/nodes/tool/tool_node.py index 2e7ec757b4..549851302a 100644 --- a/api/core/workflow/nodes/tool/tool_node.py +++ b/api/core/workflow/nodes/tool/tool_node.py @@ -1,7 +1,10 @@ +import logging from collections.abc import Generator, Mapping, Sequence from typing import TYPE_CHECKING, Any from sqlalchemy import select + +logger = logging.getLogger(__name__) from sqlalchemy.orm import Session from core.callback_handler.workflow_tool_callback_handler import DifyWorkflowCallbackHandler @@ -184,6 +187,7 @@ class ToolNode(Node[ToolNodeData]): tool_parameters (Sequence[ToolParameter]): The list of tool parameters. variable_pool (VariablePool): The variable pool containing the variables. node_data (ToolNodeData): The data associated with the tool node. 
+ for_log (bool): Whether to generate parameters for logging. Returns: Mapping[str, Any]: A dictionary containing the generated parameters. @@ -199,14 +203,37 @@ class ToolNode(Node[ToolNodeData]): continue tool_input = node_data.tool_parameters[parameter_name] if tool_input.type == "variable": - variable = variable_pool.get(tool_input.value) + if not isinstance(tool_input.value, list): + raise ToolParameterError(f"Invalid variable selector for parameter '{parameter_name}'") + selector = tool_input.value + variable = variable_pool.get(selector) if variable is None: if parameter.required: - raise ToolParameterError(f"Variable {tool_input.value} does not exist") + raise ToolParameterError(f"Variable {selector} does not exist") continue parameter_value = variable.value + elif tool_input.type == "mention": + # Mention type: get value from extractor node's output + if tool_input.mention_config is None: + raise ToolParameterError( + f"mention_config is required for mention type parameter '{parameter_name}'" + ) + mention_config = tool_input.mention_config.model_dump() + try: + parameter_value, found = variable_pool.resolve_mention( + mention_config, parameter_name=parameter_name + ) + if not found and parameter.required: + raise ToolParameterError( + f"Extractor output not found for required parameter '{parameter_name}'" + ) + if not found: + continue + except ValueError as e: + raise ToolParameterError(str(e)) from e elif tool_input.type in {"mixed", "constant"}: - segment_group = variable_pool.convert_template(str(tool_input.value)) + template = str(tool_input.value) + segment_group = variable_pool.convert_template(template) parameter_value = segment_group.log if for_log else segment_group.text else: raise ToolParameterError(f"Unknown tool input type '{tool_input.type}'") @@ -488,8 +515,12 @@ class ToolNode(Node[ToolNodeData]): for selector in selectors: result[selector.variable] = selector.value_selector elif input.type == "variable": - selector_key = 
".".join(input.value) - result[f"#{selector_key}#"] = input.value + if isinstance(input.value, list): + selector_key = ".".join(input.value) + result[f"#{selector_key}#"] = input.value + elif input.type == "mention": + # Mention type: value is handled by extractor node, no direct variable reference + pass elif input.type == "constant": pass diff --git a/api/core/workflow/runtime/variable_pool.py b/api/core/workflow/runtime/variable_pool.py index d205c6ac8f..ff8c215a76 100644 --- a/api/core/workflow/runtime/variable_pool.py +++ b/api/core/workflow/runtime/variable_pool.py @@ -268,6 +268,58 @@ class VariablePool(BaseModel): continue self.add(selector, value) + def resolve_mention( + self, + mention_config: Mapping[str, Any], + /, + *, + parameter_name: str = "", + ) -> tuple[Any, bool]: + """ + Resolve a mention parameter value from an extractor node's output. + + Mention parameters reference values extracted by an extractor LLM node + from list[PromptMessage] context. + + Args: + mention_config: A dict containing: + - extractor_node_id: ID of the extractor LLM node + - output_selector: Selector path for the output variable (e.g., ["text"]) + - null_strategy: "raise_error" or "use_default" + - default_value: Value to use when null_strategy is "use_default" + parameter_name: Name of the parameter being resolved (for error messages) + + Returns: + Tuple of (resolved_value, found): + - resolved_value: The extracted value, or default_value if not found + - found: True if value was found, False if using default + + Raises: + ValueError: If extractor_node_id is missing, or if null_strategy is + "raise_error" and the value is not found + """ + extractor_node_id = mention_config.get("extractor_node_id") + if not extractor_node_id: + raise ValueError(f"Missing extractor_node_id for mention parameter '{parameter_name}'") + + output_selector = list(mention_config.get("output_selector", [])) + null_strategy = mention_config.get("null_strategy", "raise_error") + default_value = 
mention_config.get("default_value") + + # Build full selector: [extractor_node_id, ...output_selector] + full_selector = [extractor_node_id] + output_selector + variable = self.get(full_selector) + + if variable is None: + if null_strategy == "use_default": + return default_value, False + raise ValueError( + f"Extractor node '{extractor_node_id}' output '{'.'.join(output_selector)}' " + f"not found for parameter '{parameter_name}'" + ) + + return variable.value, True + @classmethod def empty(cls) -> VariablePool: """Create an empty variable pool.""" diff --git a/api/factories/variable_factory.py b/api/factories/variable_factory.py index 3f030ae127..17cbb9cfdd 100644 --- a/api/factories/variable_factory.py +++ b/api/factories/variable_factory.py @@ -4,6 +4,7 @@ from uuid import uuid4 from configs import dify_config from core.file import File +from core.model_runtime.entities import PromptMessage from core.variables.exc import VariableError from core.variables.segments import ( ArrayAnySegment, @@ -11,6 +12,7 @@ from core.variables.segments import ( ArrayFileSegment, ArrayNumberSegment, ArrayObjectSegment, + ArrayPromptMessageSegment, ArraySegment, ArrayStringSegment, BooleanSegment, @@ -29,6 +31,7 @@ from core.variables.variables import ( ArrayFileVariable, ArrayNumberVariable, ArrayObjectVariable, + ArrayPromptMessageVariable, ArrayStringVariable, BooleanVariable, FileVariable, @@ -61,6 +64,7 @@ SEGMENT_TO_VARIABLE_MAP = { ArrayFileSegment: ArrayFileVariable, ArrayNumberSegment: ArrayNumberVariable, ArrayObjectSegment: ArrayObjectVariable, + ArrayPromptMessageSegment: ArrayPromptMessageVariable, ArrayStringSegment: ArrayStringVariable, BooleanSegment: BooleanVariable, FileSegment: FileVariable, @@ -156,7 +160,13 @@ def build_segment(value: Any, /) -> Segment: return ObjectSegment(value=value) if isinstance(value, File): return FileSegment(value=value) + if isinstance(value, PromptMessage): + # Single PromptMessage should be wrapped in a list + return 
ArrayPromptMessageSegment(value=[value]) if isinstance(value, list): + # Check if all items are PromptMessage + if value and all(isinstance(item, PromptMessage) for item in value): + return ArrayPromptMessageSegment(value=value) items = [build_segment(item) for item in value] types = {item.value_type for item in items} if all(isinstance(item, ArraySegment) for item in items): @@ -200,6 +210,7 @@ _segment_factory: Mapping[SegmentType, type[Segment]] = { SegmentType.ARRAY_OBJECT: ArrayObjectSegment, SegmentType.ARRAY_FILE: ArrayFileSegment, SegmentType.ARRAY_BOOLEAN: ArrayBooleanSegment, + SegmentType.ARRAY_PROMPT_MESSAGE: ArrayPromptMessageSegment, } @@ -274,6 +285,10 @@ def build_segment_with_type(segment_type: SegmentType, value: Any) -> Segment: ): segment_class = _segment_factory[inferred_type] return segment_class(value_type=inferred_type, value=value) + elif segment_type == SegmentType.ARRAY_PROMPT_MESSAGE and inferred_type == SegmentType.ARRAY_OBJECT: + # PromptMessage serializes to dict, so ARRAY_OBJECT is compatible with ARRAY_PROMPT_MESSAGE + segment_class = _segment_factory[segment_type] + return segment_class(value_type=segment_type, value=value) else: raise TypeMismatchError(f"Type mismatch: expected {segment_type}, but got {inferred_type}, value={value}") diff --git a/api/models/workflow.py b/api/models/workflow.py index 7915d923ec..8a40d4d985 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -1324,7 +1324,7 @@ class WorkflowDraftVariable(Base): # which may differ from the original value's type. Typically, they are the same, # but in cases where the structurally truncated value still exceeds the size limit, # text slicing is applied, and the `value_type` is converted to `STRING`. 
- value_type: Mapped[SegmentType] = mapped_column(EnumText(SegmentType, length=20)) + value_type: Mapped[SegmentType] = mapped_column(EnumText(SegmentType, length=21)) # The variable's value serialized as a JSON string # @@ -1698,7 +1698,7 @@ class WorkflowDraftVariableFile(Base): # The `value_type` field records the type of the original value. value_type: Mapped[SegmentType] = mapped_column( - EnumText(SegmentType, length=20), + EnumText(SegmentType, length=21), nullable=False, ) diff --git a/api/services/variable_truncator.py b/api/services/variable_truncator.py index f973361341..9d587c7850 100644 --- a/api/services/variable_truncator.py +++ b/api/services/variable_truncator.py @@ -7,6 +7,7 @@ from typing import Any, Generic, TypeAlias, TypeVar, overload from configs import dify_config from core.file.models import File +from core.model_runtime.entities import PromptMessage from core.variables.segments import ( ArrayFileSegment, ArraySegment, @@ -287,6 +288,10 @@ class VariableTruncator(BaseTruncator): if isinstance(item, File): truncated_value.append(item) continue + # PromptMessage items pass through untruncated (mirrors the File handling above) + if isinstance(item, PromptMessage): + truncated_value.append(item) + continue if i >= target_length: return _PartResult(truncated_value, used_size, True) if i > 0: diff --git a/api/services/workflow/entities.py b/api/services/workflow/entities.py index 70ec8d6e2a..cf5519527d 100644 --- a/api/services/workflow/entities.py +++ b/api/services/workflow/entities.py @@ -163,3 +163,29 @@ class WorkflowScheduleCFSPlanEntity(BaseModel): schedule_strategy: Strategy granularity: int = Field(default=-1) # -1 means infinite + + +# ========== Mention Graph Entities ========== + + +class MentionParameterSchema(BaseModel): + """Schema for the parameter to be extracted from mention context.""" + + name: str = Field(description="Parameter name (e.g., 'query')") + type: str = Field(default="string", description="Parameter type (e.g., 'string', 'number')") 
+ description: str = Field(default="", description="Parameter description for LLM") + + +class MentionGraphRequest(BaseModel): + """Request payload for generating mention graph.""" + + parent_node_id: str = Field(description="ID of the parent node that uses the extracted value") + parameter_key: str = Field(description="Key of the parameter being extracted") + context_source: list[str] = Field(description="Variable selector for the context source") + parameter_schema: MentionParameterSchema = Field(description="Schema of the parameter to extract") + + +class MentionGraphResponse(BaseModel): + """Response containing the generated mention graph.""" + + graph: Mapping[str, Any] = Field(description="Complete graph structure with nodes, edges, viewport") diff --git a/api/services/workflow/mention_graph_service.py b/api/services/workflow/mention_graph_service.py new file mode 100644 index 0000000000..d0729c6d20 --- /dev/null +++ b/api/services/workflow/mention_graph_service.py @@ -0,0 +1,143 @@ +""" +Service for generating Mention LLM node graph structures. + +This service creates graph structures containing LLM nodes configured for +extracting values from list[PromptMessage] variables. +""" + +from typing import Any + +from sqlalchemy.orm import Session + +from core.model_runtime.entities import LLMMode +from core.workflow.enums import NodeType +from services.model_provider_service import ModelProviderService +from services.workflow.entities import MentionGraphRequest, MentionGraphResponse, MentionParameterSchema + + +class MentionGraphService: + """Service for generating Mention LLM node graph structures.""" + + def __init__(self, session: Session): + self._session = session + + def generate_mention_node_id(self, node_id: str, parameter_name: str) -> str: + """Generate mention node ID following the naming convention. 
+ + Format: {node_id}_ext_{parameter_name} + """ + return f"{node_id}_ext_{parameter_name}" + + def generate_mention_graph(self, tenant_id: str, request: MentionGraphRequest) -> MentionGraphResponse: + """Generate a complete graph structure containing a Mention LLM node. + + Args: + tenant_id: The tenant ID for fetching default model config + request: The mention graph generation request + + Returns: + Complete graph structure with nodes, edges, and viewport + """ + node_id = self.generate_mention_node_id(request.parent_node_id, request.parameter_key) + model_config = self._get_default_model_config(tenant_id) + node = self._build_mention_llm_node( + node_id=node_id, + parent_node_id=request.parent_node_id, + context_source=request.context_source, + parameter_schema=request.parameter_schema, + model_config=model_config, + ) + + graph = { + "nodes": [node], + "edges": [], + "viewport": {}, + } + + return MentionGraphResponse(graph=graph) + + def _get_default_model_config(self, tenant_id: str) -> dict[str, Any]: + """Get the default LLM model configuration for the tenant.""" + model_provider_service = ModelProviderService() + default_model = model_provider_service.get_default_model_of_model_type( + tenant_id=tenant_id, + model_type="llm", + ) + + if default_model: + return { + "provider": default_model.provider.provider, + "name": default_model.model, + "mode": LLMMode.CHAT.value, + "completion_params": {}, + } + + # Fallback to empty config if no default model is configured + return { + "provider": "", + "name": "", + "mode": LLMMode.CHAT.value, + "completion_params": {}, + } + + def _build_mention_llm_node( + self, + *, + node_id: str, + parent_node_id: str, + context_source: list[str], + parameter_schema: MentionParameterSchema, + model_config: dict[str, Any], + ) -> dict[str, Any]: + """Build the Mention LLM node structure. 
+ + The node uses: + - $context in prompt_template to reference the PromptMessage list + - structured_output for extracting the specific parameter + - parent_node_id to associate with the parent node + """ + prompt_template = [ + { + "role": "system", + "text": "Extract the required parameter value from the conversation context above.", + }, + {"$context": context_source}, + {"role": "user", "text": ""}, + ] + + structured_output = { + "schema": { + "type": "object", + "properties": { + parameter_schema.name: { + "type": parameter_schema.type, + "description": parameter_schema.description, + } + }, + "required": [parameter_schema.name], + "additionalProperties": False, + } + } + + return { + "id": node_id, + "position": {"x": 0, "y": 0}, + "data": { + "type": NodeType.LLM.value, + "title": f"Mention: {parameter_schema.name}", + "desc": f"Extract {parameter_schema.name} from conversation context", + "parent_node_id": parent_node_id, + "model": model_config, + "prompt_template": prompt_template, + "context": { + "enabled": False, + "variable_selector": None, + }, + "vision": { + "enabled": False, + }, + "memory": None, + "structured_output_enabled": True, + "structured_output": structured_output, + }, + } diff --git a/api/tests/fixtures/file output schema.yml b/api/tests/fixtures/file output schema.yml new file mode 100644 index 0000000000..37fc9c72c7 --- /dev/null +++ b/api/tests/fixtures/file output schema.yml @@ -0,0 +1,181 @@ +app: + description: '' + icon: 🤖 + icon_background: '#FFEAD5' + mode: advanced-chat + name: file output schema + use_icon_as_answer_icon: false +dependencies: +- current_identifier: null + type: marketplace + value: + marketplace_plugin_unique_identifier: langgenius/openai:0.2.3@5a7f82fa86e28332ad51941d0b491c1e8a38ead539656442f7bf4c6129cd15fa + version: null +kind: app +version: 0.5.0 +workflow: + conversation_variables: [] + environment_variables: [] + features: + file_upload: + allowed_file_extensions: + - .JPG + - .JPEG + - .PNG + - .GIF 
+ - .WEBP + - .SVG + allowed_file_types: + - image + allowed_file_upload_methods: + - remote_url + - local_file + enabled: true + fileUploadConfig: + attachment_image_file_size_limit: 2 + audio_file_size_limit: 50 + batch_count_limit: 5 + file_size_limit: 15 + file_upload_limit: 10 + image_file_batch_limit: 10 + image_file_size_limit: 10 + single_chunk_attachment_limit: 10 + video_file_size_limit: 100 + workflow_file_upload_limit: 10 + number_limits: 3 + opening_statement: '' + retriever_resource: + enabled: true + sensitive_word_avoidance: + enabled: false + speech_to_text: + enabled: false + suggested_questions: [] + suggested_questions_after_answer: + enabled: false + text_to_speech: + enabled: false + language: '' + voice: '' + graph: + edges: + - data: + sourceType: start + targetType: llm + id: 1768292241666-llm + source: '1768292241666' + sourceHandle: source + target: llm + targetHandle: target + type: custom + - data: + sourceType: llm + targetType: answer + id: llm-answer + source: llm + sourceHandle: source + target: answer + targetHandle: target + type: custom + nodes: + - data: + selected: false + title: User Input + type: start + variables: [] + height: 73 + id: '1768292241666' + position: + x: 80 + y: 282 + positionAbsolute: + x: 80 + y: 282 + sourcePosition: right + targetPosition: left + type: custom + width: 242 + - data: + context: + enabled: false + variable_selector: [] + memory: + query_prompt_template: '{{#sys.query#}} + + + {{#sys.files#}}' + role_prefix: + assistant: '' + user: '' + window: + enabled: false + size: 10 + model: + completion_params: + temperature: 0.7 + mode: chat + name: gpt-4o-mini + provider: langgenius/openai/openai + prompt_template: + - id: e30d75d7-7d85-49ec-be3c-3baf7f6d3c5a + role: system + text: '' + selected: false + structured_output: + schema: + additionalProperties: false + properties: + image: + description: File ID (UUID) of the selected image + format: dify-file-ref + type: string + required: + - image + 
type: object + structured_output_enabled: true + title: LLM + type: llm + vision: + configs: + detail: high + variable_selector: + - sys + - files + enabled: true + height: 88 + id: llm + position: + x: 380 + y: 282 + positionAbsolute: + x: 380 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 242 + - data: + answer: '{{#llm.structured_output.image#}}' + selected: false + title: Answer + type: answer + variables: [] + height: 103 + id: answer + position: + x: 680 + y: 282 + positionAbsolute: + x: 680 + y: 282 + selected: true + sourcePosition: right + targetPosition: left + type: custom + width: 242 + viewport: + x: -149 + y: 97.5 + zoom: 1 + rag_pipeline_variables: [] diff --git a/api/tests/fixtures/pav-test-extraction.yml b/api/tests/fixtures/pav-test-extraction.yml new file mode 100644 index 0000000000..69fe73c493 --- /dev/null +++ b/api/tests/fixtures/pav-test-extraction.yml @@ -0,0 +1,307 @@ +app: + description: Test for variable extraction feature + icon: 🤖 + icon_background: '#FFEAD5' + mode: advanced-chat + name: pav-test-extraction + use_icon_as_answer_icon: false +dependencies: +- current_identifier: null + type: marketplace + value: + marketplace_plugin_unique_identifier: langgenius/google:0.0.8@3efcf55ffeef9d0f77715e0afb23534952ae0cb385c051d0637e86d71199d1a6 + version: null +- current_identifier: null + type: marketplace + value: + marketplace_plugin_unique_identifier: langgenius/openai:0.2.3@5a7f82fa86e28332ad51941d0b491c1e8a38ead539656442f7bf4c6129cd15fa + version: null +- current_identifier: null + type: marketplace + value: + marketplace_plugin_unique_identifier: langgenius/tongyi:0.1.16@d8bffbe45418f0c117fb3393e5e40e61faee98f9a2183f062e5a280e74b15d21 + version: null +kind: app +version: 0.5.0 +workflow: + conversation_variables: [] + environment_variables: [] + features: + file_upload: + allowed_file_extensions: + - .JPG + - .JPEG + - .PNG + - .GIF + - .WEBP + - .SVG + allowed_file_types: + - image + 
allowed_file_upload_methods: + - local_file + - remote_url + enabled: false + image: + enabled: false + number_limits: 3 + transfer_methods: + - local_file + - remote_url + number_limits: 3 + opening_statement: 你好!我是一个搜索助手,请告诉我你想搜索什么内容。 + retriever_resource: + enabled: true + sensitive_word_avoidance: + enabled: false + speech_to_text: + enabled: false + suggested_questions: [] + suggested_questions_after_answer: + enabled: false + text_to_speech: + enabled: false + language: '' + voice: '' + graph: + edges: + - data: + sourceType: start + targetType: llm + id: 1767773675796-llm + source: '1767773675796' + sourceHandle: source + target: llm + targetHandle: target + type: custom + - data: + isInIteration: false + isInLoop: false + sourceType: llm + targetType: tool + id: llm-source-1767773709491-target + source: llm + sourceHandle: source + target: '1767773709491' + targetHandle: target + type: custom + zIndex: 0 + - data: + isInIteration: false + isInLoop: false + sourceType: tool + targetType: answer + id: tool-source-answer-target + source: '1767773709491' + sourceHandle: source + target: answer + targetHandle: target + type: custom + zIndex: 0 + nodes: + - data: + selected: false + title: User Input + type: start + variables: [] + height: 73 + id: '1767773675796' + position: + x: 80 + y: 282 + positionAbsolute: + x: 80 + y: 282 + sourcePosition: right + targetPosition: left + type: custom + width: 242 + - data: + context: + enabled: false + variable_selector: [] + memory: + mode: node + query_prompt_template: '{{#sys.query#}}' + role_prefix: + assistant: '' + user: '' + window: + enabled: true + size: 10 + model: + completion_params: + temperature: 0.7 + mode: chat + name: qwen-max + provider: langgenius/tongyi/tongyi + prompt_template: + - id: 11d06d15-914a-4915-a5b1-0e35ab4fba51 + role: system + text: '你是一个智能搜索助手。用户会告诉你他们想搜索的内容。 + + 请与用户进行对话,了解他们的搜索需求。 + + 当用户明确表达了想要搜索的内容后,你可以回复"好的,我来帮你搜索"。 + + ' + selected: false + title: LLM + type: llm + vision: + enabled: 
false + height: 88 + id: llm + position: + x: 380 + y: 282 + positionAbsolute: + x: 380 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 242 + - data: + is_team_authorization: true + paramSchemas: + - auto_generate: null + default: null + form: llm + human_description: + en_US: used for searching + ja_JP: used for searching + pt_BR: used for searching + zh_Hans: 用于搜索网页内容 + label: + en_US: Query string + ja_JP: Query string + pt_BR: Query string + zh_Hans: 查询语句 + llm_description: key words for searching + max: null + min: null + name: query + options: [] + placeholder: null + precision: null + required: true + scope: null + template: null + type: string + params: + query: '' + plugin_id: langgenius/google + plugin_unique_identifier: langgenius/google:0.0.8@3efcf55ffeef9d0f77715e0afb23534952ae0cb385c051d0637e86d71199d1a6 + provider_icon: http://localhost:5001/console/api/workspaces/current/plugin/icon?tenant_id=7217e801-f6f5-49ec-8103-d7de97a4b98f&filename=1c5871163478957bac64c3fe33d72d003f767497d921c74b742aad27a8344a74.svg + provider_id: langgenius/google/google + provider_name: langgenius/google/google + provider_type: builtin + selected: false + title: GoogleSearch + tool_configurations: {} + tool_description: A tool for performing a Google SERP search and extracting + snippets and webpages.Input should be a search query. 
+ tool_label: GoogleSearch + tool_name: google_search + tool_node_version: '2' + tool_parameters: + query: + type: mention + value: '{{@llm.context@}}请从对话历史中提取用户想要搜索的关键词,只返回关键词本身' + mention_config: + extractor_node_id: 1767773709491_ext_query + output_selector: + - structured_output + - query + null_strategy: use_default + default_value: '' + type: tool + height: 52 + id: '1767773709491' + position: + x: 682 + y: 282 + positionAbsolute: + x: 682 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 242 + - data: + context: + enabled: false + variable_selector: [] + model: + completion_params: + temperature: 0.7 + mode: chat + name: gpt-4o-mini + provider: langgenius/openai/openai + parent_node_id: '1767773709491' + prompt_template: + - $context: + - llm + - context + id: 75d58e22-dc59-40c8-ba6f-aeb28f4f305a + - id: 18ba6710-77f5-47f4-b144-9191833bb547 + role: user + text: 请从对话历史中提取用户想要搜索的关键词,只返回关键词本身,不要返回其他内容 + selected: false + structured_output: + schema: + additionalProperties: false + properties: + query: + description: 搜索的关键词 + type: string + required: + - query + type: object + structured_output_enabled: true + title: 提取搜索关键词 + type: llm + vision: + enabled: false + height: 88 + id: 1767773709491_ext_query + position: + x: 531 + y: 382 + positionAbsolute: + x: 531 + y: 382 + selected: true + sourcePosition: right + targetPosition: left + type: custom + width: 242 + - data: + answer: '搜索结果: + + {{#1767773709491.text#}} + + ' + selected: false + title: Answer + type: answer + height: 103 + id: answer + position: + x: 984 + y: 282 + positionAbsolute: + x: 984 + y: 282 + selected: false + sourcePosition: right + targetPosition: left + type: custom + width: 242 + viewport: + x: -151 + y: 123 + zoom: 1 + rag_pipeline_variables: [] diff --git a/api/tests/unit_tests/core/file/test_file_manager.py b/api/tests/unit_tests/core/file/test_file_manager.py new file mode 100644 index 0000000000..018bdee4d7 --- /dev/null +++ 
b/api/tests/unit_tests/core/file/test_file_manager.py @@ -0,0 +1,182 @@ +"""Tests for file_manager module, specifically multimodal content handling.""" + +from unittest.mock import patch + +from core.file import File, FileTransferMethod, FileType +from core.file.file_manager import ( + _encode_file_ref, + restore_multimodal_content, + to_prompt_message_content, +) +from core.model_runtime.entities.message_entities import ImagePromptMessageContent + + +class TestEncodeFileRef: + """Tests for _encode_file_ref function.""" + + def test_encodes_local_file(self): + """Local file should be encoded as 'local:id'.""" + file = File( + tenant_id="t", + type=FileType.IMAGE, + transfer_method=FileTransferMethod.LOCAL_FILE, + related_id="abc123", + storage_key="key", + ) + assert _encode_file_ref(file) == "local:abc123" + + def test_encodes_tool_file(self): + """Tool file should be encoded as 'tool:id'.""" + file = File( + tenant_id="t", + type=FileType.IMAGE, + transfer_method=FileTransferMethod.TOOL_FILE, + related_id="xyz789", + storage_key="key", + ) + assert _encode_file_ref(file) == "tool:xyz789" + + def test_encodes_remote_url(self): + """Remote URL should be encoded as 'remote:url'.""" + file = File( + tenant_id="t", + type=FileType.IMAGE, + transfer_method=FileTransferMethod.REMOTE_URL, + remote_url="https://example.com/image.png", + storage_key="", + ) + assert _encode_file_ref(file) == "remote:https://example.com/image.png" + + +class TestToPromptMessageContent: + """Tests for to_prompt_message_content function with file_ref field.""" + + @patch("core.file.file_manager.dify_config") + @patch("core.file.file_manager._get_encoded_string") + def test_includes_file_ref(self, mock_get_encoded, mock_config): + """Generated content should include file_ref field.""" + mock_config.MULTIMODAL_SEND_FORMAT = "base64" + mock_get_encoded.return_value = "base64data" + + file = File( + id="test-message-file-id", + tenant_id="test-tenant", + type=FileType.IMAGE, + 
transfer_method=FileTransferMethod.LOCAL_FILE, + related_id="test-related-id", + remote_url=None, + extension=".png", + mime_type="image/png", + filename="test.png", + storage_key="test-key", + ) + + result = to_prompt_message_content(file) + + assert isinstance(result, ImagePromptMessageContent) + assert result.file_ref == "local:test-related-id" + assert result.base64_data == "base64data" + + +class TestRestoreMultimodalContent: + """Tests for restore_multimodal_content function.""" + + def test_returns_content_unchanged_when_no_file_ref(self): + """Content without file_ref should pass through unchanged.""" + content = ImagePromptMessageContent( + format="png", + base64_data="existing-data", + mime_type="image/png", + file_ref=None, + ) + + result = restore_multimodal_content(content) + + assert result.base64_data == "existing-data" + + def test_returns_content_unchanged_when_already_has_data(self): + """Content that already has base64_data should not be reloaded.""" + content = ImagePromptMessageContent( + format="png", + base64_data="existing-data", + mime_type="image/png", + file_ref="local:file-id", + ) + + result = restore_multimodal_content(content) + + assert result.base64_data == "existing-data" + + def test_returns_content_unchanged_when_already_has_url(self): + """Content that already has url should not be reloaded.""" + content = ImagePromptMessageContent( + format="png", + url="https://example.com/image.png", + mime_type="image/png", + file_ref="local:file-id", + ) + + result = restore_multimodal_content(content) + + assert result.url == "https://example.com/image.png" + + @patch("core.file.file_manager.dify_config") + @patch("core.file.file_manager._build_file_from_ref") + @patch("core.file.file_manager._to_url") + def test_restores_url_from_file_ref(self, mock_to_url, mock_build_file, mock_config): + """Content should be restored from file_ref when url is empty (url mode).""" + mock_config.MULTIMODAL_SEND_FORMAT = "url" + 
mock_build_file.return_value = "mock_file" + mock_to_url.return_value = "https://restored-url.com/image.png" + + content = ImagePromptMessageContent( + format="png", + base64_data="", + url="", + mime_type="image/png", + filename="test.png", + file_ref="local:test-file-id", + ) + + result = restore_multimodal_content(content) + + assert result.url == "https://restored-url.com/image.png" + mock_build_file.assert_called_once() + + @patch("core.file.file_manager.dify_config") + @patch("core.file.file_manager._build_file_from_ref") + @patch("core.file.file_manager._get_encoded_string") + def test_restores_base64_from_file_ref(self, mock_get_encoded, mock_build_file, mock_config): + """Content should be restored as base64 when in base64 mode.""" + mock_config.MULTIMODAL_SEND_FORMAT = "base64" + mock_build_file.return_value = "mock_file" + mock_get_encoded.return_value = "restored-base64-data" + + content = ImagePromptMessageContent( + format="png", + base64_data="", + url="", + mime_type="image/png", + filename="test.png", + file_ref="local:test-file-id", + ) + + result = restore_multimodal_content(content) + + assert result.base64_data == "restored-base64-data" + mock_build_file.assert_called_once() + + def test_handles_invalid_file_ref_gracefully(self): + """Invalid file_ref format should be handled gracefully.""" + content = ImagePromptMessageContent( + format="png", + base64_data="", + url="", + mime_type="image/png", + file_ref="invalid_format_no_colon", + ) + + result = restore_multimodal_content(content) + + # Should return unchanged on error + assert result.base64_data == "" diff --git a/api/tests/unit_tests/core/llm_generator/output_parser/test_file_ref.py b/api/tests/unit_tests/core/llm_generator/output_parser/test_file_ref.py new file mode 100644 index 0000000000..6d18ac7fc9 --- /dev/null +++ b/api/tests/unit_tests/core/llm_generator/output_parser/test_file_ref.py @@ -0,0 +1,269 @@ +""" +Unit tests for file reference detection and conversion. 
+""" + +import uuid +from unittest.mock import MagicMock, patch + +import pytest + +from core.file import File, FileTransferMethod, FileType +from core.llm_generator.output_parser.file_ref import ( + FILE_REF_FORMAT, + convert_file_refs_in_output, + detect_file_ref_fields, + is_file_ref_property, +) +from core.variables.segments import ArrayFileSegment, FileSegment + + +class TestIsFileRefProperty: + """Tests for is_file_ref_property function.""" + + def test_valid_file_ref(self): + schema = {"type": "string", "format": FILE_REF_FORMAT} + assert is_file_ref_property(schema) is True + + def test_invalid_type(self): + schema = {"type": "number", "format": FILE_REF_FORMAT} + assert is_file_ref_property(schema) is False + + def test_missing_format(self): + schema = {"type": "string"} + assert is_file_ref_property(schema) is False + + def test_wrong_format(self): + schema = {"type": "string", "format": "uuid"} + assert is_file_ref_property(schema) is False + + +class TestDetectFileRefFields: + """Tests for detect_file_ref_fields function.""" + + def test_simple_file_ref(self): + schema = { + "type": "object", + "properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + }, + } + paths = detect_file_ref_fields(schema) + assert paths == ["image"] + + def test_multiple_file_refs(self): + schema = { + "type": "object", + "properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + "document": {"type": "string", "format": FILE_REF_FORMAT}, + "name": {"type": "string"}, + }, + } + paths = detect_file_ref_fields(schema) + assert set(paths) == {"image", "document"} + + def test_array_of_file_refs(self): + schema = { + "type": "object", + "properties": { + "files": { + "type": "array", + "items": {"type": "string", "format": FILE_REF_FORMAT}, + }, + }, + } + paths = detect_file_ref_fields(schema) + assert paths == ["files[*]"] + + def test_nested_file_ref(self): + schema = { + "type": "object", + "properties": { + "data": { + "type": "object", + 
"properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + }, + }, + }, + } + paths = detect_file_ref_fields(schema) + assert paths == ["data.image"] + + def test_no_file_refs(self): + schema = { + "type": "object", + "properties": { + "name": {"type": "string"}, + "count": {"type": "number"}, + }, + } + paths = detect_file_ref_fields(schema) + assert paths == [] + + def test_empty_schema(self): + schema = {} + paths = detect_file_ref_fields(schema) + assert paths == [] + + def test_mixed_schema(self): + schema = { + "type": "object", + "properties": { + "query": {"type": "string"}, + "image": {"type": "string", "format": FILE_REF_FORMAT}, + "documents": { + "type": "array", + "items": {"type": "string", "format": FILE_REF_FORMAT}, + }, + }, + } + paths = detect_file_ref_fields(schema) + assert set(paths) == {"image", "documents[*]"} + + +class TestConvertFileRefsInOutput: + """Tests for convert_file_refs_in_output function.""" + + @pytest.fixture + def mock_file(self): + """Create a mock File object with all required attributes.""" + file = MagicMock(spec=File) + file.type = FileType.IMAGE + file.transfer_method = FileTransferMethod.TOOL_FILE + file.related_id = "test-related-id" + file.remote_url = None + file.tenant_id = "tenant_123" + file.id = None + file.filename = "test.png" + file.extension = ".png" + file.mime_type = "image/png" + file.size = 1024 + file.dify_model_identity = "__dify__file__" + return file + + @pytest.fixture + def mock_build_from_mapping(self, mock_file): + """Mock the build_from_mapping function.""" + with patch("core.llm_generator.output_parser.file_ref.build_from_mapping") as mock: + mock.return_value = mock_file + yield mock + + def test_convert_simple_file_ref(self, mock_build_from_mapping, mock_file): + file_id = str(uuid.uuid4()) + output = {"image": file_id} + schema = { + "type": "object", + "properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + }, + } + + result = 
convert_file_refs_in_output(output, schema, "tenant_123") + + # Result should be wrapped in FileSegment + assert isinstance(result["image"], FileSegment) + assert result["image"].value == mock_file + mock_build_from_mapping.assert_called_once_with( + mapping={"transfer_method": "tool_file", "tool_file_id": file_id}, + tenant_id="tenant_123", + ) + + def test_convert_array_of_file_refs(self, mock_build_from_mapping, mock_file): + file_id1 = str(uuid.uuid4()) + file_id2 = str(uuid.uuid4()) + output = {"files": [file_id1, file_id2]} + schema = { + "type": "object", + "properties": { + "files": { + "type": "array", + "items": {"type": "string", "format": FILE_REF_FORMAT}, + }, + }, + } + + result = convert_file_refs_in_output(output, schema, "tenant_123") + + # Result should be wrapped in ArrayFileSegment + assert isinstance(result["files"], ArrayFileSegment) + assert list(result["files"].value) == [mock_file, mock_file] + assert mock_build_from_mapping.call_count == 2 + + def test_no_conversion_without_file_refs(self): + output = {"name": "test", "count": 5} + schema = { + "type": "object", + "properties": { + "name": {"type": "string"}, + "count": {"type": "number"}, + }, + } + + result = convert_file_refs_in_output(output, schema, "tenant_123") + + assert result == {"name": "test", "count": 5} + + def test_invalid_uuid_returns_none(self): + output = {"image": "not-a-valid-uuid"} + schema = { + "type": "object", + "properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + }, + } + + result = convert_file_refs_in_output(output, schema, "tenant_123") + + assert result["image"] is None + + def test_file_not_found_returns_none(self): + file_id = str(uuid.uuid4()) + output = {"image": file_id} + schema = { + "type": "object", + "properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + }, + } + + with patch("core.llm_generator.output_parser.file_ref.build_from_mapping") as mock: + mock.side_effect = ValueError("File not found") + result = 
convert_file_refs_in_output(output, schema, "tenant_123") + + assert result["image"] is None + + def test_preserves_non_file_fields(self, mock_build_from_mapping, mock_file): + file_id = str(uuid.uuid4()) + output = {"query": "search term", "image": file_id, "count": 10} + schema = { + "type": "object", + "properties": { + "query": {"type": "string"}, + "image": {"type": "string", "format": FILE_REF_FORMAT}, + "count": {"type": "number"}, + }, + } + + result = convert_file_refs_in_output(output, schema, "tenant_123") + + assert result["query"] == "search term" + assert isinstance(result["image"], FileSegment) + assert result["image"].value == mock_file + assert result["count"] == 10 + + def test_does_not_modify_original_output(self, mock_build_from_mapping, mock_file): + file_id = str(uuid.uuid4()) + original = {"image": file_id} + output = dict(original) + schema = { + "type": "object", + "properties": { + "image": {"type": "string", "format": FILE_REF_FORMAT}, + }, + } + + convert_file_refs_in_output(output, schema, "tenant_123") + + # Original should still contain the string ID + assert original["image"] == file_id diff --git a/api/tests/unit_tests/core/workflow/graph_engine/event_management/test_event_handlers.py b/api/tests/unit_tests/core/workflow/graph_engine/event_management/test_event_handlers.py index 5d17b7a243..65bd3d87d4 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/event_management/test_event_handlers.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/event_management/test_event_handlers.py @@ -25,6 +25,12 @@ class _StubErrorHandler: """Minimal error handler stub for tests.""" +class _StubNodeData: + """Simple node data stub with is_extractor_node property.""" + + is_extractor_node = False + + class _StubNode: """Simple node stub exposing the attributes needed by the state manager.""" @@ -36,6 +42,7 @@ class _StubNode: self.error_strategy = None self.retry_config = RetryConfig() self.retry = False + self.node_data = 
_StubNodeData() def _build_event_handler(node_id: str) -> tuple[EventHandler, EventManager, GraphExecution]: diff --git a/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py b/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py new file mode 100644 index 0000000000..e327e03159 --- /dev/null +++ b/api/tests/unit_tests/core/workflow/nodes/llm/test_llm_utils.py @@ -0,0 +1,174 @@ +"""Tests for llm_utils module, specifically multimodal content handling.""" + +import string +from unittest.mock import patch + +from core.model_runtime.entities.message_entities import ( + ImagePromptMessageContent, + TextPromptMessageContent, + UserPromptMessage, +) +from core.workflow.nodes.llm.llm_utils import ( + _truncate_multimodal_content, + build_context, + restore_multimodal_content_in_messages, +) + + +class TestTruncateMultimodalContent: + """Tests for _truncate_multimodal_content function.""" + + def test_returns_message_unchanged_for_string_content(self): + """String content should pass through unchanged.""" + message = UserPromptMessage(content="Hello, world!") + result = _truncate_multimodal_content(message) + assert result.content == "Hello, world!" 
+ + def test_returns_message_unchanged_for_none_content(self): + """None content should pass through unchanged.""" + message = UserPromptMessage(content=None) + result = _truncate_multimodal_content(message) + assert result.content is None + + def test_clears_base64_when_file_ref_present(self): + """When file_ref is present, base64_data and url should be cleared.""" + image_content = ImagePromptMessageContent( + format="png", + base64_data=string.ascii_lowercase, + url="https://example.com/image.png", + mime_type="image/png", + filename="test.png", + file_ref="local:test-file-id", + ) + message = UserPromptMessage(content=[image_content]) + + result = _truncate_multimodal_content(message) + + assert isinstance(result.content, list) + assert len(result.content) == 1 + result_content = result.content[0] + assert isinstance(result_content, ImagePromptMessageContent) + assert result_content.base64_data == "" + assert result_content.url == "" + # file_ref should be preserved + assert result_content.file_ref == "local:test-file-id" + + def test_truncates_base64_when_no_file_ref(self): + """When file_ref is missing (legacy), base64_data should be truncated.""" + long_base64 = "a" * 100 + image_content = ImagePromptMessageContent( + format="png", + base64_data=long_base64, + mime_type="image/png", + filename="test.png", + file_ref=None, + ) + message = UserPromptMessage(content=[image_content]) + + result = _truncate_multimodal_content(message) + + assert isinstance(result.content, list) + result_content = result.content[0] + assert isinstance(result_content, ImagePromptMessageContent) + # Should be truncated with marker + assert "...[TRUNCATED]..." 
in result_content.base64_data + assert len(result_content.base64_data) < len(long_base64) + + def test_preserves_text_content(self): + """Text content should pass through unchanged.""" + text_content = TextPromptMessageContent(data="Hello!") + image_content = ImagePromptMessageContent( + format="png", + base64_data="test123", + mime_type="image/png", + file_ref="local:file-id", + ) + message = UserPromptMessage(content=[text_content, image_content]) + + result = _truncate_multimodal_content(message) + + assert isinstance(result.content, list) + assert len(result.content) == 2 + # Text content unchanged + assert result.content[0].data == "Hello!" + # Image content base64 cleared + assert result.content[1].base64_data == "" + + +class TestBuildContext: + """Tests for build_context function.""" + + def test_excludes_system_messages(self): + """System messages should be excluded from context.""" + from core.model_runtime.entities.message_entities import SystemPromptMessage + + messages = [ + SystemPromptMessage(content="You are a helpful assistant."), + UserPromptMessage(content="Hello!"), + ] + + context = build_context(messages, "Hi there!") + + # Should have user message + assistant response, no system message + assert len(context) == 2 + assert context[0].content == "Hello!" + assert context[1].content == "Hi there!" + + def test_appends_assistant_response(self): + """Assistant response should be appended to context.""" + messages = [UserPromptMessage(content="What is 2+2?")] + + context = build_context(messages, "The answer is 4.") + + assert len(context) == 2 + assert context[1].content == "The answer is 4." 
+ + +class TestRestoreMultimodalContentInMessages: + """Tests for restore_multimodal_content_in_messages function.""" + + @patch("core.file.file_manager.restore_multimodal_content") + def test_restores_multimodal_content(self, mock_restore): + """Should restore multimodal content in messages.""" + # Setup mock + restored_content = ImagePromptMessageContent( + format="png", + base64_data="restored-base64", + mime_type="image/png", + file_ref="local:abc123", + ) + mock_restore.return_value = restored_content + + # Create message with truncated content + truncated_content = ImagePromptMessageContent( + format="png", + base64_data="", + mime_type="image/png", + file_ref="local:abc123", + ) + message = UserPromptMessage(content=[truncated_content]) + + result = restore_multimodal_content_in_messages([message]) + + assert len(result) == 1 + assert result[0].content[0].base64_data == "restored-base64" + mock_restore.assert_called_once() + + def test_passes_through_string_content(self): + """String content should pass through unchanged.""" + message = UserPromptMessage(content="Hello!") + + result = restore_multimodal_content_in_messages([message]) + + assert len(result) == 1 + assert result[0].content == "Hello!" + + def test_passes_through_text_content(self): + """TextPromptMessageContent should pass through unchanged.""" + text_content = TextPromptMessageContent(data="Hello!") + message = UserPromptMessage(content=[text_content]) + + result = restore_multimodal_content_in_messages([message]) + + assert len(result) == 1 + assert result[0].content[0].data == "Hello!" 
diff --git a/web/app/components/app/overview/trigger-card.tsx b/web/app/components/app/overview/trigger-card.tsx index a2d28606a1..c8f12745bd 100644 --- a/web/app/components/app/overview/trigger-card.tsx +++ b/web/app/components/app/overview/trigger-card.tsx @@ -14,7 +14,6 @@ import { BlockEnum } from '@/app/components/workflow/types' import { useAppContext } from '@/context/app-context' import { useDocLink } from '@/context/i18n' import { - useAppTriggers, useInvalidateAppTriggers, useUpdateTriggerStatus, diff --git a/web/app/components/base/icons/assets/vender/line/general/assemble-variables.svg b/web/app/components/base/icons/assets/vender/line/general/assemble-variables.svg new file mode 100644 index 0000000000..0575036fa9 --- /dev/null +++ b/web/app/components/base/icons/assets/vender/line/general/assemble-variables.svg @@ -0,0 +1,6 @@ + + + + + + diff --git a/web/app/components/base/icons/src/vender/line/general/AssembleVariables.json b/web/app/components/base/icons/src/vender/line/general/AssembleVariables.json new file mode 100644 index 0000000000..5db6132599 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/general/AssembleVariables.json @@ -0,0 +1,53 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "12", + "height": "12", + "viewBox": "0 0 12 12", + "fill": "none", + "xmlns": "http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M2.91992 1.6875C3.23055 1.68754 3.48242 1.93937 3.48242 2.25C3.48242 2.56063 3.23055 2.81246 2.91992 2.8125C2.63855 2.8125 2.41064 3.04041 2.41064 3.32178V5.46436C2.41061 5.61344 2.35148 5.75637 2.24609 5.86182L2.10791 6L2.24609 6.13818C2.35148 6.24363 2.41061 6.38656 2.41064 6.53564V8.67822C2.41064 8.95959 2.63855 9.1875 2.91992 9.1875C3.23055 9.18754 3.48242 9.43937 3.48242 9.75C3.48242 10.0606 3.23055 10.3125 2.91992 10.3125C2.01723 10.3125 1.28564 9.58091 1.28564 8.67822V6.76855L0.914551 
6.39795C0.809062 6.29246 0.75 6.14918 0.75 6C0.75 5.85082 0.809062 5.70754 0.914551 5.60205L1.28564 5.23145V3.32178C1.28564 2.41909 2.01723 1.6875 2.91992 1.6875Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M9.08008 1.6875C9.98276 1.68751 10.7144 2.41909 10.7144 3.32178V5.23145L11.085 5.60205C11.1904 5.70754 11.25 5.85082 11.25 6C11.25 6.14918 11.1904 6.29246 11.085 6.39795L10.7144 6.76855V8.67822C10.7144 9.58107 9.98213 10.3125 9.08008 10.3125C8.76942 10.3125 8.51758 10.0607 8.51758 9.75C8.51758 9.43934 8.76942 9.1875 9.08008 9.1875C9.36113 9.18749 9.58936 8.95943 9.58936 8.67822V6.53564C9.58939 6.38654 9.64849 6.24363 9.75391 6.13818L9.89209 6L9.75391 5.86182C9.64849 5.75637 9.58939 5.61346 9.58936 5.46436V3.32178C9.58936 3.04041 9.36144 2.81251 9.08008 2.8125C8.76942 2.8125 8.51758 2.56066 8.51758 2.25C8.51758 1.93934 8.76942 1.6875 9.08008 1.6875Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M5.24707 5.07715C5.36302 5.07715 5.46712 5.14866 5.50879 5.25684L5.8335 6.10059C5.88932 6.24563 6.00388 6.36018 6.14893 6.41602L6.99268 6.74072C7.10086 6.78238 7.17236 6.88648 7.17236 7.00244C7.17229 7.11832 7.10078 7.22202 6.99268 7.26367L6.14893 7.58838C6.00378 7.64424 5.88929 7.75912 5.8335 7.9043L5.50879 8.74756C5.46715 8.8558 5.36307 8.92725 5.24707 8.92725C5.13116 8.92717 5.02746 8.85572 4.98584 8.74756L4.66113 7.9043C4.60526 7.75904 4.49046 7.6442 4.34521 7.58838L3.50195 7.26367C3.39378 7.22205 3.32234 7.11835 3.32227 7.00244C3.32227 6.88645 3.39371 6.78236 3.50195 6.74072L4.34521 6.41602C4.49039 6.36022 4.60523 6.24573 4.66113 6.10059L4.98584 5.25684C5.02749 5.14874 5.13121 5.07723 5.24707 5.07715Z", + "fill": "currentColor" + }, + "children": [] + }, + { + "type": "element", + "name": "path", + "attributes": { + "d": "M6.89746 2.87744C6.98013 2.87754 7.05427 2.92822 7.08398 3.00537L7.29053 3.54297C7.34635 
3.68816 7.46125 3.80302 7.60645 3.85889L8.14404 4.06543C8.22123 4.0952 8.27246 4.16966 8.27246 4.25244C8.27236 4.33513 8.22116 4.40922 8.14404 4.43896L7.60645 4.64551C7.46125 4.70138 7.34635 4.81624 7.29053 4.96143L7.08398 5.49902C7.05428 5.57614 6.98014 5.62734 6.89746 5.62744C6.81468 5.62744 6.74019 5.57622 6.71045 5.49902L6.50391 4.96143C6.44808 4.81624 6.33318 4.70138 6.18799 4.64551L5.65039 4.43896C5.57328 4.40922 5.52256 4.33513 5.52246 4.25244C5.52246 4.16966 5.5732 4.0952 5.65039 4.06543L6.18799 3.85889C6.33318 3.80302 6.44808 3.68816 6.50391 3.54297L6.71045 3.00537C6.74019 2.92814 6.81469 2.87744 6.89746 2.87744Z", + "fill": "currentColor" + }, + "children": [] + } + ] + }, + "name": "AssembleVariables" +} diff --git a/web/app/components/base/icons/src/vender/line/general/AssembleVariables.tsx b/web/app/components/base/icons/src/vender/line/general/AssembleVariables.tsx new file mode 100644 index 0000000000..40b72561f2 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/general/AssembleVariables.tsx @@ -0,0 +1,20 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import type { IconData } from '@/app/components/base/icons/IconBase' +import * as React from 'react' +import IconBase from '@/app/components/base/icons/IconBase' +import data from './AssembleVariables.json' + +const Icon = ( + { + ref, + ...props + }: React.SVGProps & { + ref?: React.RefObject> + }, +) => + +Icon.displayName = 'AssembleVariables' + +export default Icon diff --git a/web/app/components/base/icons/src/vender/line/general/AssembleVariablesAlt.json b/web/app/components/base/icons/src/vender/line/general/AssembleVariablesAlt.json new file mode 100644 index 0000000000..9823224134 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/general/AssembleVariablesAlt.json @@ -0,0 +1,26 @@ +{ + "icon": { + "type": "element", + "isRootNode": true, + "name": "svg", + "attributes": { + "width": "12", + "height": "12", + "viewBox": "0 0 12 12", + "fill": "none", + 
"xmlns": "http://www.w3.org/2000/svg" + }, + "children": [ + { + "type": "element", + "name": "path", + "attributes": { + "d": "M5.14286 5.14286V3.42857L8 5.71429L5.14286 8V6.28571H0V5.14286H5.14286ZM0.83303 7.42857H2.04658C2.72474 9.10389 4.36721 10.28571 6.28571 10.28571C8.81049 10.28571 10.85717 8.23903 10.85717 5.71429C10.85717 3.18956 8.81049 1.14285 6.28571 1.14285C4.36721 1.14285 2.72474 2.32467 2.04658 4H0.83303C1.56118 1.68165 3.72706 0 6.28571 0C9.4416 0 12 2.55837 12 5.71429C12 8.87014 9.4416 11.42854 6.28571 11.42854C3.72706 11.42854 1.56118 9.74691 0.83303 7.42857Z", + "fill": "currentColor" + }, + "children": [] + } + ] + }, + "name": "AssembleVariablesAlt" +} diff --git a/web/app/components/base/icons/src/vender/line/general/AssembleVariablesAlt.tsx b/web/app/components/base/icons/src/vender/line/general/AssembleVariablesAlt.tsx new file mode 100644 index 0000000000..980d9fc2b1 --- /dev/null +++ b/web/app/components/base/icons/src/vender/line/general/AssembleVariablesAlt.tsx @@ -0,0 +1,20 @@ +// GENERATE BY script +// DON NOT EDIT IT MANUALLY + +import type { IconData } from '@/app/components/base/icons/IconBase' +import * as React from 'react' +import IconBase from '@/app/components/base/icons/IconBase' +import data from './AssembleVariablesAlt.json' + +const Icon = ( + { + ref, + ...props + }: React.SVGProps & { + ref?: React.RefObject> + }, +) => + +Icon.displayName = 'AssembleVariablesAlt' + +export default Icon diff --git a/web/app/components/base/icons/src/vender/line/general/index.ts b/web/app/components/base/icons/src/vender/line/general/index.ts index 2409367264..90c37a6665 100644 --- a/web/app/components/base/icons/src/vender/line/general/index.ts +++ b/web/app/components/base/icons/src/vender/line/general/index.ts @@ -1,3 +1,5 @@ +export { default as AssembleVariables } from './AssembleVariables' +export { default as AssembleVariablesAlt } from './AssembleVariablesAlt' export { default as AtSign } from './AtSign' export { default as 
Bookmark } from './Bookmark' export { default as Check } from './Check' diff --git a/web/app/components/base/prompt-editor/constants.tsx b/web/app/components/base/prompt-editor/constants.tsx index d6b8e9fcb4..9fcf445bfb 100644 --- a/web/app/components/base/prompt-editor/constants.tsx +++ b/web/app/components/base/prompt-editor/constants.tsx @@ -38,13 +38,16 @@ export const getInputVars = (text: string): ValueSelector[] => { if (!text || typeof text !== 'string') return [] - const allVars = text.match(/\{\{#([^#]*)#\}\}/g) + const allVars = text.match(/\{\{[@#]([^@#]*)[@#]\}\}/g) if (allVars && allVars?.length > 0) { // {{#context#}}, {{#query#}} is not input vars const inputVars = allVars .filter(item => item.includes('.')) .map((item) => { - const valueSelector = item.replace('{{#', '').replace('#}}', '').split('.') + const valueSelector = item + .replace(/^\{\{[@#]/, '') + .replace(/[@#]\}\}$/, '') + .split('.') if (valueSelector[1] === 'sys' && /^\d+$/.test(valueSelector[0])) return valueSelector.slice(1) diff --git a/web/app/components/base/prompt-editor/hooks.ts b/web/app/components/base/prompt-editor/hooks.ts index 10578e0004..2e69f0669c 100644 --- a/web/app/components/base/prompt-editor/hooks.ts +++ b/web/app/components/base/prompt-editor/hooks.ts @@ -155,14 +155,13 @@ export type TriggerFn = ( text: string, editor: LexicalEditor, ) => MenuTextMatch | null -export const PUNCTUATION = '\\.,\\+\\*\\?\\$\\@\\|#{}\\(\\)\\^\\-\\[\\]\\\\/!%\'"~=<>_:;' export function useBasicTypeaheadTriggerMatch( trigger: string, { minLength = 1, maxLength = 75 }: { minLength?: number, maxLength?: number }, ): TriggerFn { return useCallback( (text: string) => { - const validChars = `[${PUNCTUATION}\\s]` + const validChars = '[^\\n]' const TypeaheadTriggerRegex = new RegExp( '(.*)(' + `[${trigger}]` diff --git a/web/app/components/base/prompt-editor/index.tsx b/web/app/components/base/prompt-editor/index.tsx index 717039e7ce..59227c3d8e 100644 --- 
a/web/app/components/base/prompt-editor/index.tsx +++ b/web/app/components/base/prompt-editor/index.tsx @@ -5,6 +5,7 @@ import type { } from 'lexical' import type { FC } from 'react' import type { + AgentBlockType, ContextBlockType, CurrentBlockType, ErrorMessageBlockType, @@ -103,6 +104,7 @@ export type PromptEditorProps = { currentBlock?: CurrentBlockType errorMessageBlock?: ErrorMessageBlockType lastRunBlock?: LastRunBlockType + agentBlock?: AgentBlockType isSupportFileVar?: boolean } @@ -128,6 +130,7 @@ const PromptEditor: FC = ({ currentBlock, errorMessageBlock, lastRunBlock, + agentBlock, isSupportFileVar, }) => { const { eventEmitter } = useEventEmitterContextContext() @@ -139,6 +142,7 @@ const PromptEditor: FC = ({ { replace: TextNode, with: (node: TextNode) => new CustomTextNode(node.__text), + withKlass: CustomTextNode, }, ContextBlockNode, HistoryBlockNode, @@ -212,6 +216,22 @@ const PromptEditor: FC = ({ lastRunBlock={lastRunBlock} isSupportFileVar={isSupportFileVar} /> + {(!agentBlock || agentBlock.show) && ( + + )} value.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') + export const usePromptOptions = ( contextBlock?: ContextBlockType, queryBlock?: QueryBlockType, @@ -154,7 +156,7 @@ export const useVariableOptions = ( if (!queryString) return baseOptions - const regex = new RegExp(queryString, 'i') + const regex = new RegExp(escapeRegExp(queryString), 'i') return baseOptions.filter(option => regex.test(option.key)) }, [editor, queryString, variableBlock]) @@ -232,7 +234,7 @@ export const useExternalToolOptions = ( if (!queryString) return baseToolOptions - const regex = new RegExp(queryString, 'i') + const regex = new RegExp(escapeRegExp(queryString), 'i') return baseToolOptions.filter(option => regex.test(option.key)) }, [editor, queryString, externalToolBlockType]) diff --git a/web/app/components/base/prompt-editor/plugins/component-picker-block/index.tsx b/web/app/components/base/prompt-editor/plugins/component-picker-block/index.tsx index 
728ae21a70..78b7f1e753 100644 --- a/web/app/components/base/prompt-editor/plugins/component-picker-block/index.tsx +++ b/web/app/components/base/prompt-editor/plugins/component-picker-block/index.tsx @@ -1,6 +1,7 @@ import type { MenuRenderFn } from '@lexical/react/LexicalTypeaheadMenuPlugin' import type { TextNode } from 'lexical' import type { + AgentBlockType, ContextBlockType, CurrentBlockType, ErrorMessageBlockType, @@ -12,6 +13,8 @@ import type { WorkflowVariableBlockType, } from '../../types' import type { PickerBlockMenuOption } from './menu' +import type { AgentNode } from '@/app/components/base/prompt-editor/types' +import type { ValueSelector } from '@/app/components/workflow/types' import { flip, offset, @@ -20,16 +23,25 @@ import { } from '@floating-ui/react' import { useLexicalComposerContext } from '@lexical/react/LexicalComposerContext' import { LexicalTypeaheadMenuPlugin } from '@lexical/react/LexicalTypeaheadMenuPlugin' -import { KEY_ESCAPE_COMMAND } from 'lexical' +import { + $getRoot, + $getSelection, + $insertNodes, + $isRangeSelection, + KEY_ESCAPE_COMMAND, +} from 'lexical' import { Fragment, memo, useCallback, + useMemo, useState, } from 'react' import ReactDOM from 'react-dom' import { GeneratorType } from '@/app/components/app/configuration/config/automatic/types' +import AgentNodeList from '@/app/components/workflow/nodes/_base/components/agent-node-list' import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars' +import { BlockEnum } from '@/app/components/workflow/types' import { useEventEmitterContextContext } from '@/context/event-emitter' import { useBasicTypeaheadTriggerMatch } from '../../hooks' import { $splitNodeContainingQuery } from '../../utils' @@ -38,6 +50,7 @@ import { INSERT_ERROR_MESSAGE_BLOCK_COMMAND } from '../error-message-block' import { INSERT_LAST_RUN_BLOCK_COMMAND } from '../last-run-block' import { INSERT_VARIABLE_VALUE_BLOCK_COMMAND } from '../variable-block' 
import { INSERT_WORKFLOW_VARIABLE_BLOCK_COMMAND } from '../workflow-variable-block' +import { $createWorkflowVariableBlockNode } from '../workflow-variable-block/node' import { useOptions } from './hooks' type ComponentPickerProps = { @@ -51,6 +64,7 @@ type ComponentPickerProps = { currentBlock?: CurrentBlockType errorMessageBlock?: ErrorMessageBlockType lastRunBlock?: LastRunBlockType + agentBlock?: AgentBlockType isSupportFileVar?: boolean } const ComponentPicker = ({ @@ -64,6 +78,7 @@ const ComponentPicker = ({ currentBlock, errorMessageBlock, lastRunBlock, + agentBlock, isSupportFileVar, }: ComponentPickerProps) => { const { eventEmitter } = useEventEmitterContextContext() @@ -78,11 +93,26 @@ const ComponentPicker = ({ ], }) const [editor] = useLexicalComposerContext() + const useExternalSearch = triggerString === '/' || triggerString === '@' const checkForTriggerMatch = useBasicTypeaheadTriggerMatch(triggerString, { minLength: 0, - maxLength: 0, + maxLength: useExternalSearch ? 75 : 0, }) + const getMatchFromSelection = useCallback(() => { + const selection = $getSelection() + if (!$isRangeSelection(selection) || !selection.isCollapsed()) + return null + const anchor = selection.anchor + if (anchor.type !== 'text') + return null + const anchorNode = anchor.getNode() + if (!anchorNode.isSimpleText()) + return null + const text = anchorNode.getTextContent().slice(0, anchor.offset) + return checkForTriggerMatch(text, editor) + }, [checkForTriggerMatch, editor]) + const [queryString, setQueryString] = useState(null) eventEmitter?.useSubscription((v: any) => { @@ -103,6 +133,7 @@ const ComponentPicker = ({ currentBlock, errorMessageBlock, lastRunBlock, + useExternalSearch ? (queryString ?? 
undefined) : undefined, ) const onSelectOption = useCallback( @@ -124,7 +155,10 @@ const ComponentPicker = ({ const handleSelectWorkflowVariable = useCallback((variables: string[]) => { editor.update(() => { - const needRemove = $splitNodeContainingQuery(checkForTriggerMatch(triggerString, editor)!) + const match = getMatchFromSelection() + if (!match) + return + const needRemove = $splitNodeContainingQuery(match) if (needRemove) needRemove.remove() }) @@ -144,19 +178,68 @@ const ComponentPicker = ({ else { editor.dispatchCommand(INSERT_WORKFLOW_VARIABLE_BLOCK_COMMAND, variables) } - }, [editor, currentBlock?.generatorType, checkForTriggerMatch, triggerString]) + }, [editor, currentBlock?.generatorType, getMatchFromSelection]) const handleClose = useCallback(() => { const escapeEvent = new KeyboardEvent('keydown', { key: 'Escape' }) editor.dispatchCommand(KEY_ESCAPE_COMMAND, escapeEvent) }, [editor]) + const handleSelectAssembleVariables = useCallback((): ValueSelector | null => { + editor.update(() => { + const match = getMatchFromSelection() + if (!match) + return + const needRemove = $splitNodeContainingQuery(match) + if (needRemove) + needRemove.remove() + }) + const assembleVariables = workflowVariableBlock?.onAssembleVariables?.() + if (assembleVariables && assembleVariables.length) + editor.dispatchCommand(INSERT_WORKFLOW_VARIABLE_BLOCK_COMMAND, assembleVariables) + handleClose() + return assembleVariables ?? 
null + }, [editor, getMatchFromSelection, workflowVariableBlock, handleClose]) + + const handleSelectAgent = useCallback((agent: { id: string, title: string }) => { + editor.update(() => { + const match = getMatchFromSelection() + if (!match) + return + const needRemove = $splitNodeContainingQuery(match) + if (needRemove) + needRemove.remove() + + const root = $getRoot() + const firstChild = root.getFirstChild() + if (firstChild) { + const selection = firstChild.selectStart() + if (selection) { + const workflowVariableBlockNode = $createWorkflowVariableBlockNode([agent.id, 'text'], {}, undefined) + $insertNodes([workflowVariableBlockNode]) + } + } + }) + agentBlock?.onSelect?.(agent) + handleClose() + }, [editor, getMatchFromSelection, agentBlock, handleClose]) + + const isAgentTrigger = triggerString === '@' && agentBlock?.show + const showAssembleVariables = triggerString === '/' + const agentNodes: AgentNode[] = useMemo(() => agentBlock?.agentNodes || [], [agentBlock?.agentNodes]) + const renderMenu = useCallback>(( anchorElementRef, { options, selectedIndex, selectOptionAndCleanUp, setHighlightedIndex }, ) => { - if (!(anchorElementRef.current && (allFlattenOptions.length || workflowVariableBlock?.show))) - return null + if (isAgentTrigger) { + if (!(anchorElementRef.current && agentNodes.length)) + return null + } + else { + if (!(anchorElementRef.current && (allFlattenOptions.length || workflowVariableBlock?.show))) + return null + } setTimeout(() => { if (anchorElementRef.current) @@ -167,9 +250,6 @@ const ComponentPicker = ({ <> { ReactDOM.createPortal( - // The `LexicalMenu` will try to calculate the position of the floating menu based on the first child. - // Since we use floating ui, we need to wrap it with a div to prevent the position calculation being affected. - // See https://github.com/facebook/lexical/blob/ac97dfa9e14a73ea2d6934ff566282d7f758e8bb/packages/lexical-react/src/shared/LexicalMenu.ts#L493
- { - workflowVariableBlock?.show && ( -
- { - handleSelectWorkflowVariable(variables) - }} - maxHeightClass="max-h-[34vh]" - isSupportFileVar={isSupportFileVar} + {isAgentTrigger + ? ( + ({ + ...node, + type: BlockEnum.Agent || BlockEnum.LLM, + }))} + onSelect={handleSelectAgent} onClose={handleClose} onBlur={handleClose} - showManageInputField={workflowVariableBlock.showManageInputField} - onManageInputField={workflowVariableBlock.onManageInputField} + maxHeightClass="max-h-[34vh]" autoFocus={false} - isInCodeGeneratorInstructionEditor={currentBlock?.generatorType === GeneratorType.code} + hideSearch={useExternalSearch} + externalSearchText={useExternalSearch ? (queryString ?? '') : undefined} + enableKeyboardNavigation={useExternalSearch} /> -
- ) - } - { - workflowVariableBlock?.show && !!options.length && ( -
- ) - } -
- { - options.map((option, index) => ( - + ) + : ( + <> { - // Divider - index !== 0 && options.at(index - 1)?.group !== option.group && ( + workflowVariableBlock?.show && ( +
+ { + handleSelectWorkflowVariable(variables) + }} + maxHeightClass="max-h-[34vh]" + isSupportFileVar={isSupportFileVar} + onClose={handleClose} + onBlur={handleClose} + showManageInputField={workflowVariableBlock.showManageInputField} + onManageInputField={workflowVariableBlock.onManageInputField} + showAssembleVariables={showAssembleVariables} + onAssembleVariables={showAssembleVariables ? handleSelectAssembleVariables : undefined} + autoFocus={false} + isInCodeGeneratorInstructionEditor={currentBlock?.generatorType === GeneratorType.code} + hideSearch={useExternalSearch} + externalSearchText={useExternalSearch ? (queryString ?? '') : undefined} + enableKeyboardNavigation={useExternalSearch} + /> +
+ ) + } + { + workflowVariableBlock?.show && !!options.length && (
) } - {option.renderMenuOption({ - queryString, - isSelected: selectedIndex === index, - onSelect: () => { - selectOptionAndCleanUp(option) - }, - onSetHighlight: () => { - setHighlightedIndex(index) - }, - })} -
- )) - } -
+
+ { + options.map((option, index) => ( + + { + index !== 0 && options.at(index - 1)?.group !== option.group && ( +
+ ) + } + {option.renderMenuOption({ + queryString, + isSelected: selectedIndex === index, + onSelect: () => { + selectOptionAndCleanUp(option) + }, + onSetHighlight: () => { + setHighlightedIndex(index) + }, + })} +
+ )) + } +
+ + )}
, anchorElementRef.current, @@ -236,7 +341,7 @@ const ComponentPicker = ({ } ) - }, [allFlattenOptions.length, workflowVariableBlock?.show, floatingStyles, isPositioned, refs, workflowVariableOptions, isSupportFileVar, handleClose, currentBlock?.generatorType, handleSelectWorkflowVariable, queryString, workflowVariableBlock?.showManageInputField, workflowVariableBlock?.onManageInputField]) + }, [isAgentTrigger, agentNodes, allFlattenOptions.length, workflowVariableBlock?.show, floatingStyles, isPositioned, refs, handleSelectAgent, handleClose, workflowVariableOptions, isSupportFileVar, currentBlock?.generatorType, handleSelectWorkflowVariable, queryString, workflowVariableBlock?.showManageInputField, workflowVariableBlock?.onManageInputField, showAssembleVariables, handleSelectAssembleVariables, useExternalSearch]) return ( (workflowNodesMap) const node = localWorkflowNodesMap![variables[isRagVar ? 1 : 0]] + const isContextVariable = (node?.type === BlockEnum.Agent || node?.type === BlockEnum.LLM) + && variables[variablesLength - 1] === 'context' const isException = isExceptionVariable(varName, node?.type) const variableValid = useMemo(() => { @@ -134,6 +137,9 @@ const WorkflowVariableBlockComponent = ({ }) }, [node, reactflow, store]) + if (isContextVariable) + return + const Item = ( } getTextContent(): string { - return `{{#${this.getVariables().join('.')}#}}` + const variables = this.getVariables() + const node = this.getWorkflowNodesMap()?.[variables[0]] + const isContextVariable = (node?.type === BlockEnum.Agent || node?.type === BlockEnum.LLM) + && variables[variables.length - 1] === 'context' + const marker = isContextVariable ? 
'@' : '#' + return `{{${marker}${variables.join('.')}${marker}}}` } } export function $createWorkflowVariableBlockNode(variables: string[], workflowNodesMap: WorkflowNodesMap, getVarType?: GetVarType, environmentVariables?: Var[], conversationVariables?: Var[], ragVariables?: Var[]): WorkflowVariableBlockNode { diff --git a/web/app/components/base/prompt-editor/types.ts b/web/app/components/base/prompt-editor/types.ts index 875f9eab07..3b4565e5c6 100644 --- a/web/app/components/base/prompt-editor/types.ts +++ b/web/app/components/base/prompt-editor/types.ts @@ -71,6 +71,19 @@ export type WorkflowVariableBlockType = { getVarType?: GetVarType showManageInputField?: boolean onManageInputField?: () => void + showAssembleVariables?: boolean + onAssembleVariables?: () => ValueSelector | null +} + +export type AgentNode = { + id: string + title: string +} + +export type AgentBlockType = { + show?: boolean + agentNodes?: AgentNode[] + onSelect?: (agent: AgentNode) => void } export type MenuTextMatch = { diff --git a/web/app/components/sub-graph/components/config-panel.tsx b/web/app/components/sub-graph/components/config-panel.tsx new file mode 100644 index 0000000000..edbd746550 --- /dev/null +++ b/web/app/components/sub-graph/components/config-panel.tsx @@ -0,0 +1,194 @@ +'use client' +import type { FC } from 'react' +import type { Item } from '@/app/components/base/select' +import type { MentionConfig } from '@/app/components/workflow/nodes/_base/types' +import type { Node, NodeOutPutVar, ValueSelector } from '@/app/components/workflow/types' +import { RiCheckLine } from '@remixicon/react' +import { memo, useCallback, useMemo, useState } from 'react' +import { useTranslation } from 'react-i18next' +import { SimpleSelect } from '@/app/components/base/select' +import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' +import Field from '@/app/components/workflow/nodes/_base/components/field' +import VarReferencePicker from 
'@/app/components/workflow/nodes/_base/components/variable/var-reference-picker' +import Tab, { TabType } from '@/app/components/workflow/nodes/_base/components/workflow-panel/tab' +import { CodeLanguage } from '@/app/components/workflow/nodes/code/types' +import { cn } from '@/utils/classnames' + +type ConfigPanelProps = { + agentName: string + extractorNodeId: string + mentionConfig: MentionConfig + availableNodes: Node[] + availableVars: NodeOutPutVar[] + onMentionConfigChange: (config: MentionConfig) => void +} + +const ConfigPanel: FC = ({ + agentName, + extractorNodeId, + mentionConfig, + availableNodes, + availableVars, + onMentionConfigChange, +}) => { + const { t } = useTranslation() + const [tabType, setTabType] = useState(TabType.settings) + + const resolvedExtractorId = mentionConfig.extractor_node_id || extractorNodeId + + const selectedOutput = useMemo(() => { + if (!resolvedExtractorId || !mentionConfig.output_selector?.length) + return [] + + return [resolvedExtractorId, ...(mentionConfig.output_selector || [])] + }, [mentionConfig.output_selector, resolvedExtractorId]) + + const handleOutputVarChange = useCallback((value: ValueSelector | string) => { + const selector = Array.isArray(value) ? value : [] + const nextExtractorId = selector[0] || resolvedExtractorId + const nextOutputSelector = selector.length > 1 ? 
selector.slice(1) : [] + + onMentionConfigChange({ + ...mentionConfig, + extractor_node_id: nextExtractorId, + output_selector: nextOutputSelector, + }) + }, [mentionConfig, onMentionConfigChange, resolvedExtractorId]) + + const whenOutputNoneOptions = useMemo(() => ([ + { + value: 'raise_error', + name: t('subGraphModal.whenOutputNone.error', { ns: 'workflow' }), + description: t('subGraphModal.whenOutputNone.errorDesc', { ns: 'workflow' }), + }, + { + value: 'use_default', + name: t('subGraphModal.whenOutputNone.default', { ns: 'workflow' }), + description: t('subGraphModal.whenOutputNone.defaultDesc', { ns: 'workflow' }), + }, + ]), [t]) + const selectedWhenOutputNoneOption = useMemo(() => ( + whenOutputNoneOptions.find(item => item.value === mentionConfig.null_strategy) ?? whenOutputNoneOptions[0] + ), [mentionConfig.null_strategy, whenOutputNoneOptions]) + + const handleNullStrategyChange = useCallback((item: Item) => { + if (typeof item.value !== 'string') + return + onMentionConfigChange({ + ...mentionConfig, + null_strategy: item.value as MentionConfig['null_strategy'], + }) + }, [mentionConfig, onMentionConfigChange]) + + const handleDefaultValueChange = useCallback((value: string) => { + const trimmed = value.trim() + let nextValue: unknown = value + if ((trimmed.startsWith('{') && trimmed.endsWith('}')) || (trimmed.startsWith('[') && trimmed.endsWith(']'))) { + try { + nextValue = JSON.parse(trimmed) + } + catch { + nextValue = value + } + } + + onMentionConfigChange({ + ...mentionConfig, + default_value: nextValue, + }) + }, [mentionConfig, onMentionConfigChange]) + const defaultValue = mentionConfig.default_value ?? '' + const shouldFormatDefaultValue = typeof defaultValue !== 'string' + + return ( +
+
+
+ {t('subGraphModal.internalStructure', { ns: 'workflow' })} +
+
+ {t('subGraphModal.internalStructureDesc', { ns: 'workflow', name: agentName })} +
+
+
+ +
+ {tabType === TabType.lastRun && ( +
+

+ {t('subGraphModal.noRunHistory', { ns: 'workflow' })} +

+
+ )} + {tabType === TabType.settings && ( +
+
+ + + +
+
+ + ( +
+
+ {selected && ( + + )} +
+
+
{item.name}
+
{item.description}
+
+
+ )} + /> +
+ )} + > +
+ {selectedWhenOutputNoneOption?.description && ( +
+ {selectedWhenOutputNoneOption.description} +
+ )} + {mentionConfig.null_strategy === 'use_default' && ( +
+ +
+ )} +
+ +
+
+ )} + + ) +} + +export default memo(ConfigPanel) diff --git a/web/app/components/sub-graph/components/sub-graph-children.tsx b/web/app/components/sub-graph/components/sub-graph-children.tsx new file mode 100644 index 0000000000..4a18a66a1c --- /dev/null +++ b/web/app/components/sub-graph/components/sub-graph-children.tsx @@ -0,0 +1,103 @@ +import type { FC } from 'react' +import type { MentionConfig } from '@/app/components/workflow/nodes/_base/types' +import type { NodeOutPutVar } from '@/app/components/workflow/types' +import { memo, useMemo } from 'react' +import { useStore as useReactFlowStore } from 'reactflow' +import { useShallow } from 'zustand/react/shallow' +import { useIsChatMode, useWorkflowVariables } from '@/app/components/workflow/hooks' +import Panel from '@/app/components/workflow/panel' +import { useStore } from '@/app/components/workflow/store' +import ConfigPanel from './config-panel' + +type SubGraphChildrenProps + = | { + variant: 'agent' + title: string + extractorNodeId: string + mentionConfig: MentionConfig + onMentionConfigChange: (config: MentionConfig) => void + } + | { + variant: 'assemble' + title: string + extractorNodeId: string + } + +const SubGraphChildren: FC = (props) => { + const { + variant, + title, + extractorNodeId, + } = props + const { getNodeAvailableVars } = useWorkflowVariables() + const isChatMode = useIsChatMode() + const nodePanelWidth = useStore(s => s.nodePanelWidth) + + const selectedNode = useReactFlowStore(useShallow((s) => { + return s.getNodes().find(node => node.data.selected) + })) + + const extractorNode = useReactFlowStore(useShallow((s) => { + return s.getNodes().find(node => node.id === extractorNodeId) + })) + + const availableNodes = useMemo(() => { + return extractorNode ? 
[extractorNode] : [] + }, [extractorNode]) + + const availableVars = useMemo(() => { + if (!extractorNode) + return [] + + const vars = getNodeAvailableVars({ + beforeNodes: [extractorNode], + isChatMode, + filterVar: () => true, + }) + return vars.filter(item => item.nodeId === extractorNode.id) + }, [extractorNode, getNodeAvailableVars, isChatMode]) + + const agentProps = variant === 'agent' ? props : null + + const panelRight = useMemo(() => { + if (!agentProps || selectedNode) + return null + + return ( +
+
+ +
+
+ ) + }, [agentProps, availableNodes, availableVars, extractorNodeId, nodePanelWidth, selectedNode, title]) + + if (variant === 'assemble') { + return ( + + ) + } + + return ( + + ) +} + +export default memo(SubGraphChildren) diff --git a/web/app/components/sub-graph/components/sub-graph-main.tsx b/web/app/components/sub-graph/components/sub-graph-main.tsx new file mode 100644 index 0000000000..a6abcb2d94 --- /dev/null +++ b/web/app/components/sub-graph/components/sub-graph-main.tsx @@ -0,0 +1,141 @@ +import type { FC } from 'react' +import type { Viewport } from 'reactflow' +import type { SyncWorkflowDraft, SyncWorkflowDraftCallback } from '../types' +import type { Shape as HooksStoreShape } from '@/app/components/workflow/hooks-store' +import type { MentionConfig } from '@/app/components/workflow/nodes/_base/types' +import type { Edge, Node } from '@/app/components/workflow/types' +import { useCallback, useMemo } from 'react' +import { useStoreApi } from 'reactflow' +import { WorkflowWithInnerContext } from '@/app/components/workflow' +import { useSetWorkflowVarsWithValue } from '@/app/components/workflow/hooks/use-fetch-workflow-inspect-vars' +import { useInspectVarsCrudCommon } from '@/app/components/workflow/hooks/use-inspect-vars-crud-common' +import { BlockEnum } from '@/app/components/workflow/types' +import { FlowType } from '@/types/common' +import { useAvailableNodesMetaData } from '../hooks' +import SubGraphChildren from './sub-graph-children' + +type SubGraphMainBaseProps = { + nodes: Node[] + edges: Edge[] + viewport: Viewport + title: string + extractorNodeId: string + configsMap?: HooksStoreShape['configsMap'] + selectableNodeTypes?: BlockEnum[] + onSave?: (nodes: Node[], edges: Edge[]) => void + onSyncWorkflowDraft?: SyncWorkflowDraft +} + +type SubGraphMainProps + = | (SubGraphMainBaseProps & { + variant: 'agent' + mentionConfig: MentionConfig + onMentionConfigChange: (config: MentionConfig) => void + }) + | (SubGraphMainBaseProps & { + variant: 
'assemble' + }) + +const SubGraphMain: FC = (props) => { + const { + nodes, + edges, + viewport, + variant, + title, + extractorNodeId, + configsMap, + selectableNodeTypes, + onSave, + onSyncWorkflowDraft, + } = props + const reactFlowStore = useStoreApi() + const availableNodesMetaData = useAvailableNodesMetaData() + const flowType = configsMap?.flowType ?? FlowType.appFlow + const flowId = configsMap?.flowId ?? '' + const { fetchInspectVars } = useSetWorkflowVarsWithValue({ + flowType, + flowId, + }) + const inspectVarsCrud = useInspectVarsCrudCommon({ + flowType, + flowId, + }) + + const handleSyncSubGraphDraft = useCallback(async () => { + const { getNodes, edges } = reactFlowStore.getState() + await onSave?.(getNodes() as Node[], edges as Edge[]) + }, [onSave, reactFlowStore]) + + const handleSyncWorkflowDraft = useCallback(async ( + notRefreshWhenSyncError?: boolean, + callback?: SyncWorkflowDraftCallback, + ) => { + try { + await handleSyncSubGraphDraft() + if (onSyncWorkflowDraft) { + await onSyncWorkflowDraft(notRefreshWhenSyncError, callback) + return + } + callback?.onSuccess?.() + } + catch { + callback?.onError?.() + } + finally { + callback?.onSettled?.() + } + }, [handleSyncSubGraphDraft, onSyncWorkflowDraft]) + + const resolvedSelectableTypes = useMemo(() => { + if (selectableNodeTypes && selectableNodeTypes.length > 0) + return selectableNodeTypes + return variant === 'agent' ? 
[BlockEnum.LLM] : [BlockEnum.Code] + }, [selectableNodeTypes, variant]) + + const hooksStore = useMemo(() => ({ + interactionMode: 'subgraph', + subGraphSelectableNodeTypes: resolvedSelectableTypes, + availableNodesMetaData, + configsMap, + fetchInspectVars, + ...inspectVarsCrud, + doSyncWorkflowDraft: handleSyncWorkflowDraft, + syncWorkflowDraftWhenPageClose: handleSyncSubGraphDraft, + }), [availableNodesMetaData, configsMap, fetchInspectVars, handleSyncSubGraphDraft, handleSyncWorkflowDraft, inspectVarsCrud, resolvedSelectableTypes]) + + const subGraphChildren = variant === 'agent' + ? ( + + ) + : ( + + ) + + return ( + + {subGraphChildren} + + ) +} + +export default SubGraphMain diff --git a/web/app/components/sub-graph/hooks/index.ts b/web/app/components/sub-graph/hooks/index.ts new file mode 100644 index 0000000000..71ef209f64 --- /dev/null +++ b/web/app/components/sub-graph/hooks/index.ts @@ -0,0 +1,2 @@ +export { useAvailableNodesMetaData } from './use-available-nodes-meta-data' +export { useSubGraphNodes } from './use-sub-graph-nodes' diff --git a/web/app/components/sub-graph/hooks/use-available-nodes-meta-data.ts b/web/app/components/sub-graph/hooks/use-available-nodes-meta-data.ts new file mode 100644 index 0000000000..f9a843e7a4 --- /dev/null +++ b/web/app/components/sub-graph/hooks/use-available-nodes-meta-data.ts @@ -0,0 +1,43 @@ +import type { AvailableNodesMetaData } from '@/app/components/workflow/hooks-store/store' +import { useMemo } from 'react' +import { useTranslation } from 'react-i18next' +import { WORKFLOW_COMMON_NODES } from '@/app/components/workflow/constants/node' +import { BlockEnum } from '@/app/components/workflow/types' + +export const useAvailableNodesMetaData = () => { + const { t } = useTranslation() + + const availableNodesMetaData = useMemo(() => WORKFLOW_COMMON_NODES.map((node) => { + const { metaData } = node + const title = t(`blocks.${metaData.type}`, { ns: 'workflow' }) + const description = 
t(`blocksAbout.${metaData.type}`, { ns: 'workflow' }) + return { + ...node, + metaData: { + ...metaData, + title, + description, + }, + defaultValue: { + ...node.defaultValue, + type: metaData.type, + title, + }, + } + }), [t]) + + const availableNodesMetaDataMap = useMemo(() => availableNodesMetaData.reduce((acc, node) => { + acc![node.metaData.type] = node + return acc + }, {} as AvailableNodesMetaData['nodesMap']), [availableNodesMetaData]) + + return useMemo(() => { + return { + nodes: availableNodesMetaData, + nodesMap: { + ...availableNodesMetaDataMap, + [BlockEnum.VariableAssigner]: availableNodesMetaDataMap?.[BlockEnum.VariableAggregator], + }, + } + }, [availableNodesMetaData, availableNodesMetaDataMap]) +} diff --git a/web/app/components/sub-graph/hooks/use-sub-graph-nodes.ts b/web/app/components/sub-graph/hooks/use-sub-graph-nodes.ts new file mode 100644 index 0000000000..c2a868f05e --- /dev/null +++ b/web/app/components/sub-graph/hooks/use-sub-graph-nodes.ts @@ -0,0 +1,20 @@ +import type { Edge, Node } from '@/app/components/workflow/types' +import { useMemo } from 'react' +import { initialEdges, initialNodes } from '@/app/components/workflow/utils' + +export const useSubGraphNodes = (nodes: Node[], edges: Edge[]) => { + const processedNodes = useMemo( + () => initialNodes(nodes, edges), + [nodes, edges], + ) + + const processedEdges = useMemo( + () => initialEdges(edges, nodes), + [edges, nodes], + ) + + return { + nodes: processedNodes, + edges: processedEdges, + } +} diff --git a/web/app/components/sub-graph/index.tsx b/web/app/components/sub-graph/index.tsx new file mode 100644 index 0000000000..28adbac608 --- /dev/null +++ b/web/app/components/sub-graph/index.tsx @@ -0,0 +1,274 @@ +import type { FC } from 'react' +import type { Viewport } from 'reactflow' +import type { SubGraphProps } from './types' +import type { InjectWorkflowStoreSliceFn } from '@/app/components/workflow/store' +import type { PromptItem, PromptTemplateItem } from 
'@/app/components/workflow/types' +import { memo, useEffect, useMemo } from 'react' +import WorkflowWithDefaultContext from '@/app/components/workflow' +import { NODE_WIDTH_X_OFFSET, START_INITIAL_POSITION } from '@/app/components/workflow/constants' +import { WorkflowContextProvider } from '@/app/components/workflow/context' +import { CUSTOM_SUB_GRAPH_START_NODE } from '@/app/components/workflow/nodes/sub-graph-start/constants' +import { useStore } from '@/app/components/workflow/store' +import { BlockEnum, EditionType, isPromptMessageContext, PromptRole } from '@/app/components/workflow/types' +import SubGraphMain from './components/sub-graph-main' +import { useSubGraphNodes } from './hooks' +import { createSubGraphSlice } from './store' + +const SUB_GRAPH_EDGE_GAP = 160 +const SUB_GRAPH_ENTRY_POSITION = { + x: START_INITIAL_POSITION.x, + y: 150, +} +const SUB_GRAPH_EXTRACTOR_POSITION = { + x: SUB_GRAPH_ENTRY_POSITION.x + NODE_WIDTH_X_OFFSET - SUB_GRAPH_EDGE_GAP, + y: SUB_GRAPH_ENTRY_POSITION.y, +} + +const defaultViewport: Viewport = { + x: SUB_GRAPH_EDGE_GAP, + y: 50, + zoom: 1.3, +} + +const SubGraphContent: FC = (props) => { + const { + toolNodeId, + paramKey, + toolParamValue, + parentAvailableNodes, + parentAvailableVars, + configsMap, + selectableNodeTypes, + onSave, + onSyncWorkflowDraft, + } = props + + const isAgentVariant = props.variant === 'agent' + const sourceTitle = isAgentVariant ? (props.agentName || '') : (props.title || '') + const resolvedAgentNodeId = isAgentVariant ? 
props.agentNodeId : '' + + const setParentAvailableVars = useStore(state => state.setParentAvailableVars) + const setParentAvailableNodes = useStore(state => state.setParentAvailableNodes) + + useEffect(() => { + setParentAvailableVars?.(parentAvailableVars || []) + setParentAvailableNodes?.(parentAvailableNodes || []) + }, [parentAvailableNodes, parentAvailableVars, setParentAvailableNodes, setParentAvailableVars]) + + const promptText = useMemo(() => { + if (!isAgentVariant || !toolParamValue) + return '' + // Reason: escape agent id before building a regex pattern. + const escapedAgentId = resolvedAgentNodeId.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') + const leadingPattern = new RegExp(`^\\{\\{[@#]${escapedAgentId}\\.context[@#]\\}\\}`) + return toolParamValue.replace(leadingPattern, '') + }, [isAgentVariant, resolvedAgentNodeId, toolParamValue]) + + const startNode = useMemo(() => { + if (!isAgentVariant) { + return { + id: 'subgraph-source', + type: CUSTOM_SUB_GRAPH_START_NODE, + position: SUB_GRAPH_ENTRY_POSITION, + data: { + type: BlockEnum.Start, + title: sourceTitle, + desc: '', + selected: false, + iconType: 'assemble', + variables: [], + }, + selected: false, + selectable: false, + draggable: false, + connectable: false, + focusable: false, + deletable: false, + } + } + + return { + id: 'subgraph-source', + type: CUSTOM_SUB_GRAPH_START_NODE, + position: SUB_GRAPH_ENTRY_POSITION, + data: { + type: BlockEnum.Start, + title: sourceTitle, + desc: '', + selected: false, + iconType: 'agent', + variables: [], + }, + selected: false, + selectable: false, + draggable: false, + connectable: false, + focusable: false, + deletable: false, + } + }, [isAgentVariant, sourceTitle]) + + const extractorDisplayNode = useMemo(() => { + if (isAgentVariant) { + const extractorNode = props.extractorNode + if (!extractorNode) + return null + + const applyPromptText = (item: PromptItem) => { + if (item.edition_type === EditionType.jinja2) { + return { + ...item, + text: promptText, 
+ jinja2_text: promptText, + } + } + return { ...item, text: promptText } + } + + const nextPromptTemplate = (() => { + const template = extractorNode.data.prompt_template + if (!Array.isArray(template)) + return applyPromptText(template as PromptItem) + + const userIndex = template.findIndex( + item => !isPromptMessageContext(item) && (item as PromptItem).role === PromptRole.user, + ) + if (userIndex >= 0) { + return template.map((item, index) => { + if (index !== userIndex) + return item + return applyPromptText(item as PromptItem) + }) as PromptTemplateItem[] + } + + const useJinja = template.some( + item => !isPromptMessageContext(item) && (item as PromptItem).edition_type === EditionType.jinja2, + ) + const defaultUserPrompt: PromptItem = useJinja + ? { + role: PromptRole.user, + text: promptText, + jinja2_text: promptText, + edition_type: EditionType.jinja2, + } + : { role: PromptRole.user, text: promptText } + return [...template, defaultUserPrompt] as PromptTemplateItem[] + })() + + return { + ...extractorNode, + hidden: false, + selected: false, + position: SUB_GRAPH_EXTRACTOR_POSITION, + data: { + ...extractorNode.data, + selected: false, + prompt_template: nextPromptTemplate, + }, + } + } + + const extractorNode = props.extractorNode + if (!extractorNode) + return null + + return { + ...extractorNode, + hidden: false, + selected: false, + position: SUB_GRAPH_EXTRACTOR_POSITION, + data: { + ...extractorNode.data, + selected: false, + }, + } + }, [isAgentVariant, promptText, props.extractorNode]) + + const nodesSource = useMemo(() => { + if (!extractorDisplayNode) + return [startNode] + + return [startNode, extractorDisplayNode] + }, [extractorDisplayNode, startNode]) + + const edgesSource = useMemo(() => { + if (!extractorDisplayNode) + return [] + + return [ + { + id: `${startNode.id}-${extractorDisplayNode.id}`, + source: startNode.id, + sourceHandle: 'source', + target: extractorDisplayNode.id, + targetHandle: 'target', + type: 'custom', + selectable: 
false, + data: { + sourceType: BlockEnum.Start, + targetType: isAgentVariant ? BlockEnum.LLM : BlockEnum.Code, + _isTemp: true, + _isSubGraphTemp: true, + }, + }, + ] + }, [extractorDisplayNode, isAgentVariant, startNode]) + + const { nodes, edges } = useSubGraphNodes(nodesSource, edgesSource) + + if (isAgentVariant) { + return ( + + + + ) + } + + return ( + + + + ) +} + +const SubGraph: FC = (props) => { + return ( + + + + ) +} + +export default memo(SubGraph) diff --git a/web/app/components/sub-graph/store/index.ts b/web/app/components/sub-graph/store/index.ts new file mode 100644 index 0000000000..0701cdc3f2 --- /dev/null +++ b/web/app/components/sub-graph/store/index.ts @@ -0,0 +1,12 @@ +import type { CreateSubGraphSlice, SubGraphSliceShape } from '../types' + +const initialState: Omit = { + parentAvailableVars: [], + parentAvailableNodes: [], +} + +export const createSubGraphSlice: CreateSubGraphSlice = set => ({ + ...initialState, + setParentAvailableVars: vars => set(() => ({ parentAvailableVars: vars })), + setParentAvailableNodes: nodes => set(() => ({ parentAvailableNodes: nodes })), +}) diff --git a/web/app/components/sub-graph/types.ts b/web/app/components/sub-graph/types.ts new file mode 100644 index 0000000000..94e3b4584d --- /dev/null +++ b/web/app/components/sub-graph/types.ts @@ -0,0 +1,58 @@ +import type { StateCreator } from 'zustand' +import type { Shape as HooksStoreShape } from '@/app/components/workflow/hooks-store' +import type { MentionConfig } from '@/app/components/workflow/nodes/_base/types' +import type { CodeNodeType } from '@/app/components/workflow/nodes/code/types' +import type { LLMNodeType } from '@/app/components/workflow/nodes/llm/types' +import type { BlockEnum, Edge, Node, NodeOutPutVar, ValueSelector } from '@/app/components/workflow/types' + +export type SyncWorkflowDraftCallback = { + onSuccess?: () => void + onError?: () => void + onSettled?: () => void +} + +export type SyncWorkflowDraft = ( + notRefreshWhenSyncError?: 
boolean, + callback?: SyncWorkflowDraftCallback, +) => Promise + +export type SubGraphVariant = 'agent' | 'assemble' + +type BaseSubGraphProps = { + toolNodeId: string + paramKey: string + configsMap?: HooksStoreShape['configsMap'] + toolParamValue?: string + parentAvailableNodes?: Node[] + parentAvailableVars?: NodeOutPutVar[] + selectableNodeTypes?: BlockEnum[] + onSave?: (nodes: Node[], edges: Edge[]) => void + onSyncWorkflowDraft?: SyncWorkflowDraft +} + +export type AgentSubGraphProps = BaseSubGraphProps & { + variant: 'agent' + sourceVariable: ValueSelector + agentNodeId: string + agentName: string + mentionConfig: MentionConfig + onMentionConfigChange: (config: MentionConfig) => void + extractorNode?: Node +} + +export type AssembleSubGraphProps = BaseSubGraphProps & { + variant: 'assemble' + title: string + extractorNode?: Node +} + +export type SubGraphProps = AgentSubGraphProps | AssembleSubGraphProps + +export type SubGraphSliceShape = { + parentAvailableVars: NodeOutPutVar[] + parentAvailableNodes: Node[] + setParentAvailableVars: (vars: NodeOutPutVar[]) => void + setParentAvailableNodes: (nodes: Node[]) => void +} + +export type CreateSubGraphSlice = StateCreator diff --git a/web/app/components/workflow/block-icon.tsx b/web/app/components/workflow/block-icon.tsx index 9f9dbdbc83..980f86cd80 100644 --- a/web/app/components/workflow/block-icon.tsx +++ b/web/app/components/workflow/block-icon.tsx @@ -1,6 +1,7 @@ import type { FC } from 'react' import { memo } from 'react' import AppIcon from '@/app/components/base/app-icon' +import { Folder as FolderLine } from '@/app/components/base/icons/src/vender/line/files' import { Agent, Answer, @@ -56,6 +57,7 @@ const DEFAULT_ICON_MAP: Record = { [BlockEnum.VariableAssigner]: 'bg-util-colors-blue-blue-500', [BlockEnum.VariableAggregator]: 'bg-util-colors-blue-blue-500', [BlockEnum.Tool]: 'bg-util-colors-blue-blue-500', + [BlockEnum.Group]: 'bg-util-colors-blue-blue-500', [BlockEnum.Assigner]: 
'bg-util-colors-blue-blue-500', [BlockEnum.ParameterExtractor]: 'bg-util-colors-blue-blue-500', [BlockEnum.DocExtractor]: 'bg-util-colors-green-green-500', diff --git a/web/app/components/workflow/constants.ts b/web/app/components/workflow/constants.ts index 04846ca98e..27ca318503 100644 --- a/web/app/components/workflow/constants.ts +++ b/web/app/components/workflow/constants.ts @@ -132,6 +132,11 @@ export const SUPPORT_OUTPUT_VARS_NODE = [ ] export const AGENT_OUTPUT_STRUCT: Var[] = [ + { + variable: 'context', + type: VarType.arrayObject, + schemaType: 'List[promptMessage]', + }, { variable: 'usage', type: VarType.object, @@ -143,6 +148,11 @@ export const LLM_OUTPUT_STRUCT: Var[] = [ variable: 'text', type: VarType.string, }, + { + variable: 'context', + type: VarType.arrayObject, + schemaType: 'List[promptMessage]', + }, { variable: 'reasoning_content', type: VarType.string, diff --git a/web/app/components/workflow/custom-edge.tsx b/web/app/components/workflow/custom-edge.tsx index 0440ca0c3e..f727d1fa0d 100644 --- a/web/app/components/workflow/custom-edge.tsx +++ b/web/app/components/workflow/custom-edge.tsx @@ -25,7 +25,8 @@ import { useAvailableBlocks, useNodesInteractions, } from './hooks' -import { NodeRunningStatus } from './types' +import { useHooksStore } from './hooks-store' +import { BlockEnum, NodeRunningStatus } from './types' import { getEdgeColor } from './utils' const CustomEdge = ({ @@ -56,6 +57,8 @@ const CustomEdge = ({ }) const [open, setOpen] = useState(false) const { handleNodeAdd } = useNodesInteractions() + const interactionMode = useHooksStore(s => s.interactionMode) + const allowGraphActions = interactionMode !== 'subgraph' const { availablePrevBlocks } = useAvailableBlocks((data as Edge['data'])!.targetType, (data as Edge['data'])?.isInIteration || (data as Edge['data'])?.isInLoop) const { availableNextBlocks } = useAvailableBlocks((data as Edge['data'])!.sourceType, (data as Edge['data'])?.isInIteration || (data as 
Edge['data'])?.isInLoop) const { @@ -136,35 +139,37 @@ const CustomEdge = ({ stroke, strokeWidth: 2, opacity: data._dimmed ? 0.3 : (data._waitingRun ? 0.7 : 1), - strokeDasharray: data._isTemp ? '8 8' : undefined, + strokeDasharray: (data._isTemp && !data._isSubGraphTemp && data.sourceType !== BlockEnum.Group && data.targetType !== BlockEnum.Group) ? '8 8' : undefined, }} /> - -
- 'hover:scale-150 transition-all'} - /> -
-
+ {allowGraphActions && ( + +
+ 'hover:scale-150 transition-all'} + /> +
+
+ )} ) } diff --git a/web/app/components/workflow/custom-group-node/constants.ts b/web/app/components/workflow/custom-group-node/constants.ts new file mode 100644 index 0000000000..5b65aaa80b --- /dev/null +++ b/web/app/components/workflow/custom-group-node/constants.ts @@ -0,0 +1,11 @@ +export const CUSTOM_GROUP_NODE = 'custom-group' +export const CUSTOM_GROUP_INPUT_NODE = 'custom-group-input' +export const CUSTOM_GROUP_EXIT_PORT_NODE = 'custom-group-exit-port' + +export const GROUP_CHILDREN_Z_INDEX = 1002 + +export const UI_ONLY_GROUP_NODE_TYPES = new Set([ + CUSTOM_GROUP_NODE, + CUSTOM_GROUP_INPUT_NODE, + CUSTOM_GROUP_EXIT_PORT_NODE, +]) diff --git a/web/app/components/workflow/custom-group-node/custom-group-exit-port-node.tsx b/web/app/components/workflow/custom-group-node/custom-group-exit-port-node.tsx new file mode 100644 index 0000000000..969cf69935 --- /dev/null +++ b/web/app/components/workflow/custom-group-node/custom-group-exit-port-node.tsx @@ -0,0 +1,54 @@ +'use client' + +import type { FC } from 'react' +import type { CustomGroupExitPortNodeData } from './types' +import { memo } from 'react' +import { Handle, Position } from 'reactflow' +import { cn } from '@/utils/classnames' + +type CustomGroupExitPortNodeProps = { + id: string + data: CustomGroupExitPortNodeData +} + +const CustomGroupExitPortNode: FC = ({ id: _id, data }) => { + return ( +
+ {/* Target handle - receives internal connections from leaf nodes */} + + + {/* Source handle - connects to external nodes */} + + + {/* Icon */} + + + +
+ ) +} + +export default memo(CustomGroupExitPortNode) diff --git a/web/app/components/workflow/custom-group-node/custom-group-input-node.tsx b/web/app/components/workflow/custom-group-node/custom-group-input-node.tsx new file mode 100644 index 0000000000..3476b3d154 --- /dev/null +++ b/web/app/components/workflow/custom-group-node/custom-group-input-node.tsx @@ -0,0 +1,55 @@ +'use client' + +import type { FC } from 'react' +import type { CustomGroupInputNodeData } from './types' +import { memo } from 'react' +import { Handle, Position } from 'reactflow' +import { cn } from '@/utils/classnames' + +type CustomGroupInputNodeProps = { + id: string + data: CustomGroupInputNodeData +} + +const CustomGroupInputNode: FC = ({ id: _id, data }) => { + return ( +
+ {/* Target handle - receives external connections */} + + + {/* Source handle - connects to entry nodes */} + + + {/* Icon */} + + + + +
+ ) +} + +export default memo(CustomGroupInputNode) diff --git a/web/app/components/workflow/custom-group-node/custom-group-node.tsx b/web/app/components/workflow/custom-group-node/custom-group-node.tsx new file mode 100644 index 0000000000..c51418a5de --- /dev/null +++ b/web/app/components/workflow/custom-group-node/custom-group-node.tsx @@ -0,0 +1,94 @@ +'use client' + +import type { FC } from 'react' +import type { CustomGroupNodeData } from './types' +import { memo } from 'react' +import { Handle, Position } from 'reactflow' +import { Plus02 } from '@/app/components/base/icons/src/vender/line/general' +import { cn } from '@/utils/classnames' + +type CustomGroupNodeProps = { + id: string + data: CustomGroupNodeData +} + +const CustomGroupNode: FC = ({ data }) => { + const { group } = data + const exitPorts = group.exitPorts ?? [] + const connectedSourceHandleIds = data._connectedSourceHandleIds ?? [] + + return ( +
+ {/* Group Header */} +
+ + {group.title} + +
+ + {/* Target handle for incoming connections */} + + +
+ {exitPorts.map((port, index) => { + const connected = connectedSourceHandleIds.includes(port.portNodeId) + + return ( +
+
+ {port.name} +
+ + + + {/* Visual "+" indicator (styling aligned with existing branch handles) */} + +
+ ) + })} +
+
+ ) +} + +export default memo(CustomGroupNode) diff --git a/web/app/components/workflow/custom-group-node/index.ts b/web/app/components/workflow/custom-group-node/index.ts new file mode 100644 index 0000000000..af8fa042e8 --- /dev/null +++ b/web/app/components/workflow/custom-group-node/index.ts @@ -0,0 +1,19 @@ +export { + CUSTOM_GROUP_EXIT_PORT_NODE, + CUSTOM_GROUP_INPUT_NODE, + CUSTOM_GROUP_NODE, + GROUP_CHILDREN_Z_INDEX, + UI_ONLY_GROUP_NODE_TYPES, +} from './constants' + +export { default as CustomGroupExitPortNode } from './custom-group-exit-port-node' + +export { default as CustomGroupInputNode } from './custom-group-input-node' +export { default as CustomGroupNode } from './custom-group-node' +export type { + CustomGroupExitPortNodeData, + CustomGroupInputNodeData, + CustomGroupNodeData, + ExitPortInfo, + GroupMember, +} from './types' diff --git a/web/app/components/workflow/custom-group-node/types.ts b/web/app/components/workflow/custom-group-node/types.ts new file mode 100644 index 0000000000..baf7b2362a --- /dev/null +++ b/web/app/components/workflow/custom-group-node/types.ts @@ -0,0 +1,82 @@ +import type { BlockEnum } from '../types' + +/** + * Exit port info stored in Group node + */ +export type ExitPortInfo = { + portNodeId: string + leafNodeId: string + sourceHandle: string + name: string +} + +/** + * Group node data structure + * node.type = 'custom-group' + * node.data.type = '' (empty string to bypass backend NodeType validation) + */ +export type CustomGroupNodeData = { + type: '' // Empty string bypasses backend NodeType validation + title: string + desc?: string + _connectedSourceHandleIds?: string[] + _connectedTargetHandleIds?: string[] + group: { + groupId: string + title: string + memberNodeIds: string[] + entryNodeIds: string[] + inputNodeId: string + exitPorts: ExitPortInfo[] + collapsed: boolean + } + width?: number + height?: number + selected?: boolean + _isTempNode?: boolean +} + +/** + * Group Input node data structure + * 
node.type = 'custom-group-input' + * node.data.type = '' + */ +export type CustomGroupInputNodeData = { + type: '' + title: string + desc?: string + groupInput: { + groupId: string + title: string + } + selected?: boolean + _isTempNode?: boolean +} + +/** + * Exit Port node data structure + * node.type = 'custom-group-exit-port' + * node.data.type = '' + */ +export type CustomGroupExitPortNodeData = { + type: '' + title: string + desc?: string + exitPort: { + groupId: string + leafNodeId: string + sourceHandle: string + name: string + } + selected?: boolean + _isTempNode?: boolean +} + +/** + * Member node info for display + */ +export type GroupMember = { + id: string + type: BlockEnum + label?: string +} diff --git a/web/app/components/workflow/hooks-store/store.ts b/web/app/components/workflow/hooks-store/store.ts index 44014fc0d7..716d77b7a7 100644 --- a/web/app/components/workflow/hooks-store/store.ts +++ b/web/app/components/workflow/hooks-store/store.ts @@ -23,6 +23,7 @@ export type AvailableNodesMetaData = { nodesMap?: Record> } export type CommonHooksFnMap = { + interactionMode?: 'default' | 'subgraph' doSyncWorkflowDraft: ( notRefreshWhenSyncError?: boolean, callback?: { @@ -45,6 +46,7 @@ export type CommonHooksFnMap = { handleWorkflowTriggerWebhookRunInWorkflow: (params: { nodeId: string }) => void handleWorkflowTriggerPluginRunInWorkflow: (nodeId?: string) => void handleWorkflowRunAllTriggersInWorkflow: (nodeIds: string[]) => void + subGraphSelectableNodeTypes?: BlockEnum[] availableNodesMetaData?: AvailableNodesMetaData getWorkflowRunAndTraceUrl: (runId?: string) => { runUrl: string, traceUrl: string } exportCheck?: () => Promise @@ -76,6 +78,7 @@ export type Shape = { } & CommonHooksFnMap export const createHooksStore = ({ + interactionMode = 'default', doSyncWorkflowDraft = async () => noop(), syncWorkflowDraftWhenPageClose = noop, handleRefreshWorkflowDraft = noop, @@ -91,6 +94,7 @@ export const createHooksStore = ({ 
handleWorkflowTriggerWebhookRunInWorkflow = noop, handleWorkflowTriggerPluginRunInWorkflow = noop, handleWorkflowRunAllTriggersInWorkflow = noop, + subGraphSelectableNodeTypes, availableNodesMetaData = { nodes: [], }, @@ -118,6 +122,7 @@ export const createHooksStore = ({ }: Partial) => { return createStore(set => ({ refreshAll: props => set(state => ({ ...state, ...props })), + interactionMode, doSyncWorkflowDraft, syncWorkflowDraftWhenPageClose, handleRefreshWorkflowDraft, @@ -133,6 +138,7 @@ export const createHooksStore = ({ handleWorkflowTriggerWebhookRunInWorkflow, handleWorkflowTriggerPluginRunInWorkflow, handleWorkflowRunAllTriggersInWorkflow, + subGraphSelectableNodeTypes, availableNodesMetaData, getWorkflowRunAndTraceUrl, exportCheck, diff --git a/web/app/components/workflow/hooks/use-checklist.ts b/web/app/components/workflow/hooks/use-checklist.ts index 5a9e4dacb7..a721951390 100644 --- a/web/app/components/workflow/hooks/use-checklist.ts +++ b/web/app/components/workflow/hooks/use-checklist.ts @@ -197,7 +197,8 @@ export const useChecklist = (nodes: Node[], edges: Edge[]) => { // Start nodes and Trigger nodes should not show unConnected error if they have validation errors // or if they are valid start nodes (even without incoming connections) const isStartNodeMeta = nodesExtraData?.[node.data.type as BlockEnum]?.metaData.isStart ?? false - const canSkipConnectionCheck = shouldCheckStartNode ? isStartNodeMeta : true + const isSubGraphNode = Boolean((node.data as { parent_node_id?: string }).parent_node_id) + const canSkipConnectionCheck = isSubGraphNode || (shouldCheckStartNode ? isStartNodeMeta : true) const isUnconnected = !validNodes.find(n => n.id === node.id) const shouldShowError = errorMessage || (isUnconnected && !canSkipConnectionCheck) @@ -390,7 +391,8 @@ export const useChecklistBeforePublish = () => { } const isStartNodeMeta = nodesExtraData?.[node.data.type as BlockEnum]?.metaData.isStart ?? 
false - const canSkipConnectionCheck = shouldCheckStartNode ? isStartNodeMeta : true + const isSubGraphNode = Boolean((node.data as { parent_node_id?: string }).parent_node_id) + const canSkipConnectionCheck = isSubGraphNode || (shouldCheckStartNode ? isStartNodeMeta : true) const isUnconnected = !validNodes.find(n => n.id === node.id) if (isUnconnected && !canSkipConnectionCheck) { diff --git a/web/app/components/workflow/hooks/use-edges-interactions.ts b/web/app/components/workflow/hooks/use-edges-interactions.ts index 5104b47ef4..6d17f3ce75 100644 --- a/web/app/components/workflow/hooks/use-edges-interactions.ts +++ b/web/app/components/workflow/hooks/use-edges-interactions.ts @@ -10,6 +10,7 @@ import { useCallback } from 'react' import { useStoreApi, } from 'reactflow' +import { BlockEnum } from '../types' import { getNodesConnectedSourceOrTargetHandleIdsMap } from '../utils' import { useNodesSyncDraft } from './use-nodes-sync-draft' import { useNodesReadOnly } from './use-workflow' @@ -108,6 +109,50 @@ export const useEdgesInteractions = () => { return const currentEdge = edges[currentEdgeIndex] const nodes = getNodes() + + // collect edges to delete (including corresponding real edges for temp edges) + const edgesToDelete: Set = new Set([currentEdge.id]) + + // if deleting a temp edge connected to a group, also delete the corresponding real hidden edge + if (currentEdge.data?._isTemp) { + const groupNode = nodes.find(n => + n.data.type === BlockEnum.Group + && (n.id === currentEdge.source || n.id === currentEdge.target), + ) + + if (groupNode) { + const memberIds = new Set((groupNode.data.members || []).map((m: { id: string }) => m.id)) + + if (currentEdge.target === groupNode.id) { + // inbound temp edge: find real edge with same source, target is a head node + edges.forEach((edge) => { + if (edge.source === currentEdge.source + && memberIds.has(edge.target) + && edge.sourceHandle === currentEdge.sourceHandle) { + edgesToDelete.add(edge.id) + } + }) + } + 
else if (currentEdge.source === groupNode.id) { + // outbound temp edge: sourceHandle format is "leafNodeId-originalHandle" + const sourceHandle = currentEdge.sourceHandle || '' + const lastDashIndex = sourceHandle.lastIndexOf('-') + if (lastDashIndex > 0) { + const leafNodeId = sourceHandle.substring(0, lastDashIndex) + const originalHandle = sourceHandle.substring(lastDashIndex + 1) + + edges.forEach((edge) => { + if (edge.source === leafNodeId + && edge.target === currentEdge.target + && (edge.sourceHandle || 'source') === originalHandle) { + edgesToDelete.add(edge.id) + } + }) + } + } + } + } + const nodesConnectedSourceOrTargetHandleIdsMap = getNodesConnectedSourceOrTargetHandleIdsMap( [ { type: 'remove', edge: currentEdge }, @@ -126,7 +171,10 @@ export const useEdgesInteractions = () => { }) setNodes(newNodes) const newEdges = produce(edges, (draft) => { - draft.splice(currentEdgeIndex, 1) + for (let i = draft.length - 1; i >= 0; i--) { + if (edgesToDelete.has(draft[i].id)) + draft.splice(i, 1) + } }) setEdges(newEdges) handleSyncWorkflowDraft() diff --git a/web/app/components/workflow/hooks/use-make-group.ts b/web/app/components/workflow/hooks/use-make-group.ts new file mode 100644 index 0000000000..321f0e393a --- /dev/null +++ b/web/app/components/workflow/hooks/use-make-group.ts @@ -0,0 +1,138 @@ +import type { PredecessorHandle } from '../utils' +import { useMemo } from 'react' +import { useStore as useReactFlowStore } from 'reactflow' +import { shallow } from 'zustand/shallow' +import { BlockEnum } from '../types' +import { getCommonPredecessorHandles } from '../utils' + +export type MakeGroupAvailability = { + canMakeGroup: boolean + branchEntryNodeIds: string[] + commonPredecessorHandle?: PredecessorHandle +} + +type MinimalEdge = { + id: string + source: string + sourceHandle: string + target: string +} + +/** + * Pure function to check if the selected nodes can be grouped. + * Can be called both from React hooks and imperatively. 
+ */ +export const checkMakeGroupAvailability = ( + selectedNodeIds: string[], + edges: MinimalEdge[], + hasGroupNode = false, +): MakeGroupAvailability => { + if (selectedNodeIds.length <= 1 || hasGroupNode) { + return { + canMakeGroup: false, + branchEntryNodeIds: [], + commonPredecessorHandle: undefined, + } + } + + const selectedNodeIdSet = new Set(selectedNodeIds) + const inboundFromOutsideTargets = new Set() + const incomingEdgeCounts = new Map() + const incomingFromSelectedTargets = new Set() + + edges.forEach((edge) => { + // Only consider edges whose target is inside the selected subgraph. + if (!selectedNodeIdSet.has(edge.target)) + return + + incomingEdgeCounts.set(edge.target, (incomingEdgeCounts.get(edge.target) ?? 0) + 1) + + if (selectedNodeIdSet.has(edge.source)) + incomingFromSelectedTargets.add(edge.target) + else + inboundFromOutsideTargets.add(edge.target) + }) + + // Branch head (entry) definition: + // - has at least one incoming edge + // - and all its incoming edges come from outside the selected subgraph + const branchEntryNodeIds = selectedNodeIds.filter((nodeId) => { + const incomingEdgeCount = incomingEdgeCounts.get(nodeId) ?? 0 + if (incomingEdgeCount === 0) + return false + + return !incomingFromSelectedTargets.has(nodeId) + }) + + // No branch head means we cannot tell how many branches are represented by this selection. + if (branchEntryNodeIds.length === 0) { + return { + canMakeGroup: false, + branchEntryNodeIds, + commonPredecessorHandle: undefined, + } + } + + // Guardrail: disallow side entrances into the selected subgraph. + // If an outside node connects to a non-entry node inside the selection, the grouping boundary is ambiguous. 
+ const branchEntryNodeIdSet = new Set(branchEntryNodeIds) + const hasInboundToNonEntryNode = Array.from(inboundFromOutsideTargets).some(nodeId => !branchEntryNodeIdSet.has(nodeId)) + + if (hasInboundToNonEntryNode) { + return { + canMakeGroup: false, + branchEntryNodeIds, + commonPredecessorHandle: undefined, + } + } + + // Compare the branch heads by their common predecessor "handler" (source node + sourceHandle). + // This is required for multi-handle nodes like If-Else / Classifier where different branches use different handles. + const commonPredecessorHandles = getCommonPredecessorHandles( + branchEntryNodeIds, + // Only look at edges coming from outside the selected subgraph when determining the "pre" handler. + edges.filter(edge => !selectedNodeIdSet.has(edge.source)), + ) + + if (commonPredecessorHandles.length !== 1) { + return { + canMakeGroup: false, + branchEntryNodeIds, + commonPredecessorHandle: undefined, + } + } + + return { + canMakeGroup: true, + branchEntryNodeIds, + commonPredecessorHandle: commonPredecessorHandles[0], + } +} + +export const useMakeGroupAvailability = (selectedNodeIds: string[]): MakeGroupAvailability => { + const edgeKeys = useReactFlowStore((state) => { + const delimiter = '\u0000' + const keys = state.edges.map(edge => `${edge.source}${delimiter}${edge.sourceHandle || 'source'}${delimiter}${edge.target}`) + keys.sort() + return keys + }, shallow) + + const hasGroupNode = useReactFlowStore((state) => { + return state.getNodes().some(node => node.selected && node.data.type === BlockEnum.Group) + }) + + return useMemo(() => { + const delimiter = '\u0000' + const edges = edgeKeys.map((key) => { + const [source, handleId, target] = key.split(delimiter) + return { + id: key, + source, + sourceHandle: handleId || 'source', + target, + } + }) + + return checkMakeGroupAvailability(selectedNodeIds, edges, hasGroupNode) + }, [edgeKeys, selectedNodeIds, hasGroupNode]) +} diff --git 
a/web/app/components/workflow/hooks/use-nodes-interactions.ts b/web/app/components/workflow/hooks/use-nodes-interactions.ts index 8277e7dac8..6a43b91b9c 100644 --- a/web/app/components/workflow/hooks/use-nodes-interactions.ts +++ b/web/app/components/workflow/hooks/use-nodes-interactions.ts @@ -8,6 +8,7 @@ import type { ResizeParamsWithDirection, } from 'reactflow' import type { PluginDefaultValue } from '../block-selector/types' +import type { GroupHandler, GroupMember, GroupNodeData } from '../nodes/group/types' import type { IterationNodeType } from '../nodes/iteration/types' import type { LoopNodeType } from '../nodes/loop/types' import type { VariableAssignerNodeType } from '../nodes/variable-assigner/types' @@ -52,6 +53,7 @@ import { useWorkflowHistoryStore } from '../workflow-history-store' import { useAutoGenerateWebhookUrl } from './use-auto-generate-webhook-url' import { useHelpline } from './use-helpline' import useInspectVarsCrud from './use-inspect-vars-crud' +import { checkMakeGroupAvailability } from './use-make-group' import { useNodesMetaData } from './use-nodes-meta-data' import { useNodesSyncDraft } from './use-nodes-sync-draft' import { @@ -73,6 +75,151 @@ const ENTRY_NODE_WRAPPER_OFFSET = { y: 21, // Adjusted based on visual testing feedback } as const +/** + * Parse group handler id to get original node id and sourceHandle + * Handler id format: `${nodeId}-${sourceHandle}` + */ +function parseGroupHandlerId(handlerId: string): { originalNodeId: string, originalSourceHandle: string } { + const lastDashIndex = handlerId.lastIndexOf('-') + return { + originalNodeId: handlerId.substring(0, lastDashIndex), + originalSourceHandle: handlerId.substring(lastDashIndex + 1), + } +} + +/** + * Create a pair of edges for group node connections: + * - realEdge: hidden edge from original node to target (persisted to backend) + * - uiEdge: visible temp edge from group to target (UI-only, not persisted) + */ +function createGroupEdgePair(params: { + 
groupNodeId: string + handlerId: string + targetNodeId: string + targetHandle: string + nodes: Node[] + baseEdgeData?: Partial + zIndex?: number +}): { realEdge: Edge, uiEdge: Edge } | null { + const { groupNodeId, handlerId, targetNodeId, targetHandle, nodes, baseEdgeData = {}, zIndex = 0 } = params + + const groupNode = nodes.find(node => node.id === groupNodeId) + const groupData = groupNode?.data as GroupNodeData | undefined + const handler = groupData?.handlers?.find(h => h.id === handlerId) + + let originalNodeId: string + let originalSourceHandle: string + + if (handler?.nodeId && handler?.sourceHandle) { + originalNodeId = handler.nodeId + originalSourceHandle = handler.sourceHandle + } + else { + const parsed = parseGroupHandlerId(handlerId) + originalNodeId = parsed.originalNodeId + originalSourceHandle = parsed.originalSourceHandle + } + + const originalNode = nodes.find(node => node.id === originalNodeId) + const targetNode = nodes.find(node => node.id === targetNodeId) + + if (!originalNode || !targetNode) + return null + + // Create the real edge (from original node to target) - hidden because original node is in group + const realEdge: Edge = { + id: `${originalNodeId}-${originalSourceHandle}-${targetNodeId}-${targetHandle}`, + type: CUSTOM_EDGE, + source: originalNodeId, + sourceHandle: originalSourceHandle, + target: targetNodeId, + targetHandle, + hidden: true, + data: { + ...baseEdgeData, + sourceType: originalNode.data.type, + targetType: targetNode.data.type, + _hiddenInGroupId: groupNodeId, + }, + zIndex, + } + + // Create the UI edge (from group to target) - temporary, not persisted to backend + const uiEdge: Edge = { + id: `${groupNodeId}-${handlerId}-${targetNodeId}-${targetHandle}`, + type: CUSTOM_EDGE, + source: groupNodeId, + sourceHandle: handlerId, + target: targetNodeId, + targetHandle, + data: { + ...baseEdgeData, + sourceType: BlockEnum.Group, + targetType: targetNode.data.type, + _isTemp: true, + }, + zIndex, + } + + return { 
realEdge, uiEdge } +} + +function createGroupInboundEdges(params: { + sourceNodeId: string + sourceHandle: string + groupNodeId: string + groupData: GroupNodeData + nodes: Node[] + baseEdgeData?: Partial + zIndex?: number +}): { realEdges: Edge[], uiEdge: Edge } | null { + const { sourceNodeId, sourceHandle, groupNodeId, groupData, nodes, baseEdgeData = {}, zIndex = 0 } = params + + const sourceNode = nodes.find(node => node.id === sourceNodeId) + const headNodeIds = groupData.headNodeIds || [] + + if (!sourceNode || headNodeIds.length === 0) + return null + + const realEdges: Edge[] = headNodeIds.map((headNodeId) => { + const headNode = nodes.find(node => node.id === headNodeId) + return { + id: `${sourceNodeId}-${sourceHandle}-${headNodeId}-target`, + type: CUSTOM_EDGE, + source: sourceNodeId, + sourceHandle, + target: headNodeId, + targetHandle: 'target', + hidden: true, + data: { + ...baseEdgeData, + sourceType: sourceNode.data.type, + targetType: headNode?.data.type, + _hiddenInGroupId: groupNodeId, + }, + zIndex, + } as Edge + }) + + const uiEdge: Edge = { + id: `${sourceNodeId}-${sourceHandle}-${groupNodeId}-target`, + type: CUSTOM_EDGE, + source: sourceNodeId, + sourceHandle, + target: groupNodeId, + targetHandle: 'target', + data: { + ...baseEdgeData, + sourceType: sourceNode.data.type, + targetType: BlockEnum.Group, + _isTemp: true, + }, + zIndex, + } + + return { realEdges, uiEdge } +} + export const useNodesInteractions = () => { const { t } = useTranslation() const store = useStoreApi() @@ -448,6 +595,146 @@ export const useNodesInteractions = () => { return } + // Check if source is a group node - need special handling + const isSourceGroup = sourceNode?.data.type === BlockEnum.Group + + if (isSourceGroup && sourceHandle && target && targetHandle) { + const { originalNodeId, originalSourceHandle } = parseGroupHandlerId(sourceHandle) + + // Check if real edge already exists + if (edges.find(edge => + edge.source === originalNodeId + && 
edge.sourceHandle === originalSourceHandle + && edge.target === target + && edge.targetHandle === targetHandle, + )) { + return + } + + const parentNode = nodes.find(node => node.id === targetNode?.parentId) + const isInIteration = parentNode && parentNode.data.type === BlockEnum.Iteration + const isInLoop = !!parentNode && parentNode.data.type === BlockEnum.Loop + + const edgePair = createGroupEdgePair({ + groupNodeId: source!, + handlerId: sourceHandle, + targetNodeId: target, + targetHandle, + nodes, + baseEdgeData: { + isInIteration, + iteration_id: isInIteration ? targetNode?.parentId : undefined, + isInLoop, + loop_id: isInLoop ? targetNode?.parentId : undefined, + }, + }) + + if (!edgePair) + return + + const { realEdge, uiEdge } = edgePair + + // Update connected handle ids for the original node + const nodesConnectedSourceOrTargetHandleIdsMap + = getNodesConnectedSourceOrTargetHandleIdsMap( + [{ type: 'add', edge: realEdge }], + nodes, + ) + const newNodes = produce(nodes, (draft: Node[]) => { + draft.forEach((node) => { + if (nodesConnectedSourceOrTargetHandleIdsMap[node.id]) { + node.data = { + ...node.data, + ...nodesConnectedSourceOrTargetHandleIdsMap[node.id], + } + } + }) + }) + const newEdges = produce(edges, (draft) => { + draft.push(realEdge) + draft.push(uiEdge) + }) + + setNodes(newNodes) + setEdges(newEdges) + + handleSyncWorkflowDraft() + saveStateToHistory(WorkflowHistoryEvent.NodeConnect, { + nodeId: targetNode?.id, + }) + return + } + + const isTargetGroup = targetNode?.data.type === BlockEnum.Group + + if (isTargetGroup && source && sourceHandle) { + const groupData = targetNode.data as GroupNodeData + const headNodeIds = groupData.headNodeIds || [] + + if (edges.find(edge => + edge.source === source + && edge.sourceHandle === sourceHandle + && edge.target === target + && edge.targetHandle === targetHandle, + )) { + return + } + + const parentNode = nodes.find(node => node.id === sourceNode?.parentId) + const isInIteration = parentNode && 
parentNode.data.type === BlockEnum.Iteration + const isInLoop = !!parentNode && parentNode.data.type === BlockEnum.Loop + + const inboundResult = createGroupInboundEdges({ + sourceNodeId: source, + sourceHandle, + groupNodeId: target!, + groupData, + nodes, + baseEdgeData: { + isInIteration, + iteration_id: isInIteration ? sourceNode?.parentId : undefined, + isInLoop, + loop_id: isInLoop ? sourceNode?.parentId : undefined, + }, + }) + + if (!inboundResult) + return + + const { realEdges, uiEdge } = inboundResult + + const edgeChanges = realEdges.map(edge => ({ type: 'add' as const, edge })) + const nodesConnectedSourceOrTargetHandleIdsMap + = getNodesConnectedSourceOrTargetHandleIdsMap(edgeChanges, nodes) + + const newNodes = produce(nodes, (draft: Node[]) => { + draft.forEach((node) => { + if (nodesConnectedSourceOrTargetHandleIdsMap[node.id]) { + node.data = { + ...node.data, + ...nodesConnectedSourceOrTargetHandleIdsMap[node.id], + } + } + }) + }) + + const newEdges = produce(edges, (draft) => { + realEdges.forEach((edge) => { + draft.push(edge) + }) + draft.push(uiEdge) + }) + + setNodes(newNodes) + setEdges(newEdges) + + handleSyncWorkflowDraft() + saveStateToHistory(WorkflowHistoryEvent.NodeConnect, { + nodeId: headNodeIds[0], + }) + return + } + if ( edges.find( edge => @@ -909,8 +1196,34 @@ export const useNodesInteractions = () => { } } - let newEdge = null - if (nodeType !== BlockEnum.DataSource) { + // Check if prevNode is a group node - need special handling + const isPrevNodeGroup = prevNode.data.type === BlockEnum.Group + let newEdge: Edge | null = null + let newUiEdge: Edge | null = null + + if (isPrevNodeGroup && prevNodeSourceHandle && nodeType !== BlockEnum.DataSource) { + const edgePair = createGroupEdgePair({ + groupNodeId: prevNodeId, + handlerId: prevNodeSourceHandle, + targetNodeId: newNode.id, + targetHandle, + nodes: [...nodes, newNode], + baseEdgeData: { + isInIteration, + isInLoop, + iteration_id: isInIteration ? 
prevNode.parentId : undefined, + loop_id: isInLoop ? prevNode.parentId : undefined, + _connectedNodeIsSelected: true, + }, + }) + + if (edgePair) { + newEdge = edgePair.realEdge + newUiEdge = edgePair.uiEdge + } + } + else if (nodeType !== BlockEnum.DataSource) { + // Normal case: prevNode is not a group newEdge = { id: `${prevNodeId}-${prevNodeSourceHandle}-${newNode.id}-${targetHandle}`, type: CUSTOM_EDGE, @@ -935,9 +1248,10 @@ export const useNodesInteractions = () => { } } + const edgesToAdd = [newEdge, newUiEdge].filter(Boolean).map(edge => ({ type: 'add' as const, edge: edge! })) const nodesConnectedSourceOrTargetHandleIdsMap = getNodesConnectedSourceOrTargetHandleIdsMap( - (newEdge ? [{ type: 'add', edge: newEdge }] : []), + edgesToAdd, nodes, ) const newNodes = produce(nodes, (draft: Node[]) => { @@ -1006,6 +1320,8 @@ export const useNodesInteractions = () => { }) if (newEdge) draft.push(newEdge) + if (newUiEdge) + draft.push(newUiEdge) }) setNodes(newNodes) @@ -1090,7 +1406,7 @@ export const useNodesInteractions = () => { const afterNodesInSameBranch = getAfterNodesInSameBranch(nextNodeId!) 
const afterNodesInSameBranchIds = afterNodesInSameBranch.map( - node => node.id, + (node: Node) => node.id, ) const newNodes = produce(nodes, (draft) => { draft.forEach((node) => { @@ -1200,37 +1516,113 @@ export const useNodesInteractions = () => { } } - const currentEdgeIndex = edges.findIndex( - edge => edge.source === prevNodeId && edge.target === nextNodeId, - ) - let newPrevEdge = null + // Check if prevNode is a group node - need special handling + const isPrevNodeGroup = prevNode.data.type === BlockEnum.Group + let newPrevEdge: Edge | null = null + let newPrevUiEdge: Edge | null = null + const edgesToRemove: string[] = [] - if (nodeType !== BlockEnum.DataSource) { - newPrevEdge = { - id: `${prevNodeId}-${prevNodeSourceHandle}-${newNode.id}-${targetHandle}`, - type: CUSTOM_EDGE, - source: prevNodeId, - sourceHandle: prevNodeSourceHandle, - target: newNode.id, + if (isPrevNodeGroup && prevNodeSourceHandle && nodeType !== BlockEnum.DataSource) { + const { originalNodeId, originalSourceHandle } = parseGroupHandlerId(prevNodeSourceHandle) + + // Find edges to remove: both hidden real edge and UI temp edge from group to nextNode + const hiddenEdge = edges.find( + edge => edge.source === originalNodeId + && edge.sourceHandle === originalSourceHandle + && edge.target === nextNodeId, + ) + const uiTempEdge = edges.find( + edge => edge.source === prevNodeId + && edge.sourceHandle === prevNodeSourceHandle + && edge.target === nextNodeId, + ) + if (hiddenEdge) + edgesToRemove.push(hiddenEdge.id) + if (uiTempEdge) + edgesToRemove.push(uiTempEdge.id) + + const edgePair = createGroupEdgePair({ + groupNodeId: prevNodeId, + handlerId: prevNodeSourceHandle, + targetNodeId: newNode.id, targetHandle, - data: { - sourceType: prevNode.data.type, - targetType: newNode.data.type, + nodes: [...nodes, newNode], + baseEdgeData: { isInIteration, isInLoop, iteration_id: isInIteration ? prevNode.parentId : undefined, loop_id: isInLoop ? 
prevNode.parentId : undefined, _connectedNodeIsSelected: true, }, - zIndex: prevNode.parentId - ? isInIteration - ? ITERATION_CHILDREN_Z_INDEX - : LOOP_CHILDREN_Z_INDEX - : 0, + }) + + if (edgePair) { + newPrevEdge = edgePair.realEdge + newPrevUiEdge = edgePair.uiEdge + } + } + else { + const isNextNodeGroupForRemoval = nextNode.data.type === BlockEnum.Group + + if (isNextNodeGroupForRemoval) { + const groupData = nextNode.data as GroupNodeData + const headNodeIds = groupData.headNodeIds || [] + + headNodeIds.forEach((headNodeId) => { + const realEdge = edges.find( + edge => edge.source === prevNodeId + && edge.sourceHandle === prevNodeSourceHandle + && edge.target === headNodeId, + ) + if (realEdge) + edgesToRemove.push(realEdge.id) + }) + + const uiEdge = edges.find( + edge => edge.source === prevNodeId + && edge.sourceHandle === prevNodeSourceHandle + && edge.target === nextNodeId, + ) + if (uiEdge) + edgesToRemove.push(uiEdge.id) + } + else { + const currentEdge = edges.find( + edge => edge.source === prevNodeId && edge.target === nextNodeId, + ) + if (currentEdge) + edgesToRemove.push(currentEdge.id) + } + + if (nodeType !== BlockEnum.DataSource) { + newPrevEdge = { + id: `${prevNodeId}-${prevNodeSourceHandle}-${newNode.id}-${targetHandle}`, + type: CUSTOM_EDGE, + source: prevNodeId, + sourceHandle: prevNodeSourceHandle, + target: newNode.id, + targetHandle, + data: { + sourceType: prevNode.data.type, + targetType: newNode.data.type, + isInIteration, + isInLoop, + iteration_id: isInIteration ? prevNode.parentId : undefined, + loop_id: isInLoop ? prevNode.parentId : undefined, + _connectedNodeIsSelected: true, + }, + zIndex: prevNode.parentId + ? isInIteration + ? 
ITERATION_CHILDREN_Z_INDEX + : LOOP_CHILDREN_Z_INDEX + : 0, + } } } let newNextEdge: Edge | null = null + let newNextUiEdge: Edge | null = null + const newNextRealEdges: Edge[] = [] const nextNodeParentNode = nodes.find(node => node.id === nextNode.parentId) || null @@ -1241,49 +1633,113 @@ export const useNodesInteractions = () => { = !!nextNodeParentNode && nextNodeParentNode.data.type === BlockEnum.Loop + const isNextNodeGroup = nextNode.data.type === BlockEnum.Group + if ( nodeType !== BlockEnum.IfElse && nodeType !== BlockEnum.QuestionClassifier && nodeType !== BlockEnum.LoopEnd ) { - newNextEdge = { - id: `${newNode.id}-${sourceHandle}-${nextNodeId}-${nextNodeTargetHandle}`, - type: CUSTOM_EDGE, - source: newNode.id, - sourceHandle, - target: nextNodeId, - targetHandle: nextNodeTargetHandle, - data: { - sourceType: newNode.data.type, - targetType: nextNode.data.type, - isInIteration: isNextNodeInIteration, - isInLoop: isNextNodeInLoop, - iteration_id: isNextNodeInIteration - ? nextNode.parentId - : undefined, - loop_id: isNextNodeInLoop ? nextNode.parentId : undefined, - _connectedNodeIsSelected: true, - }, - zIndex: nextNode.parentId - ? isNextNodeInIteration - ? ITERATION_CHILDREN_Z_INDEX - : LOOP_CHILDREN_Z_INDEX - : 0, + if (isNextNodeGroup) { + const groupData = nextNode.data as GroupNodeData + const headNodeIds = groupData.headNodeIds || [] + + headNodeIds.forEach((headNodeId) => { + const headNode = nodes.find(node => node.id === headNodeId) + newNextRealEdges.push({ + id: `${newNode.id}-${sourceHandle}-${headNodeId}-target`, + type: CUSTOM_EDGE, + source: newNode.id, + sourceHandle, + target: headNodeId, + targetHandle: 'target', + hidden: true, + data: { + sourceType: newNode.data.type, + targetType: headNode?.data.type, + isInIteration: isNextNodeInIteration, + isInLoop: isNextNodeInLoop, + iteration_id: isNextNodeInIteration ? nextNode.parentId : undefined, + loop_id: isNextNodeInLoop ? 
nextNode.parentId : undefined, + _hiddenInGroupId: nextNodeId, + _connectedNodeIsSelected: true, + }, + zIndex: nextNode.parentId + ? isNextNodeInIteration + ? ITERATION_CHILDREN_Z_INDEX + : LOOP_CHILDREN_Z_INDEX + : 0, + } as Edge) + }) + + newNextUiEdge = { + id: `${newNode.id}-${sourceHandle}-${nextNodeId}-target`, + type: CUSTOM_EDGE, + source: newNode.id, + sourceHandle, + target: nextNodeId, + targetHandle: 'target', + data: { + sourceType: newNode.data.type, + targetType: BlockEnum.Group, + isInIteration: isNextNodeInIteration, + isInLoop: isNextNodeInLoop, + iteration_id: isNextNodeInIteration ? nextNode.parentId : undefined, + loop_id: isNextNodeInLoop ? nextNode.parentId : undefined, + _isTemp: true, + _connectedNodeIsSelected: true, + }, + zIndex: nextNode.parentId + ? isNextNodeInIteration + ? ITERATION_CHILDREN_Z_INDEX + : LOOP_CHILDREN_Z_INDEX + : 0, + } + } + else { + newNextEdge = { + id: `${newNode.id}-${sourceHandle}-${nextNodeId}-${nextNodeTargetHandle}`, + type: CUSTOM_EDGE, + source: newNode.id, + sourceHandle, + target: nextNodeId, + targetHandle: nextNodeTargetHandle, + data: { + sourceType: newNode.data.type, + targetType: nextNode.data.type, + isInIteration: isNextNodeInIteration, + isInLoop: isNextNodeInLoop, + iteration_id: isNextNodeInIteration + ? nextNode.parentId + : undefined, + loop_id: isNextNodeInLoop ? nextNode.parentId : undefined, + _connectedNodeIsSelected: true, + }, + zIndex: nextNode.parentId + ? isNextNodeInIteration + ? ITERATION_CHILDREN_Z_INDEX + : LOOP_CHILDREN_Z_INDEX + : 0, + } } } + const edgeChanges = [ + ...edgesToRemove.map(id => ({ type: 'remove' as const, edge: edges.find(e => e.id === id)! })).filter(c => c.edge), + ...(newPrevEdge ? [{ type: 'add' as const, edge: newPrevEdge }] : []), + ...(newPrevUiEdge ? [{ type: 'add' as const, edge: newPrevUiEdge }] : []), + ...(newNextEdge ? 
[{ type: 'add' as const, edge: newNextEdge }] : []), + ...newNextRealEdges.map(edge => ({ type: 'add' as const, edge })), + ...(newNextUiEdge ? [{ type: 'add' as const, edge: newNextUiEdge }] : []), + ] const nodesConnectedSourceOrTargetHandleIdsMap = getNodesConnectedSourceOrTargetHandleIdsMap( - [ - { type: 'remove', edge: edges[currentEdgeIndex] }, - ...(newPrevEdge ? [{ type: 'add', edge: newPrevEdge }] : []), - ...(newNextEdge ? [{ type: 'add', edge: newNextEdge }] : []), - ], + edgeChanges, [...nodes, newNode], ) const afterNodesInSameBranch = getAfterNodesInSameBranch(nextNodeId!) const afterNodesInSameBranchIds = afterNodesInSameBranch.map( - node => node.id, + (node: Node) => node.id, ) const newNodes = produce(nodes, (draft) => { draft.forEach((node) => { @@ -1342,7 +1798,10 @@ export const useNodesInteractions = () => { }) } const newEdges = produce(edges, (draft) => { - draft.splice(currentEdgeIndex, 1) + const filteredDraft = draft.filter(edge => !edgesToRemove.includes(edge.id)) + draft.length = 0 + draft.push(...filteredDraft) + draft.forEach((item) => { item.data = { ...item.data, @@ -1351,9 +1810,15 @@ export const useNodesInteractions = () => { }) if (newPrevEdge) draft.push(newPrevEdge) - + if (newPrevUiEdge) + draft.push(newPrevUiEdge) if (newNextEdge) draft.push(newNextEdge) + newNextRealEdges.forEach((edge) => { + draft.push(edge) + }) + if (newNextUiEdge) + draft.push(newNextUiEdge) }) setEdges(newEdges) } @@ -2087,6 +2552,302 @@ export const useNodesInteractions = () => { setEdges(newEdges) }, [store]) + // Check if there are any nodes selected via box selection + const hasBundledNodes = useCallback(() => { + const { getNodes } = store.getState() + const nodes = getNodes() + return nodes.some(node => node.data._isBundled) + }, [store]) + + const getCanMakeGroup = useCallback(() => { + const { getNodes, edges } = store.getState() + const nodes = getNodes() + const bundledNodes = nodes.filter(node => node.data._isBundled) + + if 
(bundledNodes.length <= 1) + return false + + const bundledNodeIds = bundledNodes.map(node => node.id) + const minimalEdges = edges.map(edge => ({ + id: edge.id, + source: edge.source, + sourceHandle: edge.sourceHandle || 'source', + target: edge.target, + })) + const hasGroupNode = bundledNodes.some(node => node.data.type === BlockEnum.Group) + + const { canMakeGroup } = checkMakeGroupAvailability(bundledNodeIds, minimalEdges, hasGroupNode) + return canMakeGroup + }, [store]) + + const handleMakeGroup = useCallback(() => { + const { getNodes, setNodes, edges, setEdges } = store.getState() + const nodes = getNodes() + const bundledNodes = nodes.filter(node => node.data._isBundled) + + if (bundledNodes.length <= 1) + return + + const bundledNodeIds = bundledNodes.map(node => node.id) + const minimalEdges = edges.map(edge => ({ + id: edge.id, + source: edge.source, + sourceHandle: edge.sourceHandle || 'source', + target: edge.target, + })) + const hasGroupNode = bundledNodes.some(node => node.data.type === BlockEnum.Group) + + const { canMakeGroup } = checkMakeGroupAvailability(bundledNodeIds, minimalEdges, hasGroupNode) + if (!canMakeGroup) + return + + const bundledNodeIdSet = new Set(bundledNodeIds) + const bundledNodeIdIsLeaf = new Set() + const inboundEdges = edges.filter(edge => !bundledNodeIdSet.has(edge.source) && bundledNodeIdSet.has(edge.target)) + const outboundEdges = edges.filter(edge => bundledNodeIdSet.has(edge.source) && !bundledNodeIdSet.has(edge.target)) + + // leaf node: no outbound edges to other nodes in the selection + const handlers: GroupHandler[] = [] + const leafNodeIdSet = new Set() + + bundledNodes.forEach((node: Node) => { + const targetBranches = node.data._targetBranches || [{ id: 'source', name: node.data.title }] + targetBranches.forEach((branch) => { + // A branch should be a handler if it's either: + // 1. Connected to a node OUTSIDE the group + // 2. 
NOT connected to any node INSIDE the group + const isConnectedInside = edges.some(edge => + edge.source === node.id + && (edge.sourceHandle === branch.id || (!edge.sourceHandle && branch.id === 'source')) + && bundledNodeIdSet.has(edge.target), + ) + const isConnectedOutside = edges.some(edge => + edge.source === node.id + && (edge.sourceHandle === branch.id || (!edge.sourceHandle && branch.id === 'source')) + && !bundledNodeIdSet.has(edge.target), + ) + + if (isConnectedOutside || !isConnectedInside) { + const handlerId = `${node.id}-${branch.id}` + handlers.push({ + id: handlerId, + label: branch.name || node.data.title || node.id, + nodeId: node.id, + sourceHandle: branch.id, + }) + leafNodeIdSet.add(node.id) + } + }) + }) + + const leafNodeIds = Array.from(leafNodeIdSet) + leafNodeIds.forEach(id => bundledNodeIdIsLeaf.add(id)) + + const members: GroupMember[] = bundledNodes.map((node) => { + return { + id: node.id, + type: node.data.type, + label: node.data.title, + } + }) + + // head nodes: nodes that receive input from outside the group + const headNodeIds = [...new Set(inboundEdges.map(edge => edge.target))] + + // put the group node at the top-left corner of the selection, slightly offset + const { x: minX, y: minY } = getTopLeftNodePosition(bundledNodes) + + const groupNodeData: GroupNodeData = { + title: t('operator.makeGroup', { ns: 'workflow' }), + desc: '', + type: BlockEnum.Group, + members, + handlers, + headNodeIds, + leafNodeIds, + selected: true, + _targetBranches: handlers.map(handler => ({ + id: handler.id, + name: handler.label || handler.id, + })), + } + + const { newNode: groupNode } = generateNewNode({ + data: groupNodeData, + position: { + x: minX - 20, + y: minY - 20, + }, + }) + + const nodeTypeMap = new Map(nodes.map(node => [node.id, node.data.type])) + + const newNodes = produce(nodes, (draft) => { + draft.forEach((node) => { + if (bundledNodeIdSet.has(node.id)) { + node.data._isBundled = false + node.selected = false + node.hidden = 
true + node.data._hiddenInGroupId = groupNode.id + } + else { + node.data._isBundled = false + } + }) + draft.push(groupNode) + }) + + const newEdges = produce(edges, (draft) => { + draft.forEach((edge) => { + if (bundledNodeIdSet.has(edge.source) || bundledNodeIdSet.has(edge.target)) { + edge.hidden = true + edge.data = { + ...edge.data, + _hiddenInGroupId: groupNode.id, + _isBundled: false, + } + } + else if (edge.data?._isBundled) { + edge.data._isBundled = false + } + }) + + // re-add the external inbound edges to the group node as UI-only edges (not persisted to backend) + inboundEdges.forEach((edge) => { + draft.push({ + id: `${edge.id}__to-${groupNode.id}`, + type: edge.type || CUSTOM_EDGE, + source: edge.source, + target: groupNode.id, + sourceHandle: edge.sourceHandle, + targetHandle: 'target', + data: { + ...edge.data, + sourceType: nodeTypeMap.get(edge.source)!, + targetType: BlockEnum.Group, + _hiddenInGroupId: undefined, + _isBundled: false, + _isTemp: true, // UI-only edge, not persisted to backend + }, + zIndex: edge.zIndex, + }) + }) + + // outbound edges of the group node as UI-only edges (not persisted to backend) + outboundEdges.forEach((edge) => { + if (!bundledNodeIdIsLeaf.has(edge.source)) + return + + // Use the same handler id format: nodeId-sourceHandle + const originalSourceHandle = edge.sourceHandle || 'source' + const handlerId = `${edge.source}-${originalSourceHandle}` + + draft.push({ + id: `${groupNode.id}-${edge.target}-${edge.targetHandle || 'target'}-${handlerId}`, + type: edge.type || CUSTOM_EDGE, + source: groupNode.id, + target: edge.target, + sourceHandle: handlerId, + targetHandle: edge.targetHandle, + data: { + ...edge.data, + sourceType: BlockEnum.Group, + targetType: nodeTypeMap.get(edge.target)!, + _hiddenInGroupId: undefined, + _isBundled: false, + _isTemp: true, + }, + zIndex: edge.zIndex, + }) + }) + }) + + setNodes(newNodes) + setEdges(newEdges) + workflowStore.setState({ + selectionMenu: undefined, + }) + 
handleSyncWorkflowDraft() + saveStateToHistory(WorkflowHistoryEvent.NodeAdd, { + nodeId: groupNode.id, + }) + }, [handleSyncWorkflowDraft, saveStateToHistory, store, t, workflowStore]) + + // check if the current selection can be ungrouped (single selected Group node) + const getCanUngroup = useCallback(() => { + const { getNodes } = store.getState() + const nodes = getNodes() + const selectedNodes = nodes.filter(node => node.selected) + + if (selectedNodes.length !== 1) + return false + + return selectedNodes[0].data.type === BlockEnum.Group + }, [store]) + + // get the selected group node id for ungroup operation + const getSelectedGroupId = useCallback(() => { + const { getNodes } = store.getState() + const nodes = getNodes() + const selectedNodes = nodes.filter(node => node.selected) + + if (selectedNodes.length === 1 && selectedNodes[0].data.type === BlockEnum.Group) + return selectedNodes[0].id + + return undefined + }, [store]) + + const handleUngroup = useCallback((groupId: string) => { + const { getNodes, setNodes, edges, setEdges } = store.getState() + const nodes = getNodes() + const groupNode = nodes.find(n => n.id === groupId) + + if (!groupNode || groupNode.data.type !== BlockEnum.Group) + return + + const memberIds = new Set((groupNode.data.members || []).map((m: { id: string }) => m.id)) + + // restore hidden member nodes + const newNodes = produce(nodes, (draft) => { + draft.forEach((node) => { + if (memberIds.has(node.id)) { + node.hidden = false + delete node.data._hiddenInGroupId + } + }) + // remove group node + const groupIndex = draft.findIndex(n => n.id === groupId) + if (groupIndex !== -1) + draft.splice(groupIndex, 1) + }) + + // restore hidden edges and remove temp edges in single pass O(E) + const newEdges = produce(edges, (draft) => { + const indicesToRemove: number[] = [] + + for (let i = 0; i < draft.length; i++) { + const edge = draft[i] + // restore hidden edges that involve member nodes + if (edge.hidden && 
(memberIds.has(edge.source) || memberIds.has(edge.target))) + edge.hidden = false + // collect temp edges connected to group for removal + if (edge.data?._isTemp && (edge.source === groupId || edge.target === groupId)) + indicesToRemove.push(i) + } + + // remove collected indices in reverse order to avoid index shift + for (let i = indicesToRemove.length - 1; i >= 0; i--) + draft.splice(indicesToRemove[i], 1) + }) + + setNodes(newNodes) + setEdges(newEdges) + handleSyncWorkflowDraft() + saveStateToHistory(WorkflowHistoryEvent.NodeDelete, { + nodeId: groupId, + }) + }, [handleSyncWorkflowDraft, saveStateToHistory, store]) + return { handleNodeDragStart, handleNodeDrag, @@ -2107,11 +2868,17 @@ export const useNodesInteractions = () => { handleNodesPaste, handleNodesDuplicate, handleNodesDelete, + handleMakeGroup, + handleUngroup, handleNodeResize, handleNodeDisconnect, handleHistoryBack, handleHistoryForward, dimOtherNodes, undimAllNodes, + hasBundledNodes, + getCanMakeGroup, + getCanUngroup, + getSelectedGroupId, } } diff --git a/web/app/components/workflow/hooks/use-nodes-meta-data.ts b/web/app/components/workflow/hooks/use-nodes-meta-data.ts index 2ea2fd9e9f..36c071f4d4 100644 --- a/web/app/components/workflow/hooks/use-nodes-meta-data.ts +++ b/web/app/components/workflow/hooks/use-nodes-meta-data.ts @@ -1,8 +1,10 @@ import type { AvailableNodesMetaData } from '@/app/components/workflow/hooks-store' import type { Node } from '@/app/components/workflow/types' import { useMemo } from 'react' +import { useTranslation } from 'react-i18next' import { CollectionType } from '@/app/components/tools/types' import { useHooksStore } from '@/app/components/workflow/hooks-store' +import GroupDefault from '@/app/components/workflow/nodes/group/default' import { useStore } from '@/app/components/workflow/store' import { BlockEnum } from '@/app/components/workflow/types' import { useGetLanguage } from '@/context/i18n' @@ -25,6 +27,7 @@ export const useNodesMetaData = () => { } 
export const useNodeMetaData = (node: Node) => { + const { t } = useTranslation() const language = useGetLanguage() const { data: buildInTools } = useAllBuiltInTools() const { data: customTools } = useAllCustomTools() @@ -34,6 +37,9 @@ export const useNodeMetaData = (node: Node) => { const { data } = node const nodeMetaData = availableNodesMetaData.nodesMap?.[data.type] const author = useMemo(() => { + if (data.type === BlockEnum.Group) + return GroupDefault.metaData.author + if (data.type === BlockEnum.DataSource) return dataSourceList?.find(dataSource => dataSource.plugin_id === data.plugin_id)?.author @@ -48,6 +54,9 @@ export const useNodeMetaData = (node: Node) => { }, [data, buildInTools, customTools, workflowTools, nodeMetaData, dataSourceList]) const description = useMemo(() => { + if (data.type === BlockEnum.Group) + return t('blocksAbout.group', { ns: 'workflow' }) + if (data.type === BlockEnum.DataSource) return dataSourceList?.find(dataSource => dataSource.plugin_id === data.plugin_id)?.description[language] if (data.type === BlockEnum.Tool) { @@ -58,7 +67,7 @@ export const useNodeMetaData = (node: Node) => { return customTools?.find(toolWithProvider => toolWithProvider.id === data.provider_id)?.description[language] } return nodeMetaData?.metaData.description - }, [data, buildInTools, customTools, workflowTools, nodeMetaData, dataSourceList, language]) + }, [data, buildInTools, customTools, workflowTools, nodeMetaData, dataSourceList, language, t]) return useMemo(() => { return { diff --git a/web/app/components/workflow/hooks/use-shortcuts.ts b/web/app/components/workflow/hooks/use-shortcuts.ts index 1b3d141d8d..64e9e9f794 100644 --- a/web/app/components/workflow/hooks/use-shortcuts.ts +++ b/web/app/components/workflow/hooks/use-shortcuts.ts @@ -17,7 +17,7 @@ import { } from '../utils' import { useWorkflowHistoryStore } from '../workflow-history-store' -export const useShortcuts = (): void => { +export const useShortcuts = (enabled = true): void => { 
const { handleNodesCopy, handleNodesPaste, @@ -27,6 +27,12 @@ export const useShortcuts = (): void => { handleHistoryForward, dimOtherNodes, undimAllNodes, + hasBundledNodes, + getCanMakeGroup, + handleMakeGroup, + getCanUngroup, + getSelectedGroupId, + handleUngroup, } = useNodesInteractions() const { shortcutsEnabled: workflowHistoryShortcutsEnabled } = useWorkflowHistoryStore() const { handleSyncWorkflowDraft } = useNodesSyncDraft() @@ -60,13 +66,17 @@ export const useShortcuts = (): void => { } const shouldHandleShortcut = useCallback((e: KeyboardEvent) => { + if (!enabled) + return false return !isEventTargetInputArea(e.target as HTMLElement) - }, []) + }, [enabled]) const shouldHandleCopy = useCallback(() => { + if (!enabled) + return false const selection = document.getSelection() return !selection || selection.isCollapsed - }, []) + }, [enabled]) useKeyPress(['delete', 'backspace'], (e) => { if (shouldHandleShortcut(e)) { @@ -78,7 +88,8 @@ export const useShortcuts = (): void => { useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.c`, (e) => { const { showDebugAndPreviewPanel } = workflowStore.getState() - if (shouldHandleShortcut(e) && shouldHandleCopy() && !showDebugAndPreviewPanel) { + // Only intercept when nodes are selected via box selection + if (shouldHandleShortcut(e) && shouldHandleCopy() && !showDebugAndPreviewPanel && hasBundledNodes()) { e.preventDefault() handleNodesCopy() } @@ -99,6 +110,26 @@ export const useShortcuts = (): void => { } }, { exactMatch: true, useCapture: true }) + useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.g`, (e) => { + // Only intercept when the selection can be grouped + if (shouldHandleShortcut(e) && getCanMakeGroup()) { + e.preventDefault() + // Close selection context menu if open + workflowStore.setState({ selectionMenu: undefined }) + handleMakeGroup() + } + }, { exactMatch: true, useCapture: true }) + + useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.shift.g`, (e) => { + // Only intercept when the 
selection can be ungrouped + if (shouldHandleShortcut(e) && getCanUngroup()) { + e.preventDefault() + const groupId = getSelectedGroupId() + if (groupId) + handleUngroup(groupId) + } + }, { exactMatch: true, useCapture: true }) + useKeyPress(`${getKeyboardKeyCodeBySystem('alt')}.r`, (e) => { if (shouldHandleShortcut(e)) { e.preventDefault() @@ -255,6 +286,8 @@ export const useShortcuts = (): void => { // Listen for zen toggle event from /zen command useEffect(() => { + if (!enabled) + return const handleZenToggle = () => { handleToggleMaximizeCanvas() } @@ -263,5 +296,5 @@ export const useShortcuts = (): void => { return () => { window.removeEventListener(ZEN_TOGGLE_EVENT, handleZenToggle) } - }, [handleToggleMaximizeCanvas]) + }, [enabled, handleToggleMaximizeCanvas]) } diff --git a/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-finished.ts b/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-finished.ts index cf0d9bcef1..c18ab909a9 100644 --- a/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-finished.ts +++ b/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-finished.ts @@ -37,7 +37,10 @@ export const useWorkflowNodeFinished = () => { })) const newNodes = produce(nodes, (draft) => { - const currentNode = draft.find(node => node.id === data.node_id)! 
+ const currentNode = draft.find(node => node.id === data.node_id) + // Skip if node not found (e.g., virtual extraction nodes) + if (!currentNode) + return currentNode.data._runningStatus = data.status if (data.status === NodeRunningStatus.Exception) { if (data.execution_metadata?.error_strategy === ErrorHandleTypeEnum.failBranch) diff --git a/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-started.ts b/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-started.ts index 03c7387d38..282e35fbd6 100644 --- a/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-started.ts +++ b/web/app/components/workflow/hooks/use-workflow-run-event/use-workflow-node-started.ts @@ -45,6 +45,11 @@ export const useWorkflowNodeStarted = () => { } = reactflow const currentNodeIndex = nodes.findIndex(node => node.id === data.node_id) const currentNode = nodes[currentNodeIndex] + + // Skip if node not found (e.g., virtual extraction nodes) + if (!currentNode) + return + const position = currentNode.position const zoom = transform[2] diff --git a/web/app/components/workflow/hooks/use-workflow.ts b/web/app/components/workflow/hooks/use-workflow.ts index 990c8c950d..1daf06dd02 100644 --- a/web/app/components/workflow/hooks/use-workflow.ts +++ b/web/app/components/workflow/hooks/use-workflow.ts @@ -1,10 +1,10 @@ import type { Connection, } from 'reactflow' +import type { GroupNodeData } from '../nodes/group/types' import type { IterationNodeType } from '../nodes/iteration/types' import type { LoopNodeType } from '../nodes/loop/types' import type { - BlockEnum, Edge, Node, ValueSelector, @@ -28,14 +28,12 @@ import { } from '../constants' import { findUsedVarNodes, getNodeOutputVars, updateNodeVars } from '../nodes/_base/components/variable/utils' import { CUSTOM_NOTE_NODE } from '../note-node/constants' - import { useStore, useWorkflowStore, } from '../store' -import { - WorkflowRunningStatus, -} from '../types' + +import 
{ BlockEnum, WorkflowRunningStatus } from '../types' import { getWorkflowEntryNode, isWorkflowEntryNode, @@ -381,7 +379,7 @@ export const useWorkflow = () => { return startNodes }, [nodesMap, getRootNodesById]) - const isValidConnection = useCallback(({ source, sourceHandle: _sourceHandle, target }: Connection) => { + const isValidConnection = useCallback(({ source, sourceHandle, target }: Connection) => { const { edges, getNodes, @@ -396,15 +394,42 @@ export const useWorkflow = () => { if (sourceNode.parentId !== targetNode.parentId) return false + // For Group nodes, use the leaf node's type for validation + // sourceHandle format: "${leafNodeId}-${originalSourceHandle}" + let actualSourceType = sourceNode.data.type + if (sourceNode.data.type === BlockEnum.Group && sourceHandle) { + const lastDashIndex = sourceHandle.lastIndexOf('-') + if (lastDashIndex > 0) { + const leafNodeId = sourceHandle.substring(0, lastDashIndex) + const leafNode = nodes.find(node => node.id === leafNodeId) + if (leafNode) + actualSourceType = leafNode.data.type + } + } + if (sourceNode && targetNode) { - const sourceNodeAvailableNextNodes = getAvailableBlocks(sourceNode.data.type, !!sourceNode.parentId).availableNextBlocks + const sourceNodeAvailableNextNodes = getAvailableBlocks(actualSourceType, !!sourceNode.parentId).availableNextBlocks const targetNodeAvailablePrevNodes = getAvailableBlocks(targetNode.data.type, !!targetNode.parentId).availablePrevBlocks - if (!sourceNodeAvailableNextNodes.includes(targetNode.data.type)) - return false + if (targetNode.data.type === BlockEnum.Group) { + const groupData = targetNode.data as GroupNodeData + const headNodeIds = groupData.headNodeIds || [] + if (headNodeIds.length > 0) { + const headNode = nodes.find(node => node.id === headNodeIds[0]) + if (headNode) { + const headNodeAvailablePrevNodes = getAvailableBlocks(headNode.data.type, !!targetNode.parentId).availablePrevBlocks + if (!headNodeAvailablePrevNodes.includes(actualSourceType)) + 
return false + } + } + } + else { + if (!sourceNodeAvailableNextNodes.includes(targetNode.data.type)) + return false - if (!targetNodeAvailablePrevNodes.includes(sourceNode.data.type)) - return false + if (!targetNodeAvailablePrevNodes.includes(actualSourceType)) + return false + } } const hasCycle = (node: Node, visited = new Set()) => { @@ -473,13 +498,9 @@ export const useNodesReadOnly = () => { const isRestoring = useStore(s => s.isRestoring) const getNodesReadOnly = useCallback((): boolean => { - const { - workflowRunningData, - historyWorkflowData, - isRestoring, - } = workflowStore.getState() + const state = workflowStore.getState() - return !!(workflowRunningData?.result.status === WorkflowRunningStatus.Running || historyWorkflowData || isRestoring) + return !!(state.workflowRunningData?.result.status === WorkflowRunningStatus.Running || state.historyWorkflowData || state.isRestoring) }, [workflowStore]) return { @@ -525,6 +546,7 @@ export const useIsNodeInLoop = (loopId: string) => { return false if (node.parentId === loopId) + return true return false diff --git a/web/app/components/workflow/index.tsx b/web/app/components/workflow/index.tsx index 1543bce714..185bd9c34a 100644 --- a/web/app/components/workflow/index.tsx +++ b/web/app/components/workflow/index.tsx @@ -2,6 +2,7 @@ import type { FC } from 'react' import type { + NodeMouseHandler, Viewport, } from 'reactflow' import type { Shape as HooksStoreShape } from './hooks-store' @@ -54,6 +55,14 @@ import { } from './constants' import CustomConnectionLine from './custom-connection-line' import CustomEdge from './custom-edge' +import { + CUSTOM_GROUP_EXIT_PORT_NODE, + CUSTOM_GROUP_INPUT_NODE, + CUSTOM_GROUP_NODE, + CustomGroupExitPortNode, + CustomGroupInputNode, + CustomGroupNode, +} from './custom-group-node' import DatasetsDetailProvider from './datasets-detail-store/provider' import HelpLine from './help-line' import { @@ -80,6 +89,8 @@ import CustomIterationStartNode from './nodes/iteration-start' 
import { CUSTOM_ITERATION_START_NODE } from './nodes/iteration-start/constants' import CustomLoopStartNode from './nodes/loop-start' import { CUSTOM_LOOP_START_NODE } from './nodes/loop-start/constants' +import CustomSubGraphStartNode from './nodes/sub-graph-start' +import { CUSTOM_SUB_GRAPH_START_NODE } from './nodes/sub-graph-start/constants' import CustomNoteNode from './note-node' import { CUSTOM_NOTE_NODE } from './note-node/constants' import Operator from './operator' @@ -94,6 +105,7 @@ import { } from './store' import SyncingDataModal from './syncing-data-modal' import { + BlockEnum, ControlMode, } from './types' import { setupScrollToNodeListener } from './utils/node-navigation' @@ -109,9 +121,13 @@ const nodeTypes = { [CUSTOM_NODE]: CustomNode, [CUSTOM_NOTE_NODE]: CustomNoteNode, [CUSTOM_SIMPLE_NODE]: CustomSimpleNode, + [CUSTOM_SUB_GRAPH_START_NODE]: CustomSubGraphStartNode, [CUSTOM_ITERATION_START_NODE]: CustomIterationStartNode, [CUSTOM_LOOP_START_NODE]: CustomLoopStartNode, [CUSTOM_DATA_SOURCE_EMPTY_NODE]: CustomDataSourceEmptyNode, + [CUSTOM_GROUP_NODE]: CustomGroupNode, + [CUSTOM_GROUP_INPUT_NODE]: CustomGroupInputNode, + [CUSTOM_GROUP_EXIT_PORT_NODE]: CustomGroupExitPortNode, } const edgeTypes = { [CUSTOM_EDGE]: CustomEdge, @@ -123,6 +139,9 @@ export type WorkflowProps = { viewport?: Viewport children?: React.ReactNode onWorkflowDataUpdate?: (v: any) => void + allowSelectionWhenReadOnly?: boolean + canvasReadOnly?: boolean + interactionMode?: 'default' | 'subgraph' } export const Workflow: FC = memo(({ nodes: originalNodes, @@ -130,6 +149,9 @@ export const Workflow: FC = memo(({ viewport, children, onWorkflowDataUpdate, + allowSelectionWhenReadOnly = false, + canvasReadOnly = false, + interactionMode = 'default', }) => { const workflowContainerRef = useRef(null) const workflowStore = useWorkflowStore() @@ -182,9 +204,10 @@ export const Workflow: FC = memo(({ id: node.id, data: node.data, })) - if (!isEqual(oldData, nodesData)) + if 
(!isEqual(oldData, nodesData)) { setNodesInStore(nodes) - }, [setNodesInStore, workflowStore]) + } + }, [setNodesInStore]) useEffect(() => { setNodesOnlyChangeWithData(currentNodes as Node[]) }, [currentNodes, setNodesOnlyChangeWithData]) @@ -316,7 +339,8 @@ export const Workflow: FC = memo(({ }, }) - useShortcuts() + const isSubGraph = interactionMode === 'subgraph' + useShortcuts(!isSubGraph) // Initialize workflow node search functionality useWorkflowSearch() @@ -334,6 +358,7 @@ export const Workflow: FC = memo(({ const dataSourceList = useStore(s => s.dataSourceList) // buildInTools, customTools, workflowTools, mcpTools, dataSourceList const configsMap = useHooksStore(s => s.configsMap) + const subGraphSelectableNodeTypes = useHooksStore(s => s.subGraphSelectableNodeTypes) const [isLoadedVars, setIsLoadedVars] = useState(false) const [vars, setVars] = useState([]) useEffect(() => { @@ -370,6 +395,21 @@ export const Workflow: FC = memo(({ } } + const handleNodeClickInMode = useCallback( + (event, node) => { + if (isSubGraph) { + const allowTypes = subGraphSelectableNodeTypes?.length + ? subGraphSelectableNodeTypes + : [BlockEnum.LLM] + if (!allowTypes.includes(node.data.type)) + return + } + + handleNodeClick(event, node) + }, + [handleNodeClick, isSubGraph, subGraphSelectableNodeTypes], + ) + return (
= memo(({ ref={workflowContainerRef} > - + {!isSubGraph && }
- + {!isSubGraph && }
- - - - + {!isSubGraph && } + {!isSubGraph && } + {!isSubGraph && } + {!isSubGraph && } { !!showConfirm && ( = memo(({ onNodeDragStop={handleNodeDragStop} onNodeMouseEnter={handleNodeEnter} onNodeMouseLeave={handleNodeLeave} - onNodeClick={handleNodeClick} - onNodeContextMenu={handleNodeContextMenu} - onConnect={handleNodeConnect} - onConnectStart={handleNodeConnectStart} - onConnectEnd={handleNodeConnectEnd} + onNodeClick={handleNodeClickInMode} + onNodeContextMenu={isSubGraph ? undefined : handleNodeContextMenu} + onConnect={isSubGraph ? undefined : handleNodeConnect} + onConnectStart={isSubGraph ? undefined : handleNodeConnectStart} + onConnectEnd={isSubGraph ? undefined : handleNodeConnectEnd} onEdgeMouseEnter={handleEdgeEnter} onEdgeMouseLeave={handleEdgeLeave} onEdgesChange={handleEdgesChange} - onSelectionStart={handleSelectionStart} - onSelectionChange={handleSelectionChange} - onSelectionDrag={handleSelectionDrag} - onPaneContextMenu={handlePaneContextMenu} - onSelectionContextMenu={handleSelectionContextMenu} + onSelectionStart={isSubGraph ? undefined : handleSelectionStart} + onSelectionChange={isSubGraph ? undefined : handleSelectionChange} + onSelectionDrag={isSubGraph ? undefined : handleSelectionDrag} + onPaneContextMenu={isSubGraph ? undefined : handlePaneContextMenu} + onSelectionContextMenu={isSubGraph ? undefined : handleSelectionContextMenu} connectionLineComponent={CustomConnectionLine} // NOTE: For LOOP node, how to distinguish between ITERATION and LOOP here? Maybe both are the same? 
connectionLineContainerStyle={{ zIndex: ITERATION_CHILDREN_Z_INDEX }} defaultViewport={viewport} multiSelectionKeyCode={null} deleteKeyCode={null} - nodesDraggable={!nodesReadOnly} - nodesConnectable={!nodesReadOnly} - nodesFocusable={!nodesReadOnly} - edgesFocusable={!nodesReadOnly} - panOnScroll={controlMode === ControlMode.Pointer && !workflowReadOnly} - panOnDrag={controlMode === ControlMode.Hand || [1]} - zoomOnPinch={true} - zoomOnScroll={true} - zoomOnDoubleClick={true} + nodesDraggable={!(nodesReadOnly || canvasReadOnly || isSubGraph)} + nodesConnectable={!(nodesReadOnly || canvasReadOnly || isSubGraph)} + nodesFocusable={allowSelectionWhenReadOnly ? true : !nodesReadOnly} + edgesFocusable={isSubGraph ? false : (allowSelectionWhenReadOnly ? true : !nodesReadOnly)} + panOnScroll={!isSubGraph && controlMode === ControlMode.Pointer && !workflowReadOnly} + panOnDrag={!isSubGraph && (controlMode === ControlMode.Hand || [1])} + selectionOnDrag={!isSubGraph && controlMode === ControlMode.Pointer && !workflowReadOnly && !canvasReadOnly} + zoomOnPinch={!isSubGraph} + zoomOnScroll={!isSubGraph} + zoomOnDoubleClick={!isSubGraph} isValidConnection={isValidConnection} selectionKeyCode={null} selectionMode={SelectionMode.Partial} - selectionOnDrag={controlMode === ControlMode.Pointer && !workflowReadOnly} minZoom={0.25} > void + isHighlighted?: boolean + onSetHighlight?: () => void + registerRef?: (element: HTMLButtonElement | null) => void +} + +const Item: FC = ({ node, onSelect, isHighlighted, onSetHighlight, registerRef }) => { + const [isHovering, setIsHovering] = useState(false) + + return ( + + ) +} + +type Props = { + nodes: AgentNode[] + onSelect: (node: AgentNode) => void + onClose?: () => void + onBlur?: () => void + hideSearch?: boolean + searchBoxClassName?: string + maxHeightClass?: string + autoFocus?: boolean + externalSearchText?: string + enableKeyboardNavigation?: boolean +} + +const AgentNodeList: FC = ({ + nodes, + onSelect, + onClose, + onBlur, + 
hideSearch, + searchBoxClassName, + maxHeightClass, + autoFocus = true, + externalSearchText, + enableKeyboardNavigation = false, +}) => { + const { t } = useTranslation() + const [searchText, setSearchText] = useState('') + const normalizedSearchText = externalSearchText === undefined ? searchText : externalSearchText + const normalizedSearchTextTrimmed = normalizedSearchText.trim() + const normalizedSearchTextLower = normalizedSearchTextTrimmed.toLowerCase() + const shouldShowSearchInput = !hideSearch && externalSearchText === undefined + + const handleKeyDown = (e: React.KeyboardEvent) => { + if (e.key === 'Escape') { + e.preventDefault() + onClose?.() + } + } + + const filteredNodes = useMemo(() => nodes.filter((node) => { + if (!normalizedSearchTextTrimmed) + return true + return node.title.toLowerCase().includes(normalizedSearchTextLower) + }), [nodes, normalizedSearchTextLower, normalizedSearchTextTrimmed]) + + const [activeIndex, setActiveIndex] = useState(-1) + const itemRefs = useRef>([]) + const lastInteractionRef = useRef<'keyboard' | 'mouse' | 'filter' | null>(null) + const filteredNodesRef = useRef(filteredNodes) + const activeIndexRef = useRef(activeIndex) + const onCloseRef = useRef(onClose) + const resolvedActiveIndex = useMemo(() => { + if (!enableKeyboardNavigation || filteredNodes.length === 0) + return -1 + if (activeIndex < 0 || activeIndex >= filteredNodes.length) + return 0 + return activeIndex + }, [activeIndex, enableKeyboardNavigation, filteredNodes.length]) + + useEffect(() => { + itemRefs.current = [] + }, [filteredNodes.length]) + + useEffect(() => { + filteredNodesRef.current = filteredNodes + }, [filteredNodes]) + + useEffect(() => { + activeIndexRef.current = resolvedActiveIndex + }, [resolvedActiveIndex]) + + useEffect(() => { + onCloseRef.current = onClose + }, [onClose]) + + const handleHighlightIndex = useCallback((index: number, source: 'keyboard' | 'mouse' | 'filter') => { + lastInteractionRef.current = source + 
setActiveIndex(index) + }, []) + + useEffect(() => { + if (!enableKeyboardNavigation || filteredNodes.length === 0) { + lastInteractionRef.current = 'filter' + return + } + if (activeIndex < 0 || activeIndex >= filteredNodes.length) + lastInteractionRef.current = 'filter' + }, [activeIndex, enableKeyboardNavigation, filteredNodes.length]) + + useEffect(() => { + if (!enableKeyboardNavigation || resolvedActiveIndex < 0) + return + if (lastInteractionRef.current !== 'keyboard') + return + const target = itemRefs.current[resolvedActiveIndex] + if (target) + target.scrollIntoView({ block: 'nearest' }) + lastInteractionRef.current = null + }, [enableKeyboardNavigation, filteredNodes.length, resolvedActiveIndex]) + + const handleSelectItem = useCallback((node: AgentNode) => { + onSelect(node) + }, [onSelect]) + + useEffect(() => { + if (!enableKeyboardNavigation) + return + const handleKeyDown = (event: KeyboardEvent) => { + const nodes = filteredNodesRef.current + if (nodes.length === 0) + return + if (!['ArrowDown', 'ArrowUp', 'Enter', 'Escape'].includes(event.key)) + return + event.preventDefault() + event.stopPropagation() + if (event.key === 'Escape') { + onCloseRef.current?.() + return + } + if (event.key === 'Enter') { + const index = activeIndexRef.current + if (index < 0 || index >= nodes.length) + return + handleSelectItem(nodes[index]) + return + } + const delta = event.key === 'ArrowDown' ? 1 : -1 + const baseIndex = activeIndexRef.current < 0 ? 0 : activeIndexRef.current + const nextIndex = Math.min(Math.max(baseIndex + delta, 0), nodes.length - 1) + handleHighlightIndex(nextIndex, 'keyboard') + } + document.addEventListener('keydown', handleKeyDown, true) + return () => { + document.removeEventListener('keydown', handleKeyDown, true) + } + }, [enableKeyboardNavigation, handleHighlightIndex, handleSelectItem]) + + return ( + <> + {shouldShowSearchInput && ( + <> +
+ setSearchText(e.target.value)} + onClick={e => e.stopPropagation()} + onKeyDown={handleKeyDown} + onClear={() => setSearchText('')} + onBlur={onBlur} + autoFocus={autoFocus} + /> +
+
+ + )} + + {filteredNodes.length > 0 + ? ( +
+ {filteredNodes.map((node, index) => ( + handleHighlightIndex(index, 'mouse') : undefined} + registerRef={enableKeyboardNavigation + ? (element) => { + itemRefs.current[index] = element + } + : undefined} + /> + ))} +
+ ) + : ( +
+ {t('common.noAgentNodes', { ns: 'workflow' })} +
+ )} + + ) +} + +export default React.memo(AgentNodeList) diff --git a/web/app/components/workflow/nodes/_base/components/form-input-item.tsx b/web/app/components/workflow/nodes/_base/components/form-input-item.tsx index 2dfdc32ad3..7b3a3a108f 100644 --- a/web/app/components/workflow/nodes/_base/components/form-input-item.tsx +++ b/web/app/components/workflow/nodes/_base/components/form-input-item.tsx @@ -1,6 +1,6 @@ 'use client' import type { FC } from 'react' -import type { ResourceVarInputs } from '../types' +import type { MentionConfig, ResourceVarInputs } from '../types' import type { CredentialFormSchema, FormOption } from '@/app/components/header/account-setting/model-provider-page/declarations' import type { Event, Tool } from '@/app/components/tools/types' import type { TriggerWithProvider } from '@/app/components/workflow/block-selector/types' @@ -313,13 +313,33 @@ const FormInputItem: FC = ({ } } - const handleValueChange = (newValue: any) => { + const handleValueChange = (newValue: any, newType?: VarKindType, mentionConfig?: MentionConfig | null) => { + const normalizedValue = isNumber ? Number.parseFloat(newValue) : newValue + const assemblePlaceholder = nodeId && variable + ? `{{#${nodeId}_ext_${variable}.result#}}` + : '' + const isAssembleValue = typeof normalizedValue === 'string' + && assemblePlaceholder + && normalizedValue.includes(assemblePlaceholder) + const resolvedType = isAssembleValue + ? VarKindType.mixed + : newType ?? (varInput?.type === VarKindType.mention ? VarKindType.mention : getVarKindType()) + const resolvedMentionConfig = resolvedType === VarKindType.mention + ? (mentionConfig ?? varInput?.mention_config ?? { + extractor_node_id: '', + output_selector: [], + null_strategy: 'use_default', + default_value: '', + }) + : undefined + onChange({ ...value, [variable]: { ...varInput, - type: getVarKindType(), - value: isNumber ? 
Number.parseFloat(newValue) : newValue, + type: resolvedType, + value: normalizedValue, + mention_config: resolvedMentionConfig, }, }) } @@ -435,6 +455,8 @@ const FormInputItem: FC = ({ currentTool={currentTool} currentProvider={currentProvider} isFilterFileVar={isBoolean} + toolNodeId={nodeId} + paramKey={variable} /> )} {isNumber && isConstant && ( @@ -481,13 +503,13 @@ const FormInputItem: FC = ({ placeholder={placeholder?.[language] || placeholder?.en_US} renderOption={options.some((opt: any) => opt.icon) ? ({ item }) => ( -
- {item.icon && ( - - )} - {item.name} -
- ) +
+ {item.icon && ( + + )} + {item.name} +
+ ) : undefined} /> )} @@ -592,14 +614,14 @@ const FormInputItem: FC = ({ {isLoadingOptions ? ( - - ) + + ) : ( - diff --git a/web/app/components/workflow/nodes/_base/components/node-control.tsx b/web/app/components/workflow/nodes/_base/components/node-control.tsx index 0c705191d5..610ec6d2a9 100644 --- a/web/app/components/workflow/nodes/_base/components/node-control.tsx +++ b/web/app/components/workflow/nodes/_base/components/node-control.tsx @@ -13,6 +13,7 @@ import { Stop, } from '@/app/components/base/icons/src/vender/line/mediaAndDevices' import Tooltip from '@/app/components/base/tooltip' +import { useHooksStore } from '@/app/components/workflow/hooks-store' import { useWorkflowStore } from '@/app/components/workflow/store' import { useNodesInteractions, @@ -30,12 +31,18 @@ const NodeControl: FC = ({ const [open, setOpen] = useState(false) const { handleNodeSelect } = useNodesInteractions() const workflowStore = useWorkflowStore() + const interactionMode = useHooksStore(s => s.interactionMode) const isSingleRunning = data._singleRunningStatus === NodeRunningStatus.Running const handleOpenChange = useCallback((newOpen: boolean) => { setOpen(newOpen) }, []) const isChildNode = !!(data.isInIteration || data.isInLoop) + const allowNodeMenu = interactionMode !== 'subgraph' + const canSingleRun = canRunBySingle(data.type, isChildNode) + + if (!allowNodeMenu && !canSingleRun) + return null return (
= ({ onClick={e => e.stopPropagation()} > { - canRunBySingle(data.type, isChildNode) && ( + canSingleRun && (
{ @@ -80,13 +87,15 @@ const NodeControl: FC = ({
) } - + {allowNodeMenu && ( + + )}
) diff --git a/web/app/components/workflow/nodes/_base/components/node-handle.tsx b/web/app/components/workflow/nodes/_base/components/node-handle.tsx index 1bd8ea84e8..30e23e4f6f 100644 --- a/web/app/components/workflow/nodes/_base/components/node-handle.tsx +++ b/web/app/components/workflow/nodes/_base/components/node-handle.tsx @@ -12,6 +12,7 @@ import { Handle, Position, } from 'reactflow' +import { useHooksStore } from '@/app/components/workflow/hooks-store' import { cn } from '@/utils/classnames' import BlockSelector from '../../../block-selector' import { @@ -46,6 +47,8 @@ export const NodeTargetHandle = memo(({ const [open, setOpen] = useState(false) const { handleNodeAdd } = useNodesInteractions() const { getNodesReadOnly } = useNodesReadOnly() + const interactionMode = useHooksStore(s => s.interactionMode) + const allowGraphActions = interactionMode !== 'subgraph' const connected = data._connectedTargetHandleIds?.includes(handleId) const { availablePrevBlocks } = useAvailableBlocks(data.type, data.isInIteration || data.isInLoop) const isConnectable = !!availablePrevBlocks.length @@ -55,9 +58,9 @@ export const NodeTargetHandle = memo(({ }, []) const handleHandleClick = useCallback((e: MouseEvent) => { e.stopPropagation() - if (!connected) + if (!connected && allowGraphActions) setOpen(v => !v) - }, [connected]) + }, [allowGraphActions, connected]) const handleSelect = useCallback((type: BlockEnum, pluginDefaultValue?: PluginDefaultValue) => { handleNodeAdd( { @@ -91,11 +94,11 @@ export const NodeTargetHandle = memo(({ || data.type === BlockEnum.TriggerPlugin) && 'opacity-0', handleClassName, )} - isConnectable={isConnectable} - onClick={handleHandleClick} + isConnectable={allowGraphActions && isConnectable} + onClick={allowGraphActions ? 
handleHandleClick : undefined} > { - !connected && isConnectable && !getNodesReadOnly() && ( + allowGraphActions && !connected && isConnectable && !getNodesReadOnly() && ( s.interactionMode) + const allowGraphActions = interactionMode !== 'subgraph' const { availableNextBlocks } = useAvailableBlocks(data.type, data.isInIteration || data.isInLoop) const isConnectable = !!availableNextBlocks.length const isChatMode = useIsChatMode() @@ -145,8 +150,9 @@ export const NodeSourceHandle = memo(({ }, []) const handleHandleClick = useCallback((e: MouseEvent) => { e.stopPropagation() - setOpen(v => !v) - }, []) + if (allowGraphActions) + setOpen(v => !v) + }, [allowGraphActions]) const handleSelect = useCallback((type: BlockEnum, pluginDefaultValue?: PluginDefaultValue) => { handleNodeAdd( { @@ -161,7 +167,7 @@ export const NodeSourceHandle = memo(({ }, [handleNodeAdd, id, handleId]) useEffect(() => { - if (!shouldAutoOpenStartNodeSelector) + if (!shouldAutoOpenStartNodeSelector || !allowGraphActions) return if (isChatMode) { @@ -198,8 +204,8 @@ export const NodeSourceHandle = memo(({ !connected && 'after:opacity-0', handleClassName, )} - isConnectable={isConnectable} - onClick={handleHandleClick} + isConnectable={allowGraphActions && isConnectable} + onClick={allowGraphActions ? handleHandleClick : undefined} >
@@ -214,7 +220,7 @@ export const NodeSourceHandle = memo(({
{ - isConnectable && !getNodesReadOnly() && ( + allowGraphActions && isConnectable && !getNodesReadOnly() && ( edge.target === id) const nodeMetaData = useNodeMetaData({ id, data } as Node) - const showChangeBlock = !nodeMetaData.isTypeFixed && !nodesReadOnly + const showChangeBlock = !nodeMetaData.isTypeFixed && !nodesReadOnly && data.type !== BlockEnum.Group const isChildNode = !!(data.isInIteration || data.isInLoop) const { data: workflowTools } = useAllWorkflowTools() @@ -61,6 +62,25 @@ const PanelOperatorPopup = ({ return (
+ { + !nodesReadOnly && data.type === BlockEnum.Group && ( + <> +
+
{ + onClosePopup() + handleUngroup(id) + }} + > + {t('panel.ungroup', { ns: 'workflow' })} + +
+
+
+ + ) + } { (showChangeBlock || canRunBySingle(data.type, isChildNode)) && ( <> diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx index 6dfcbaf4d8..bca8b79c14 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-picker.tsx @@ -84,6 +84,7 @@ type Props = { currentTool?: Tool currentProvider?: ToolWithProvider | TriggerWithProvider preferSchemaType?: boolean + hideSearch?: boolean } const DEFAULT_VALUE_SELECTOR: Props['value'] = [] @@ -117,6 +118,7 @@ const VarReferencePicker: FC = ({ currentTool, currentProvider, preferSchemaType, + hideSearch, }) => { const { t } = useTranslation() const store = useStoreApi() @@ -636,6 +638,7 @@ const VarReferencePicker: FC = ({ isSupportFileVar={isSupportFileVar} zIndex={zIndex} preferSchemaType={preferSchemaType} + hideSearch={hideSearch} /> )} diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx index 6184bcad9f..561016132b 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-popup.tsx @@ -15,6 +15,7 @@ type Props = { onChange: (value: ValueSelector, varDetail: Var) => void itemWidth?: number isSupportFileVar?: boolean + hideSearch?: boolean zIndex?: number preferSchemaType?: boolean } @@ -24,6 +25,7 @@ const VarReferencePopup: FC = ({ onChange, itemWidth, isSupportFileVar = true, + hideSearch, zIndex, preferSchemaType, }) => { @@ -35,7 +37,7 @@ const VarReferencePopup: FC = ({ // max-h-[300px] overflow-y-auto todo: use portal to handle long list return (
= ({ showManageInputField={showManageRagInputFields} onManageInputField={() => setShowInputFieldPanel?.(true)} preferSchemaType={preferSchemaType} + hideSearch={hideSearch} /> )}
diff --git a/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx b/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx index d44f560e08..176ff0a760 100644 --- a/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx +++ b/web/app/components/workflow/nodes/_base/components/variable/var-reference-vars.tsx @@ -6,10 +6,10 @@ import type { NodeOutPutVar, ValueSelector, Var } from '@/app/components/workflo import { useHover } from 'ahooks' import { noop } from 'es-toolkit/function' import * as React from 'react' -import { useEffect, useMemo, useRef, useState } from 'react' +import { useCallback, useEffect, useMemo, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { ChevronRight } from '@/app/components/base/icons/src/vender/line/arrows' -import { CodeAssistant, MagicEdit } from '@/app/components/base/icons/src/vender/line/general' +import { AssembleVariables, CodeAssistant, MagicEdit } from '@/app/components/base/icons/src/vender/line/general' import { Variable02 } from '@/app/components/base/icons/src/vender/solid/development' import Input from '@/app/components/base/input' import { @@ -43,6 +43,31 @@ type ItemProps = { zIndex?: number className?: string preferSchemaType?: boolean + isHighlighted?: boolean + onSetHighlight?: () => void + registerRef?: (element: HTMLDivElement | null) => void +} + +const buildValueSelector = ({ + nodeId, + objPath, + itemData, + isFlat, +}: { + nodeId: string + objPath: string[] + itemData: Var + isFlat?: boolean +}): ValueSelector => { + if (isFlat) + return [itemData.variable] + const isSys = itemData.variable.startsWith('sys.') + const isEnv = itemData.variable.startsWith('env.') + const isChatVar = itemData.variable.startsWith('conversation.') + const isRagVariable = itemData.isRagVariable + if (isSys || isEnv || isChatVar || isRagVariable) + return [...objPath, ...itemData.variable.split('.')] + return 
[nodeId, ...objPath, itemData.variable] } const Item: FC = ({ @@ -60,6 +85,9 @@ const Item: FC = ({ zIndex, className, preferSchemaType, + isHighlighted, + onSetHighlight, + registerRef, }) => { const isStructureOutput = itemData.type === VarType.object && (itemData.children as StructuredOutput)?.schema?.properties const isFile = itemData.type === VarType.file && !isStructureOutput @@ -123,6 +151,10 @@ const Item: FC = ({ })() const itemRef = useRef(null) + const setItemRef = useCallback((element: HTMLDivElement | null) => { + itemRef.current = element + registerRef?.(element) + }, [registerRef]) const [isItemHovering, setIsItemHovering] = useState(false) useHover(itemRef, { onChange: (hovering) => { @@ -152,15 +184,12 @@ const Item: FC = ({ if (!isSupportFileVar && isFile) return - if (isFlat) { - onChange([itemData.variable], itemData) - } - else if (isSys || isEnv || isChatVar || isRagVariable) { // system variable | environment variable | conversation variable - onChange([...objPath, ...itemData.variable.split('.')], itemData) - } - else { - onChange([nodeId, ...objPath, itemData.variable], itemData) - } + onChange(buildValueSelector({ + nodeId, + objPath, + itemData, + isFlat, + }), itemData) } const variableCategory = useMemo(() => { if (isEnv) @@ -181,14 +210,15 @@ const Item: FC = ({ >
e.preventDefault()} >
@@ -255,8 +285,12 @@ type Props = { isInCodeGeneratorInstructionEditor?: boolean showManageInputField?: boolean onManageInputField?: () => void + showAssembleVariables?: boolean + onAssembleVariables?: () => ValueSelector | null autoFocus?: boolean preferSchemaType?: boolean + externalSearchText?: string + enableKeyboardNavigation?: boolean } const VarReferenceVars: FC = ({ hideSearch, @@ -272,11 +306,19 @@ const VarReferenceVars: FC = ({ isInCodeGeneratorInstructionEditor, showManageInputField, onManageInputField, + showAssembleVariables, + onAssembleVariables, autoFocus = true, preferSchemaType, + externalSearchText, + enableKeyboardNavigation = false, }) => { const { t } = useTranslation() const [searchText, setSearchText] = useState('') + const normalizedSearchText = externalSearchText === undefined ? searchText : externalSearchText + const normalizedSearchTextTrimmed = normalizedSearchText.trim() + const normalizedSearchTextLower = normalizedSearchTextTrimmed.toLowerCase() + const shouldShowSearchInput = !hideSearch && externalSearchText === undefined const handleKeyDown = (e: React.KeyboardEvent) => { if (e.key === 'Escape') { @@ -285,35 +327,166 @@ const VarReferenceVars: FC = ({ } } - const filteredVars = vars.filter((v) => { - const children = v.vars.filter(v => checkKeys([v.variable], false).isValid || isSpecialVar(v.variable.split('.')[0])) - return children.length > 0 - }).filter((node) => { - if (!searchText) - return node - const children = node.vars.filter((v) => { - const searchTextLower = searchText.toLowerCase() - return v.variable.toLowerCase().includes(searchTextLower) || node.title.toLowerCase().includes(searchTextLower) - }) - return children.length > 0 - }).map((node) => { - let vars = node.vars.filter(v => checkKeys([v.variable], false).isValid || isSpecialVar(v.variable.split('.')[0])) - if (searchText) { - const searchTextLower = searchText.toLowerCase() - if (!node.title.toLowerCase().includes(searchTextLower)) - vars = vars.filter(v => 
v.variable.toLowerCase().includes(searchText.toLowerCase())) - } + const handleAssembleVariables = (e: React.MouseEvent) => { + e.preventDefault() + e.stopPropagation() + onAssembleVariables?.() + onClose?.() + } - return { - ...node, - vars, + const validatedVars = useMemo(() => { + const res: NodeOutPutVar[] = [] + vars.forEach((node) => { + const nodeVars = node.vars.filter(v => checkKeys([v.variable], false).isValid || isSpecialVar(v.variable.split('.')[0])) + if (nodeVars.length === 0) + return + res.push({ + ...node, + vars: nodeVars, + }) + }) + return res + }, [vars]) + + const filteredVars = useMemo(() => { + if (!normalizedSearchTextTrimmed) + return validatedVars + const res: NodeOutPutVar[] = [] + validatedVars.forEach((node) => { + const titleLower = node.title.toLowerCase() + const matchedByTitle = titleLower.includes(normalizedSearchTextLower) + const nodeVars = matchedByTitle + ? node.vars + : node.vars.filter(v => v.variable.toLowerCase().includes(normalizedSearchTextLower)) + if (nodeVars.length === 0) + return + res.push({ + ...node, + vars: nodeVars, + }) + }) + return res + }, [normalizedSearchTextLower, normalizedSearchTextTrimmed, validatedVars]) + + const flatItems = useMemo(() => { + const items: Array<{ node: NodeOutPutVar, itemData: Var }> = [] + filteredVars.forEach((node) => { + node.vars.forEach((itemData) => { + items.push({ node, itemData }) + }) + }) + return items + }, [filteredVars]) + const [activeIndex, setActiveIndex] = useState(-1) + const itemRefs = useRef>([]) + const lastInteractionRef = useRef<'keyboard' | 'mouse' | 'filter' | null>(null) + const flatItemsRef = useRef(flatItems) + const activeIndexRef = useRef(activeIndex) + const onCloseRef = useRef(onClose) + const resolvedActiveIndex = useMemo(() => { + if (!enableKeyboardNavigation || flatItems.length === 0) + return -1 + if (activeIndex < 0 || activeIndex >= flatItems.length) + return 0 + return activeIndex + }, [activeIndex, enableKeyboardNavigation, 
flatItems.length]) + + useEffect(() => { + itemRefs.current = [] + }, [flatItems.length]) + + useEffect(() => { + flatItemsRef.current = flatItems + }, [flatItems]) + + useEffect(() => { + activeIndexRef.current = resolvedActiveIndex + }, [resolvedActiveIndex]) + + useEffect(() => { + onCloseRef.current = onClose + }, [onClose]) + + const handleHighlightIndex = useCallback((index: number, source: 'keyboard' | 'mouse' | 'filter') => { + lastInteractionRef.current = source + setActiveIndex(index) + }, []) + + useEffect(() => { + if (!enableKeyboardNavigation || flatItems.length === 0) { + lastInteractionRef.current = 'filter' + return } - }) + if (activeIndex < 0 || activeIndex >= flatItems.length) + lastInteractionRef.current = 'filter' + }, [activeIndex, enableKeyboardNavigation, flatItems.length]) + + useEffect(() => { + if (!enableKeyboardNavigation || resolvedActiveIndex < 0) + return + if (lastInteractionRef.current !== 'keyboard') + return + const target = itemRefs.current[resolvedActiveIndex] + if (target) + target.scrollIntoView({ block: 'nearest' }) + lastInteractionRef.current = null + }, [enableKeyboardNavigation, flatItems.length, resolvedActiveIndex]) + + const handleSelectItem = useCallback((item: { node: NodeOutPutVar, itemData: Var }) => { + const isStructureOutput = item.itemData.type === VarType.object + && (item.itemData.children as StructuredOutput | undefined)?.schema?.properties + const isFile = item.itemData.type === VarType.file && !isStructureOutput + if (!isSupportFileVar && isFile) + return + const valueSelector = buildValueSelector({ + nodeId: item.node.nodeId, + objPath: [], + itemData: item.itemData, + isFlat: item.node.isFlat, + }) + onChange(valueSelector, item.itemData) + onClose?.() + }, [onChange, onClose, isSupportFileVar]) + + useEffect(() => { + if (!enableKeyboardNavigation) + return + const handleKeyDown = (event: KeyboardEvent) => { + const items = flatItemsRef.current + if (items.length === 0) + return + if (!['ArrowDown', 
'ArrowUp', 'Enter', 'Escape'].includes(event.key)) + return + event.preventDefault() + event.stopPropagation() + if (event.key === 'Escape') { + onCloseRef.current?.() + return + } + if (event.key === 'Enter') { + const index = activeIndexRef.current + if (index < 0 || index >= items.length) + return + handleSelectItem(items[index]) + return + } + const delta = event.key === 'ArrowDown' ? 1 : -1 + const baseIndex = activeIndexRef.current < 0 ? 0 : activeIndexRef.current + const nextIndex = Math.min(Math.max(baseIndex + delta, 0), items.length - 1) + handleHighlightIndex(nextIndex, 'keyboard') + } + document.addEventListener('keydown', handleKeyDown, true) + return () => { + document.removeEventListener('keydown', handleKeyDown, true) + } + }, [enableKeyboardNavigation, handleHighlightIndex, handleSelectItem]) + + let runningIndex = -1 return ( <> { - !hideSearch && ( + shouldShowSearchInput && ( <>
e.stopPropagation()}> = ({ ) } + { + showAssembleVariables && ( +
+ +
+ ) + } {filteredVars.length > 0 ? (
@@ -355,24 +547,35 @@ const VarReferenceVars: FC = ({ {item.title}
)} - {item.vars.map((v, j) => ( - - ))} + {item.vars.map((v, j) => { + runningIndex += 1 + const itemIndex = runningIndex + return ( + handleHighlightIndex(itemIndex, 'mouse') : undefined} + registerRef={enableKeyboardNavigation + ? (element) => { + itemRefs.current[itemIndex] = element + } + : undefined} + /> + ) + })} {item.isFlat && !filteredVars[i + 1]?.isFlat && !!filteredVars.find(item => !item.isFlat) && (
diff --git a/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx b/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx index c834f29ab3..0191c1e144 100644 --- a/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx +++ b/web/app/components/workflow/nodes/_base/components/workflow-panel/index.tsx @@ -231,6 +231,8 @@ const BasePanel: FC = ({ } = useNodesMetaData() const configsMap = useHooksStore(s => s.configsMap) + const interactionMode = useHooksStore(s => s.interactionMode) + const allowGraphActions = interactionMode !== 'subgraph' const { isShowSingleRun, hideSingleRun, @@ -514,9 +516,9 @@ const BasePanel: FC = ({ ) } - - -
+ {allowGraphActions && } + {allowGraphActions && } + {allowGraphActions &&
}
handleNodeSelect(id, true)} @@ -594,7 +596,7 @@ const BasePanel: FC = ({ ) } { - !needsToolAuth && !currentDataSource && !currentTriggerPlugin && ( + !needsToolAuth && !currentDataSource && !currentTriggerPlugin && data.type !== BlockEnum.Group && (
= ({
) } - + {data.type !== BlockEnum.Group && }
- {tabType === TabType.settings && ( + {(tabType === TabType.settings || data.type === BlockEnum.Group) && (
{cloneElement(children as any, { @@ -639,7 +641,7 @@ const BasePanel: FC = ({ ) } { - !!availableNextBlocks.length && ( + allowGraphActions && !!availableNextBlocks.length && (
{t('panel.nextStep', { ns: 'workflow' }).toLocaleUpperCase()} @@ -651,7 +653,7 @@ const BasePanel: FC = ({
) } - {readmeEntranceComponent} + {allowGraphActions ? readmeEntranceComponent : null}
)} diff --git a/web/app/components/workflow/nodes/_base/components/workflow-panel/last-run/use-last-run.ts b/web/app/components/workflow/nodes/_base/components/workflow-panel/last-run/use-last-run.ts index dafac33124..6baa94c94a 100644 --- a/web/app/components/workflow/nodes/_base/components/workflow-panel/last-run/use-last-run.ts +++ b/web/app/components/workflow/nodes/_base/components/workflow-panel/last-run/use-last-run.ts @@ -57,6 +57,7 @@ const singleRunFormParamsHooks: Record = { [BlockEnum.VariableAggregator]: useVariableAggregatorSingleRunFormParams, [BlockEnum.Assigner]: useVariableAssignerSingleRunFormParams, [BlockEnum.KnowledgeBase]: useKnowledgeBaseSingleRunFormParams, + [BlockEnum.Group]: undefined, [BlockEnum.VariableAssigner]: undefined, [BlockEnum.End]: undefined, [BlockEnum.Answer]: undefined, @@ -105,6 +106,7 @@ const getDataForCheckMoreHooks: Record = { [BlockEnum.DataSource]: undefined, [BlockEnum.DataSourceEmpty]: undefined, [BlockEnum.KnowledgeBase]: undefined, + [BlockEnum.Group]: undefined, [BlockEnum.TriggerWebhook]: undefined, [BlockEnum.TriggerSchedule]: undefined, [BlockEnum.TriggerPlugin]: useTriggerPluginGetDataForCheckMore, diff --git a/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts b/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts index f226900899..e687813b69 100644 --- a/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts +++ b/web/app/components/workflow/nodes/_base/hooks/use-available-var-list.ts @@ -71,14 +71,35 @@ const useAvailableVarList = (nodeId: string, { hideEnv, hideChatVar, }), ...dataSourceRagVars] + const availableNodesWithParent = [ + ...availableNodes, + ...(isDataSourceNode ? [currNode] : []), + ] + const llmNodeIds = new Set( + availableNodesWithParent + .filter(node => node?.data.type === BlockEnum.LLM) + .map(node => node!.id), + ) + const filteredAvailableVars = llmNodeIds.size + ? 
availableVars + .map((nodeVar) => { + if (!llmNodeIds.has(nodeVar.nodeId)) + return nodeVar + const nextVars = nodeVar.vars.filter(item => item.variable !== 'context') + if (nextVars.length === nodeVar.vars.length) + return nodeVar + return { + ...nodeVar, + vars: nextVars, + } + }) + .filter(nodeVar => nodeVar.vars.length > 0) + : availableVars return { - availableVars, + availableVars: filteredAvailableVars, availableNodes, - availableNodesWithParent: [ - ...availableNodes, - ...(isDataSourceNode ? [currNode] : []), - ], + availableNodesWithParent, } } diff --git a/web/app/components/workflow/nodes/_base/node.tsx b/web/app/components/workflow/nodes/_base/node.tsx index d2cc8195e4..c484ff48f2 100644 --- a/web/app/components/workflow/nodes/_base/node.tsx +++ b/web/app/components/workflow/nodes/_base/node.tsx @@ -63,6 +63,11 @@ const BaseNode: FC = ({ const { t } = useTranslation() const nodeRef = useRef(null) const { nodesReadOnly } = useNodesReadOnly() + const { _subGraphEntry, _iconTypeOverride } = data as { + _subGraphEntry?: boolean + _iconTypeOverride?: BlockEnum + } + const iconType = _iconTypeOverride ?? data.type const { handleNodeIterationChildSizeChange } = useNodeIterationInteractions() const { handleNodeLoopChildSizeChange } = useNodeLoopInteractions() @@ -138,6 +143,48 @@ const BaseNode: FC = ({ return null }, [data._loopIndex, data._runningStatus, t]) + if (_subGraphEntry) { + return ( +
+ +
+
+ +
+ {data.title} +
+
+
+
+ ) + } + const nodeContent = (
= ({ ) } { - data.type !== BlockEnum.IfElse && data.type !== BlockEnum.QuestionClassifier && !data._isCandidate && ( + data.type !== BlockEnum.IfElse && data.type !== BlockEnum.QuestionClassifier && data.type !== BlockEnum.Group && !data._isCandidate && ( = ({ > @@ -344,8 +391,9 @@ const BaseNode: FC = ({ const isStartNode = data.type === BlockEnum.Start const isEntryNode = isTriggerNode(data.type as any) || isStartNode + const shouldWrapEntryNode = isEntryNode && !(isStartNode && _subGraphEntry) - return isEntryNode + return shouldWrapEntryNode ? ( // Base resource interface diff --git a/web/app/components/workflow/nodes/code/use-config.ts b/web/app/components/workflow/nodes/code/use-config.ts index fdb1c8ce51..2655dd1d43 100644 --- a/web/app/components/workflow/nodes/code/use-config.ts +++ b/web/app/components/workflow/nodes/code/use-config.ts @@ -56,17 +56,21 @@ const useConfig = (id: string, payload: CodeNodeType) => { setInputs, }) - const [outputKeyOrders, setOutputKeyOrders] = useState([]) + const [outputKeyOrders, setOutputKeyOrders] = useState(() => Object.keys(payload.outputs || {})) const syncOutputKeyOrders = useCallback((outputs: OutputVar) => { setOutputKeyOrders(Object.keys(outputs)) }, []) useEffect(() => { - if (inputs.code) { - if (inputs.outputs && Object.keys(inputs.outputs).length > 0) - syncOutputKeyOrders(inputs.outputs) + const outputKeys = inputs.outputs ? Object.keys(inputs.outputs) : [] + if (outputKeys.length > 0 && outputKeyOrders.length === 0) + syncOutputKeyOrders(inputs.outputs) + const hasExistingConfig = Boolean(inputs.code) + || (inputs.variables?.length ?? 
0) > 0 + || outputKeys.length > 0 + + if (hasExistingConfig) return - } const isReady = defaultConfig && Object.keys(defaultConfig).length > 0 if (isReady) { @@ -76,7 +80,7 @@ const useConfig = (id: string, payload: CodeNodeType) => { }) syncOutputKeyOrders(defaultConfig.outputs) } - }, [defaultConfig]) + }, [defaultConfig, inputs.code, inputs.outputs, inputs.variables, outputKeyOrders.length, setInputs, syncOutputKeyOrders]) const handleCodeChange = useCallback((code: string) => { const newInputs = produce(inputs, (draft) => { diff --git a/web/app/components/workflow/nodes/components.ts b/web/app/components/workflow/nodes/components.ts index 5a82a496d9..211ec0bccf 100644 --- a/web/app/components/workflow/nodes/components.ts +++ b/web/app/components/workflow/nodes/components.ts @@ -16,6 +16,8 @@ import DocExtractorNode from './document-extractor/node' import DocExtractorPanel from './document-extractor/panel' import EndNode from './end/node' import EndPanel from './end/panel' +import GroupNode from './group/node' +import GroupPanel from './group/panel' import HttpNode from './http/node' import HttpPanel from './http/panel' import IfElseNode from './if-else/node' @@ -78,6 +80,7 @@ export const NodeComponentMap: Record> = { [BlockEnum.TriggerWebhook]: TriggerWebhookNode, [BlockEnum.TriggerPlugin]: TriggerPluginNode, [BlockEnum.Command]: CommandNode, + [BlockEnum.Group]: GroupNode, } export const PanelComponentMap: Record> = { @@ -107,4 +110,5 @@ export const PanelComponentMap: Record> = { [BlockEnum.TriggerWebhook]: TriggerWebhookPanel, [BlockEnum.TriggerPlugin]: TriggerPluginPanel, [BlockEnum.Command]: CommandPanel, + [BlockEnum.Group]: GroupPanel, } diff --git a/web/app/components/workflow/nodes/group/default.ts b/web/app/components/workflow/nodes/group/default.ts new file mode 100644 index 0000000000..b46d3544b6 --- /dev/null +++ b/web/app/components/workflow/nodes/group/default.ts @@ -0,0 +1,26 @@ +import type { NodeDefault } from '../../types' +import type { 
GroupNodeData } from './types' +import { BlockEnum } from '@/app/components/workflow/types' +import { genNodeMetaData } from '@/app/components/workflow/utils' + +const metaData = genNodeMetaData({ + sort: 100, + type: BlockEnum.Group, +}) + +const nodeDefault: NodeDefault = { + metaData, + defaultValue: { + members: [], + handlers: [], + headNodeIds: [], + leafNodeIds: [], + }, + checkValid() { + return { + isValid: true, + } + }, +} + +export default nodeDefault diff --git a/web/app/components/workflow/nodes/group/node.tsx b/web/app/components/workflow/nodes/group/node.tsx new file mode 100644 index 0000000000..37cd5e0419 --- /dev/null +++ b/web/app/components/workflow/nodes/group/node.tsx @@ -0,0 +1,94 @@ +import type { GroupHandler, GroupMember, GroupNodeData } from './types' +import type { BlockEnum, NodeProps } from '@/app/components/workflow/types' +import { RiArrowRightSLine } from '@remixicon/react' +import { memo, useMemo } from 'react' +import BlockIcon from '@/app/components/workflow/block-icon' +import { cn } from '@/utils/classnames' +import { NodeSourceHandle } from '../_base/components/node-handle' + +const MAX_MEMBER_ICONS = 12 + +const GroupNode = (props: NodeProps) => { + const { data } = props + + // show the explicitly passed members first; otherwise use the _children information to fill the type + const members: GroupMember[] = useMemo(() => ( + data.members?.length + ? data.members + : data._children?.length + ? data._children.map(child => ({ + id: child.nodeId, + type: child.nodeType as BlockEnum, + label: child.nodeType, + })) + : [] + ), [data._children, data.members]) + + const handlers: GroupHandler[] = useMemo(() => ( + data.handlers?.length + ? data.handlers + : members.length + ? members.map(member => ({ + id: `${member.id}-source`, + label: member.label || member.id, + nodeId: member.id, + sourceHandle: 'source', + })) + : [] + ), [data.handlers, members]) + + return ( +
+ {members.length > 0 && ( +
+
+ {members.slice(0, MAX_MEMBER_ICONS).map(member => ( +
+ +
+ ))} + {members.length > MAX_MEMBER_ICONS && ( +
+ + + {members.length - MAX_MEMBER_ICONS} +
+ )} +
+ +
+ )} + {handlers.length > 0 && ( +
+ {handlers.map(handler => ( +
+ {handler.label || handler.id} + +
+ ))} +
+ )} +
+ ) +} + +GroupNode.displayName = 'GroupNode' + +export default memo(GroupNode) diff --git a/web/app/components/workflow/nodes/group/panel.tsx b/web/app/components/workflow/nodes/group/panel.tsx new file mode 100644 index 0000000000..a36d074e9d --- /dev/null +++ b/web/app/components/workflow/nodes/group/panel.tsx @@ -0,0 +1,9 @@ +import { memo } from 'react' + +const GroupPanel = () => { + return null +} + +GroupPanel.displayName = 'GroupPanel' + +export default memo(GroupPanel) diff --git a/web/app/components/workflow/nodes/group/types.ts b/web/app/components/workflow/nodes/group/types.ts new file mode 100644 index 0000000000..5f16b0e981 --- /dev/null +++ b/web/app/components/workflow/nodes/group/types.ts @@ -0,0 +1,21 @@ +import type { BlockEnum, CommonNodeType } from '../../types' + +export type GroupMember = { + id: string + type: BlockEnum + label?: string +} + +export type GroupHandler = { + id: string + label?: string + nodeId?: string // leaf node id for multi-branch nodes + sourceHandle?: string // original sourceHandle (e.g., case_id for if-else) +} + +export type GroupNodeData = CommonNodeType<{ + members?: GroupMember[] + handlers?: GroupHandler[] + headNodeIds?: string[] // nodes that receive input from outside the group + leafNodeIds?: string[] // nodes that send output to outside the group +}> diff --git a/web/app/components/workflow/nodes/llm/components/config-context-item.tsx b/web/app/components/workflow/nodes/llm/components/config-context-item.tsx new file mode 100644 index 0000000000..6960b27b54 --- /dev/null +++ b/web/app/components/workflow/nodes/llm/components/config-context-item.tsx @@ -0,0 +1,130 @@ +'use client' +import type { FC } from 'react' +import type { PromptMessageContext, ValueSelector } from '../../../types' +import type { Node, NodeOutPutVar, Var } from '@/app/components/workflow/types' +import { RiArrowDownSLine, RiDeleteBinLine } from '@remixicon/react' +import { memo, useCallback, useMemo, useState } from 'react' +import { 
useTranslation } from 'react-i18next' +import { + PortalToFollowElem, + PortalToFollowElemContent, + PortalToFollowElemTrigger, +} from '@/app/components/base/portal-to-follow-elem' +import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars' +import VariableLabelInSelect from '@/app/components/workflow/nodes/_base/components/variable/variable-label/variable-label-in-select' +import { BlockEnum } from '@/app/components/workflow/types' +import { cn } from '@/utils/classnames' + +type Props = { + readOnly: boolean + payload: PromptMessageContext + contextVars: NodeOutPutVar[] + availableNodes: Node[] + onChange: (value: ValueSelector) => void + onRemove: () => void +} + +const ConfigContextItem: FC = ({ + readOnly, + payload, + contextVars, + availableNodes, + onChange, + onRemove, +}) => { + const { t } = useTranslation() + const [open, setOpen] = useState(false) + + const selectedNodeId = Array.isArray(payload.$context) ? payload.$context[0] : '' + const selectedNode = useMemo(() => { + return availableNodes.find(node => node.id === selectedNodeId) + }, [availableNodes, selectedNodeId]) + const hasOptions = contextVars.length > 0 + + const handleChange = useCallback((value: ValueSelector, _item?: Var) => { + onChange(value) + setOpen(false) + }, [onChange]) + + const handleToggle = useCallback(() => { + if (readOnly) + return + setOpen(prev => !prev) + }, [readOnly]) + + const handleRemove = useCallback(() => { + onRemove() + setOpen(false) + }, [onRemove]) + + return ( + + + + + +
+ {hasOptions + ? ( + setOpen(false)} + onBlur={() => setOpen(false)} + autoFocus={false} + preferSchemaType + /> + ) + : ( +
+ {t('common.noAgentNodes', { ns: 'workflow' })} +
+ )} + {!readOnly && ( +
+ +
+ )} +
+
+
+ ) +} + +export default memo(ConfigContextItem) diff --git a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx index 5b28c9b48f..d88ec95f34 100644 --- a/web/app/components/workflow/nodes/llm/components/config-prompt.tsx +++ b/web/app/components/workflow/nodes/llm/components/config-prompt.tsx @@ -1,31 +1,37 @@ 'use client' import type { FC } from 'react' -import type { ModelConfig, PromptItem, ValueSelector, Var, Variable } from '../../../types' +import type { ModelConfig, Node, NodeOutPutVar, PromptItem, PromptMessageContext, PromptTemplateItem, ValueSelector, Var, Variable } from '../../../types' import { produce } from 'immer' import * as React from 'react' -import { useCallback } from 'react' +import { useCallback, useMemo, useRef, useState } from 'react' import { useTranslation } from 'react-i18next' import { ReactSortable } from 'react-sortablejs' import { v4 as uuid4 } from 'uuid' import { DragHandle } from '@/app/components/base/icons/src/vender/line/others' +import { + PortalToFollowElem, + PortalToFollowElemContent, + PortalToFollowElemTrigger, +} from '@/app/components/base/portal-to-follow-elem' import AddButton from '@/app/components/workflow/nodes/_base/components/add-button' import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor' +import VarReferenceVars from '@/app/components/workflow/nodes/_base/components/variable/var-reference-vars' import { cn } from '@/utils/classnames' -import { useWorkflowStore } from '../../../store' -import { EditionType, PromptRole } from '../../../types' +import { useStore, useWorkflowStore } from '../../../store' +import { BlockEnum, EditionType, isPromptMessageContext, PromptRole, VarType } from '../../../types' import useAvailableVarList from '../../_base/hooks/use-available-var-list' +import ConfigContextItem from './config-context-item' import ConfigPromptItem from './config-prompt-item' const i18nPrefix 
= 'nodes.llm' - type Props = { readOnly: boolean nodeId: string filterVar: (payload: Var, selector: ValueSelector) => boolean isChatModel: boolean isChatApp: boolean - payload: PromptItem | PromptItem[] - onChange: (payload: PromptItem | PromptItem[]) => void + payload: PromptItem | PromptTemplateItem[] + onChange: (payload: PromptItem | PromptTemplateItem[]) => void isShowContext: boolean hasSetBlockStatus: { context: boolean @@ -56,6 +62,10 @@ const ConfigPrompt: FC = ({ const { setControlPromptEditorRerenderKey, } = workflowStore.getState() + + const [isContextMenuOpen, setIsContextMenuOpen] = useState(false) + const contextMenuTriggerRef = useRef(null) + const payloadWithIds = (isChatModel && Array.isArray(payload)) ? payload.map((item) => { const id = uuid4() @@ -75,11 +85,59 @@ const ConfigPrompt: FC = ({ onlyLeafNodeVar: false, filterVar, }) + const parentAvailableVars = useStore(state => state.parentAvailableVars) || [] + const parentAvailableNodes = useStore(state => state.parentAvailableNodes) || [] + + const mergedAvailableVars = useMemo(() => { + if (!parentAvailableVars.length) + return availableVars + const merged = new Map() + availableVars.forEach((item) => { + merged.set(item.nodeId, item) + }) + parentAvailableVars.forEach((item) => { + if (!merged.has(item.nodeId)) + merged.set(item.nodeId, item) + }) + return Array.from(merged.values()) + }, [availableVars, parentAvailableVars]) + + const mergedAvailableNodesWithParent = useMemo(() => { + if (!parentAvailableNodes.length) + return availableNodesWithParent + const merged = new Map() + availableNodesWithParent.forEach((node) => { + merged.set(node.id, node) + }) + parentAvailableNodes.forEach((node) => { + if (!merged.has(node.id)) + merged.set(node.id, node) + }) + return Array.from(merged.values()) + }, [availableNodesWithParent, parentAvailableNodes]) + + const contextVarOptions = useMemo(() => { + return mergedAvailableNodesWithParent + .filter(node => node.data.type === BlockEnum.Agent || 
node.data.type === BlockEnum.LLM) + .map(node => ({ + nodeId: node.id, + title: node.data.title, + vars: [ + { + variable: 'context', + type: VarType.arrayObject, + schemaType: 'List[promptMessage]', + }, + ], + })) + }, [mergedAvailableNodesWithParent]) const handleChatModePromptChange = useCallback((index: number) => { return (prompt: string) => { - const newPrompt = produce(payload as PromptItem[], (draft) => { - draft[index][draft[index].edition_type === EditionType.jinja2 ? 'jinja2_text' : 'text'] = prompt + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { + const item = draft[index] + if (!isPromptMessageContext(item)) + item[item.edition_type === EditionType.jinja2 ? 'jinja2_text' : 'text'] = prompt }) onChange(newPrompt) } @@ -87,8 +145,10 @@ const ConfigPrompt: FC = ({ const handleChatModeEditionTypeChange = useCallback((index: number) => { return (editionType: EditionType) => { - const newPrompt = produce(payload as PromptItem[], (draft) => { - draft[index].edition_type = editionType + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { + const item = draft[index] + if (!isPromptMessageContext(item)) + item.edition_type = editionType }) onChange(newPrompt) } @@ -96,29 +156,80 @@ const ConfigPrompt: FC = ({ const handleChatModeMessageRoleChange = useCallback((index: number) => { return (role: PromptRole) => { - const newPrompt = produce(payload as PromptItem[], (draft) => { - draft[index].role = role + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { + const item = draft[index] + if (!isPromptMessageContext(item)) + item.role = role }) onChange(newPrompt) } }, [onChange, payload]) const handleAddPrompt = useCallback(() => { - const newPrompt = produce(payload as PromptItem[], (draft) => { + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { if (draft.length === 0) { draft.push({ role: PromptRole.system, text: '', id: uuid4() }) - return } - const isLastItemUser = 
draft[draft.length - 1].role === PromptRole.user + const lastPromptItem = [...draft].reverse().find(item => !isPromptMessageContext(item)) as PromptItem | undefined + const isLastItemUser = lastPromptItem?.role === PromptRole.user draft.push({ role: isLastItemUser ? PromptRole.assistant : PromptRole.user, text: '', id: uuid4() }) }) onChange(newPrompt) }, [onChange, payload]) + const handleAddContext = useCallback((agentNodeId: string) => { + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { + const contextItem: PromptMessageContext = { + id: uuid4(), + $context: [agentNodeId, 'context'], + } + + const lastUserIndex = draft + .map((item, idx) => ({ item, idx })) + .reverse() + .find(({ item }) => !isPromptMessageContext(item) && (item as PromptItem).role === PromptRole.user) + ?.idx + + if (lastUserIndex !== undefined) { + draft.splice(lastUserIndex, 0, contextItem) + return + } + + const promptItems = draft.filter(item => !isPromptMessageContext(item)) as PromptItem[] + const hasOnlySystem = promptItems.length === 1 && promptItems[0].role === PromptRole.system + if (hasOnlySystem) { + draft.push({ role: PromptRole.user, text: '', id: uuid4() }) + draft.splice(draft.length - 1, 0, contextItem) + return + } + + draft.push(contextItem) + }) + onChange(newPrompt) + setIsContextMenuOpen(false) + }, [onChange, payload]) + + const handleAddContextVar = useCallback((value: ValueSelector, _item?: Var) => { + if (!Array.isArray(value) || value.length < 2) + return + handleAddContext(value[0]) + }, [handleAddContext]) + + const handleContextChange = useCallback((index: number) => { + return (value: ValueSelector) => { + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { + const item = draft[index] + if (isPromptMessageContext(item)) + item.$context = value + }) + onChange(newPrompt) + } + }, [onChange, payload]) + const handleRemove = useCallback((index: number) => { return () => { - const newPrompt = produce(payload as 
PromptItem[], (draft) => { + const newPrompt = produce(payload as PromptTemplateItem[], (draft) => { draft.splice(index, 1) }) onChange(newPrompt) @@ -145,11 +256,12 @@ const ConfigPrompt: FC = ({ }, [onChange, payload]) const canChooseSystemRole = (() => { - if (isChatModel && Array.isArray(payload)) - return !payload.find(item => item.role === PromptRole.system) - + if (isChatModel && Array.isArray(payload)) { + return !payload.find(item => !isPromptMessageContext(item) && (item as PromptItem).role === PromptRole.system) + } return false })() + return (
{(isChatModel && Array.isArray(payload)) @@ -160,9 +272,12 @@ const ConfigPrompt: FC = ({ className="space-y-1" list={payloadWithIds} setList={(list) => { - if ((payload as PromptItem[])?.[0]?.role === PromptRole.system && list[0].p?.role !== PromptRole.system) - return - + const firstItem = (payload as PromptTemplateItem[])?.[0] + if (firstItem && !isPromptMessageContext(firstItem) && firstItem.role === PromptRole.system) { + const newFirstItem = list[0]?.p + if (newFirstItem && !isPromptMessageContext(newFirstItem) && newFirstItem.role !== PromptRole.system) + return + } onChange(list.map(item => item.p)) }} handle=".handle" @@ -170,7 +285,23 @@ const ConfigPrompt: FC = ({ animation={150} > { - (payload as PromptItem[]).map((item, index) => { + (payload as PromptTemplateItem[]).map((item, index) => { + if (isPromptMessageContext(item)) { + return ( +
+ {!readOnly && } + +
+ ) + } + const canDrag = (() => { if (readOnly) return false @@ -182,7 +313,7 @@ const ConfigPrompt: FC = ({ })() return (
- {canDrag && } + {canDrag && } = ({ onRemove={handleRemove(index)} isShowContext={isShowContext} hasSetBlockStatus={hasSetBlockStatus} - availableVars={availableVars} - availableNodes={availableNodesWithParent} + availableVars={mergedAvailableVars} + availableNodes={mergedAvailableNodesWithParent} varList={varList} handleAddVariable={handleAddVariable} modelConfig={modelConfig} @@ -213,11 +344,48 @@ const ConfigPrompt: FC = ({ }
- +
+ + + setIsContextMenuOpen(!isContextMenuOpen)}> +
+ {}} + /> +
+
+ +
+ {contextVarOptions.length > 0 + ? ( + setIsContextMenuOpen(false)} + onBlur={() => setIsContextMenuOpen(false)} + autoFocus={false} + preferSchemaType + /> + ) + : ( +
+ {t('common.noAgentNodes', { ns: 'workflow' })} +
+ )} +
+
+
+
) : ( @@ -232,8 +400,8 @@ const ConfigPrompt: FC = ({ isChatApp={isChatApp} isShowContext={isShowContext} hasSetBlockStatus={hasSetBlockStatus} - nodesOutputVars={availableVars} - availableNodes={availableNodesWithParent} + nodesOutputVars={mergedAvailableVars} + availableNodes={mergedAvailableNodesWithParent} isSupportPromptGenerator isSupportJinja editionType={(payload as PromptItem).edition_type} diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx index d5ccc44e27..2808714bb4 100644 --- a/web/app/components/workflow/nodes/llm/panel.tsx +++ b/web/app/components/workflow/nodes/llm/panel.tsx @@ -5,6 +5,7 @@ import { RiAlertFill, RiQuestionLine } from '@remixicon/react' import * as React from 'react' import { useCallback } from 'react' import { useTranslation } from 'react-i18next' +import Badge from '@/app/components/base/badge' import AddButton2 from '@/app/components/base/button/add-button' import Switch from '@/app/components/base/switch' import Toast from '@/app/components/base/toast' diff --git a/web/app/components/workflow/nodes/llm/types.ts b/web/app/components/workflow/nodes/llm/types.ts index 7e95c05a29..3f11cc16f7 100644 --- a/web/app/components/workflow/nodes/llm/types.ts +++ b/web/app/components/workflow/nodes/llm/types.ts @@ -1,5 +1,5 @@ import type { ToolValue } from '@/app/components/workflow/block-selector/types' -import type { CommonNodeType, Memory, ModelConfig, PromptItem, ValueSelector, Variable, VisionSetting } from '@/app/components/workflow/types' +import type { CommonNodeType, Memory, ModelConfig, PromptItem, PromptTemplateItem, ValueSelector, Variable, VisionSetting } from '@/app/components/workflow/types' export type Tool = { enabled: boolean @@ -15,7 +15,7 @@ export type Tool = { export type LLMNodeType = CommonNodeType & { model: ModelConfig - prompt_template: PromptItem[] | PromptItem + prompt_template: PromptTemplateItem[] | PromptItem prompt_config?: { jinja2_variables?: 
Variable[] } diff --git a/web/app/components/workflow/nodes/llm/use-config.ts b/web/app/components/workflow/nodes/llm/use-config.ts index e885f108bb..6922a8989f 100644 --- a/web/app/components/workflow/nodes/llm/use-config.ts +++ b/web/app/components/workflow/nodes/llm/use-config.ts @@ -1,4 +1,4 @@ -import type { Memory, PromptItem, ValueSelector, Var, Variable } from '../../types' +import type { Memory, PromptItem, PromptTemplateItem, ValueSelector, Var, Variable } from '../../types' import type { LLMNodeType, StructuredOutput } from './types' import { produce } from 'immer' import { useCallback, useEffect, useRef, useState } from 'react' @@ -249,7 +249,7 @@ const useConfig = (id: string, payload: LLMNodeType) => { setInputs(newInputs) }, [setInputs]) - const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => { + const handlePromptChange = useCallback((newPrompt: PromptTemplateItem[] | PromptItem) => { const newInputs = produce(inputRef.current, (draft) => { draft.prompt_template = newPrompt }) diff --git a/web/app/components/workflow/nodes/sub-graph-start/constants.ts b/web/app/components/workflow/nodes/sub-graph-start/constants.ts new file mode 100644 index 0000000000..4cb8c08038 --- /dev/null +++ b/web/app/components/workflow/nodes/sub-graph-start/constants.ts @@ -0,0 +1 @@ +export const CUSTOM_SUB_GRAPH_START_NODE = 'custom-sub-graph-start' diff --git a/web/app/components/workflow/nodes/sub-graph-start/index.tsx b/web/app/components/workflow/nodes/sub-graph-start/index.tsx new file mode 100644 index 0000000000..b5fff65994 --- /dev/null +++ b/web/app/components/workflow/nodes/sub-graph-start/index.tsx @@ -0,0 +1,60 @@ +import type { NodeProps } from 'reactflow' +import type { CommonNodeType } from '@/app/components/workflow/types' +import { memo } from 'react' +import { useTranslation } from 'react-i18next' +import { AssembleVariablesAlt } from '@/app/components/base/icons/src/vender/line/general' +import { Agent } from 
'@/app/components/base/icons/src/vender/workflow' +import Tooltip from '@/app/components/base/tooltip' +import { NodeSourceHandle } from '@/app/components/workflow/nodes/_base/components/node-handle' +import { cn } from '@/utils/classnames' + +type SubGraphStartNodeData = CommonNodeType<{ + tooltip?: string + iconType?: string +}> + +type IconComponent = typeof Agent + +const iconMap: Record = { + agent: Agent, + assemble: AssembleVariablesAlt, +} + +const SubGraphStartNode = ({ id, data }: NodeProps) => { + const { t } = useTranslation() + const iconType = data?.iconType || 'agent' + const Icon = iconMap[iconType] || Agent + const rawTitle = data?.title?.trim() || '' + const showTitle = iconType === 'agent' && !!rawTitle + const displayTitle = showTitle && (rawTitle.startsWith('@') ? rawTitle : `@${rawTitle}`) + const tooltip = data?.tooltip + || (iconType === 'assemble' ? t('blocks.start', { ns: 'workflow' }) : (data?.title || t('blocks.start', { ns: 'workflow' }))) + + return ( +
+ +
+ +
+
+ {showTitle && ( + + {displayTitle} + + )} + +
+ ) +} + +export default memo(SubGraphStartNode) diff --git a/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/agent-header-bar.tsx b/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/agent-header-bar.tsx new file mode 100644 index 0000000000..b4445a747d --- /dev/null +++ b/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/agent-header-bar.tsx @@ -0,0 +1,74 @@ +import type { FC } from 'react' +import { RiCloseLine, RiEqualizer2Line } from '@remixicon/react' +import { memo } from 'react' +import { useTranslation } from 'react-i18next' +import { AssembleVariables } from '@/app/components/base/icons/src/vender/line/general' +import AlertTriangle from '@/app/components/base/icons/src/vender/solid/alertsAndFeedback/AlertTriangle' +import { Agent } from '@/app/components/base/icons/src/vender/workflow' +import { cn } from '@/utils/classnames' + +type AgentHeaderBarProps = { + agentName: string + onRemove: () => void + onViewInternals?: () => void + hasWarning?: boolean + showAtPrefix?: boolean +} + +const AgentHeaderBar: FC = ({ + agentName, + onRemove, + onViewInternals, + hasWarning, + showAtPrefix = true, +}) => { + const { t } = useTranslation() + + return ( +
+
+
+
+ {showAtPrefix ? : } +
+ + {showAtPrefix && '@'} + {agentName} + + +
+
+ {onViewInternals && ( + + )} +
+ ) +} + +export default memo(AgentHeaderBar) diff --git a/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/index.tsx b/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/index.tsx index ceef2c3489..a0dbcd1070 100644 --- a/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/index.tsx +++ b/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/index.tsx @@ -1,27 +1,155 @@ +import type { AgentNode, WorkflowVariableBlockType } from '@/app/components/base/prompt-editor/types' +import type { StrategyDetail, StrategyPluginDetail } from '@/app/components/plugins/types' +import type { MentionConfig, VarKindType } from '@/app/components/workflow/nodes/_base/types' +import type { AgentNodeType } from '@/app/components/workflow/nodes/agent/types' +import type { CodeNodeType } from '@/app/components/workflow/nodes/code/types' +import type { LLMNodeType } from '@/app/components/workflow/nodes/llm/types' import type { - Node, + CommonNodeType, NodeOutPutVar, + PromptItem, + PromptTemplateItem, + ValueSelector, + Node as WorkflowNode, } from '@/app/components/workflow/types' import { memo, + useCallback, + useMemo, + useState, } from 'react' import { useTranslation } from 'react-i18next' +import { useNodes, useStoreApi } from 'reactflow' import PromptEditor from '@/app/components/base/prompt-editor' +import { useNodesMetaData, useNodesSyncDraft } from '@/app/components/workflow/hooks' +import { VarKindType as VarKindTypeEnum } from '@/app/components/workflow/nodes/_base/types' +import { Type } from '@/app/components/workflow/nodes/llm/types' import { useStore } from '@/app/components/workflow/store' -import { BlockEnum } from '@/app/components/workflow/types' +import { BlockEnum, EditionType, isPromptMessageContext, PromptRole, VarType } from '@/app/components/workflow/types' +import { generateNewNode, getNodeCustomTypeByNodeDataType, mergeNodeDefaultData } from 
'@/app/components/workflow/utils' +import { useGetLanguage } from '@/context/i18n' +import { useStrategyProviders } from '@/service/use-strategy' import { cn } from '@/utils/classnames' +import SubGraphModal from '../sub-graph-modal' +import AgentHeaderBar from './agent-header-bar' import Placeholder from './placeholder' +/** + * Matches agent context variable syntax: {{@nodeId.context@}} + * Example: {{@agent-123.context@}} + */ +const AGENT_CONTEXT_VAR_PATTERN = /\{\{@[^.@#]+\.context@\}\}/g +const AGENT_CONTEXT_VAR_PREFIX = '{{@' +const AGENT_CONTEXT_VAR_SUFFIX = '.context@}}' +const getAgentNodeIdFromContextVar = (placeholder: string) => { + if (!placeholder.startsWith(AGENT_CONTEXT_VAR_PREFIX) || !placeholder.endsWith(AGENT_CONTEXT_VAR_SUFFIX)) + return '' + return placeholder.slice(AGENT_CONTEXT_VAR_PREFIX.length, -AGENT_CONTEXT_VAR_SUFFIX.length) +} + +const buildAssemblePlaceholder = (toolNodeId?: string, paramKey?: string) => { + if (!toolNodeId || !paramKey) + return '' + return `{{#${toolNodeId}_ext_${paramKey}.result#}}` +} +const DEFAULT_MENTION_CONFIG: MentionConfig = { + extractor_node_id: '', + output_selector: [], + null_strategy: 'use_default', + default_value: '', +} +type AgentCheckValidContext = { + provider?: StrategyPluginDetail + strategy?: StrategyDetail + language: string + isReadyForCheckValid: boolean +} + +type WorkflowNodesMap = NonNullable + +const resolvePromptText = (item?: PromptItem) => { + if (!item) + return '' + if (item.edition_type === EditionType.jinja2) + return item.jinja2_text || item.text || '' + return item.text || '' +} + +const getUserPromptText = (promptTemplate?: PromptTemplateItem[] | PromptItem) => { + if (!promptTemplate) + return '' + if (Array.isArray(promptTemplate)) { + const userPrompt = promptTemplate.find( + item => !isPromptMessageContext(item) && item.role === PromptRole.user, + ) as PromptItem | undefined + return resolvePromptText(userPrompt) + } + return resolvePromptText(promptTemplate) +} + +const 
hasUserPromptTemplate = (promptTemplate: PromptTemplateItem[] | PromptItem) => { + if (!Array.isArray(promptTemplate)) + return true + return promptTemplate.some(item => !isPromptMessageContext(item) && item.role === PromptRole.user) +} + +const applyPromptText = (item: PromptItem, text: string) => { + if (item.edition_type === EditionType.jinja2) { + return { + ...item, + text, + jinja2_text: text, + } + } + return { + ...item, + text, + } +} + +const buildPromptTemplateWithText = (promptTemplate: PromptTemplateItem[] | PromptItem, text: string) => { + if (!Array.isArray(promptTemplate)) + return applyPromptText(promptTemplate as PromptItem, text) + + const userIndex = promptTemplate.findIndex( + item => !isPromptMessageContext(item) && item.role === PromptRole.user, + ) + if (userIndex >= 0) { + return promptTemplate.map((item, index) => { + if (index !== userIndex || isPromptMessageContext(item)) + return item + return applyPromptText(item as PromptItem, text) + }) as PromptTemplateItem[] + } + + const useJinja = promptTemplate.some( + item => !isPromptMessageContext(item) && (item as PromptItem).edition_type === EditionType.jinja2, + ) + const defaultUserPrompt: PromptItem = useJinja + ? 
{ + role: PromptRole.user, + text, + jinja2_text: text, + edition_type: EditionType.jinja2, + } + : { role: PromptRole.user, text } + + return [...promptTemplate, defaultUserPrompt] as PromptTemplateItem[] +} + type MixedVariableTextInputProps = { readOnly?: boolean nodesOutputVars?: NodeOutPutVar[] - availableNodes?: Node[] + availableNodes?: WorkflowNode[] value?: string - onChange?: (text: string) => void + onChange?: (text: string, type?: VarKindType, mentionConfig?: MentionConfig | null) => void showManageInputField?: boolean onManageInputField?: () => void disableVariableInsertion?: boolean + toolNodeId?: string + paramKey?: string } + const MixedVariableTextInput = ({ readOnly = false, nodesOutputVars, @@ -31,43 +159,505 @@ const MixedVariableTextInput = ({ showManageInputField, onManageInputField, disableVariableInsertion = false, + toolNodeId, + paramKey = '', }: MixedVariableTextInputProps) => { const { t } = useTranslation() + const language = useGetLanguage() + const { data: strategyProviders } = useStrategyProviders() + const reactFlowStore = useStoreApi() + const nodes = useNodes() const controlPromptEditorRerenderKey = useStore(s => s.controlPromptEditorRerenderKey) + const setControlPromptEditorRerenderKey = useStore(s => s.setControlPromptEditorRerenderKey) + const nodesDefaultConfigs = useStore(s => s.nodesDefaultConfigs) + const { nodesMap: nodesMetaDataMap } = useNodesMetaData() + const { handleSyncWorkflowDraft } = useNodesSyncDraft() + const [isSubGraphModalOpen, setIsSubGraphModalOpen] = useState(false) + + const nodesByIdMap = useMemo(() => { + return availableNodes.reduce((acc, node) => { + acc[node.id] = node + return acc + }, {} as Record) + }, [availableNodes]) + + const assemblePlaceholder = useMemo(() => { + return buildAssemblePlaceholder(toolNodeId, paramKey) + }, [paramKey, toolNodeId]) + + const isAssembleValue = useMemo(() => { + if (!assemblePlaceholder) + return false + return value.includes(assemblePlaceholder) + }, 
[assemblePlaceholder, value]) + + const contextNodeIds = useMemo(() => { + const ids = new Set() + availableNodes.forEach((node) => { + if (node.data.type === BlockEnum.Agent || node.data.type === BlockEnum.LLM) + ids.add(node.id) + }) + return ids + }, [availableNodes]) + + const nodesById = useMemo(() => { + return nodes.reduce((acc, node) => { + acc[node.id] = node + return acc + }, {} as Record) + }, [nodes]) + + const assembleExtractorNodeId = useMemo(() => { + if (!toolNodeId || !paramKey) + return '' + return `${toolNodeId}_ext_${paramKey}` + }, [paramKey, toolNodeId]) + + const ensureExtractorNode = useCallback((payload: { + extractorNodeId: string + nodeType: BlockEnum + data: Partial + }) => { + if (!toolNodeId) + return null + const metaDefault = nodesMetaDataMap?.[payload.nodeType]?.defaultValue as Partial | undefined + const appDefault = nodesDefaultConfigs?.[payload.nodeType] as Partial | undefined + if (!metaDefault && !appDefault) + return null + + const { getNodes, setNodes } = reactFlowStore.getState() + const currentNodes = getNodes() + const existingNode = currentNodes.find(node => node.id === payload.extractorNodeId) + const shouldReplace = existingNode && existingNode.data.type !== payload.nodeType + if (!existingNode || shouldReplace) { + const nextNodes = shouldReplace + ? currentNodes.filter(node => node.id !== payload.extractorNodeId) + : currentNodes + const mergedData = mergeNodeDefaultData({ + nodeType: payload.nodeType, + metaDefault, + appDefault, + overrideData: payload.data, + }) + const resolvedTitle = mergedData.title ?? metaDefault?.title ?? appDefault?.title ?? '' + const resolvedDesc = mergedData.desc ?? metaDefault?.desc ?? appDefault?.desc ?? 
'' + const { newNode } = generateNewNode({ + id: payload.extractorNodeId, + type: getNodeCustomTypeByNodeDataType(payload.nodeType), + data: { + ...mergedData, + type: payload.nodeType, + title: resolvedTitle, + desc: resolvedDesc, + parent_node_id: toolNodeId, + }, + position: { + x: 0, + y: 0, + }, + hidden: true, + }) + setNodes([...nextNodes, newNode]) + handleSyncWorkflowDraft() + return newNode + } + + return existingNode + }, [handleSyncWorkflowDraft, nodesDefaultConfigs, nodesMetaDataMap, reactFlowStore, toolNodeId]) + + const ensureAssembleExtractorNode = useCallback(() => { + if (!assembleExtractorNodeId) + return '' + const extractorNode = ensureExtractorNode({ + extractorNodeId: assembleExtractorNodeId, + nodeType: BlockEnum.Code, + data: { + outputs: { + result: { + type: VarType.string, + children: null, + }, + }, + }, + }) + if (!extractorNode) + return '' + if (extractorNode.data.type !== BlockEnum.Code) + return assembleExtractorNodeId + + const outputs = (extractorNode.data as CodeNodeType).outputs || {} + const resultOutput = outputs.result + if (!resultOutput || resultOutput.type !== VarType.string) { + const { getNodes, setNodes } = reactFlowStore.getState() + const currentNodes = getNodes() + const nextOutputs = { + ...outputs, + result: { + type: VarType.string, + children: null, + }, + } + setNodes(currentNodes.map((node) => { + if (node.id !== assembleExtractorNodeId) + return node + return { + ...node, + data: { + ...node.data, + outputs: nextOutputs, + }, + } + })) + handleSyncWorkflowDraft() + } + + return assembleExtractorNodeId + }, [assembleExtractorNodeId, ensureExtractorNode, handleSyncWorkflowDraft, reactFlowStore]) + + type DetectedAgent = { + nodeId: string + name: string + } + + const detectAgentFromText = useCallback((text: string): DetectedAgent | null => { + if (!text) + return null + + const matches = text.matchAll(AGENT_CONTEXT_VAR_PATTERN) + for (const match of matches) { + const nodeId = 
getAgentNodeIdFromContextVar(match[0]) + if (!nodeId) + continue + const node = nodesByIdMap[nodeId] + if (node && contextNodeIds.has(nodeId)) { + return { + nodeId, + name: node.data.title, + } + } + } + return null + }, [contextNodeIds, nodesByIdMap]) + + const detectedAgentFromValue: DetectedAgent | null = useMemo(() => { + return detectAgentFromText(value) + }, [detectAgentFromText, value]) + + // Check if value only contains agent context variable without other user input + const isOnlyAgentContext = useMemo(() => { + if (!detectedAgentFromValue || !value) + return false + const valueWithoutAgentContext = value.replace(AGENT_CONTEXT_VAR_PATTERN, '').trim() + return valueWithoutAgentContext === '' + }, [detectedAgentFromValue, value]) + + const agentNodes = useMemo(() => { + if (!contextNodeIds.size) + return [] + return availableNodes + .filter(node => contextNodeIds.has(node.id)) + .map(node => ({ + id: node.id, + title: node.data.title, + })) + }, [availableNodes, contextNodeIds]) + + const workflowNodesMap = useMemo(() => { + const acc: WorkflowNodesMap = {} + availableNodes.forEach((node) => { + acc[node.id] = { + title: node.data.title, + type: node.data.type, + height: node.data.height, + width: node.data.width, + position: node.data.position, + } + if (node.data.type === BlockEnum.Start) { + acc.sys = { + title: t('blocks.start', { ns: 'workflow' }), + type: BlockEnum.Start, + height: node.data.height, + width: node.data.width, + position: node.data.position, + } + } + }) + return acc + }, [availableNodes, t]) + + const getNodeWarning = useCallback((node?: WorkflowNode) => { + if (!node) + return true + const validator = nodesMetaDataMap?.[node.data.type as BlockEnum]?.checkValid + if (!validator) + return false + let moreDataForCheckValid: AgentCheckValidContext | undefined + if (node.data.type === BlockEnum.Agent) { + const agentData = node.data as AgentNodeType + const isReadyForCheckValid = !!strategyProviders + const provider = 
strategyProviders?.find(provider => provider.declaration.identity.name === agentData.agent_strategy_provider_name) + const strategy = provider?.declaration.strategies?.find(s => s.identity.name === agentData.agent_strategy_name) + moreDataForCheckValid = { + provider, + strategy, + language, + isReadyForCheckValid, + } + } + const { errorMessage } = validator(node.data, t, moreDataForCheckValid) + return Boolean(errorMessage) + }, [language, nodesMetaDataMap, strategyProviders, t]) + + const hasAgentWarning = useMemo(() => { + if (!detectedAgentFromValue) + return false + const agentWarning = getNodeWarning(nodesById[detectedAgentFromValue.nodeId]) + if (!toolNodeId || !paramKey) + return agentWarning + const extractorNodeId = `${toolNodeId}_ext_${paramKey}` + const extractorWarning = getNodeWarning(nodesById[extractorNodeId]) + return agentWarning || extractorWarning + }, [detectedAgentFromValue, getNodeWarning, nodesById, paramKey, toolNodeId]) + + const hasAssembleWarning = useMemo(() => { + if (!isAssembleValue || !assembleExtractorNodeId) + return false + return getNodeWarning(nodesById[assembleExtractorNodeId]) + }, [assembleExtractorNodeId, getNodeWarning, isAssembleValue, nodesById]) + + const syncExtractorPromptFromText = useCallback((text: string) => { + if (!toolNodeId || !paramKey) + return + + const detectedAgent = detectAgentFromText(text) + if (!detectedAgent) + return + + const escapedAgentId = detectedAgent.nodeId.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') + const leadingPattern = new RegExp(`^\\{\\{@${escapedAgentId}\\.context@\\}\\}`) + const promptText = text.replace(leadingPattern, '') + + const extractorNodeId = `${toolNodeId}_ext_${paramKey}` + const { getNodes, setNodes } = reactFlowStore.getState() + const nodes = getNodes() + const extractorNode = nodes.find(node => node.id === extractorNodeId) as WorkflowNode | undefined + if (!extractorNode?.data?.prompt_template) + return + + const currentPromptText = 
getUserPromptText(extractorNode.data.prompt_template) + const shouldUpdate = !hasUserPromptTemplate(extractorNode.data.prompt_template) + || currentPromptText !== promptText + if (!shouldUpdate) + return + + const nextPromptTemplate = buildPromptTemplateWithText(extractorNode.data.prompt_template, promptText) + const nextNodes = nodes.map((node) => { + if (node.id !== extractorNodeId) + return node + return { + ...node, + data: { + ...node.data, + prompt_template: nextPromptTemplate, + }, + } + }) + setNodes(nextNodes) + handleSyncWorkflowDraft() + }, [detectAgentFromText, handleSyncWorkflowDraft, paramKey, reactFlowStore, toolNodeId]) + + const removeExtractorNode = useCallback(() => { + if (!toolNodeId || !paramKey) + return + + const extractorNodeId = `${toolNodeId}_ext_${paramKey}` + const { getNodes, setNodes } = reactFlowStore.getState() + const nodes = getNodes() + const hasExtractorNode = nodes.some(node => node.id === extractorNodeId) + if (!hasExtractorNode) + return + + setNodes(nodes.filter(node => node.id !== extractorNodeId)) + handleSyncWorkflowDraft() + }, [handleSyncWorkflowDraft, paramKey, reactFlowStore, toolNodeId]) + + const handleAgentRemove = useCallback(() => { + const agentNodeId = detectedAgentFromValue?.nodeId + if (!agentNodeId || !onChange) + return + + const valueWithoutAgentVars = value.replace(AGENT_CONTEXT_VAR_PATTERN, (match) => { + const nodeId = getAgentNodeIdFromContextVar(match) + return nodeId === agentNodeId ? 
'' : match + }) + + removeExtractorNode() + onChange(valueWithoutAgentVars, VarKindTypeEnum.mixed, null) + setControlPromptEditorRerenderKey(Date.now()) + }, [detectedAgentFromValue?.nodeId, onChange, removeExtractorNode, setControlPromptEditorRerenderKey, value]) + + const handleAgentSelect = useCallback((agent: AgentNode) => { + if (!onChange) + return + + // compute words after the latest '@' and delete them + const valueWithoutTrigger = value.replace(/@[^@\n]*$/, '') + const newValue = `{{@${agent.id}.context@}}${valueWithoutTrigger}` + + if (toolNodeId && paramKey) { + ensureExtractorNode({ + extractorNodeId: `${toolNodeId}_ext_${paramKey}`, + nodeType: BlockEnum.LLM, + data: { + structured_output_enabled: true, + structured_output: { + schema: { + type: Type.object, + properties: { + [paramKey]: { + type: Type.string, + }, + }, + required: [paramKey], + additionalProperties: false, + }, + }, + }, + }) + } + + const mentionConfigWithOutputSelector: MentionConfig = { + ...DEFAULT_MENTION_CONFIG, + extractor_node_id: toolNodeId && paramKey ? `${toolNodeId}_ext_${paramKey}` : '', + output_selector: paramKey ? 
['structured_output', paramKey] : [], + } + onChange(newValue, VarKindTypeEnum.mention, mentionConfigWithOutputSelector) + syncExtractorPromptFromText(newValue) + setControlPromptEditorRerenderKey(Date.now()) + }, [ensureExtractorNode, onChange, paramKey, setControlPromptEditorRerenderKey, syncExtractorPromptFromText, toolNodeId, value]) + + const handleAssembleSelect = useCallback((): ValueSelector | null => { + if (!toolNodeId || !paramKey || !assemblePlaceholder) + return null + const extractorNodeId = assembleExtractorNodeId || `${toolNodeId}_ext_${paramKey}` + ensureAssembleExtractorNode() + onChange?.(assemblePlaceholder, VarKindTypeEnum.mixed, null) + setControlPromptEditorRerenderKey(Date.now()) + return [extractorNodeId, 'result'] + }, [assembleExtractorNodeId, assemblePlaceholder, ensureAssembleExtractorNode, onChange, paramKey, setControlPromptEditorRerenderKey, toolNodeId]) + + const handleAssembleRemove = useCallback(() => { + if (!onChange || !assemblePlaceholder) + return + + removeExtractorNode() + onChange('', VarKindTypeEnum.mixed, null) + setControlPromptEditorRerenderKey(Date.now()) + }, [assemblePlaceholder, onChange, removeExtractorNode, setControlPromptEditorRerenderKey]) + + const handleOpenSubGraphModal = useCallback(() => { + setIsSubGraphModalOpen(true) + }, []) + + const handleCloseSubGraphModal = useCallback(() => { + setIsSubGraphModalOpen(false) + }, []) + + const sourceVariable: ValueSelector | undefined = detectedAgentFromValue + ? 
[detectedAgentFromValue.nodeId, 'context'] + : undefined return ( - + {isAssembleValue && ( + )} - className="caret:text-text-accent" - editable={!readOnly} - value={value} - workflowVariableBlock={{ - show: !disableVariableInsertion, - variables: nodesOutputVars || [], - workflowNodesMap: availableNodes.reduce((acc, node) => { - acc[node.id] = { - title: node.data.title, - type: node.data.type, - } - if (node.data.type === BlockEnum.Start) { - acc.sys = { - title: t('blocks.start', { ns: 'workflow' }), - type: BlockEnum.Start, - } - } - return acc - }, {} as any), - showManageInputField, - onManageInputField, - }} - placeholder={} - onChange={onChange} - /> + {!isAssembleValue && detectedAgentFromValue && ( + + )} + {!isAssembleValue && ( +
+ 0 && !detectedAgentFromValue, + agentNodes, + onSelect: handleAgentSelect, + }} + placeholder={} + onChange={(text) => { + const hasPlaceholder = new RegExp(AGENT_CONTEXT_VAR_PATTERN.source).test(text) + if (hasPlaceholder) + syncExtractorPromptFromText(text) + if (detectedAgentFromValue && !hasPlaceholder) { + removeExtractorNode() + onChange?.(text, VarKindTypeEnum.mixed, null) + return + } + onChange?.(text) + }} + /> + {isOnlyAgentContext && paramKey && ( +
+ + {t('nodes.tool.agentPlaceholder', { ns: 'workflow', paramKey })} + +
+ )} +
+ )} + {toolNodeId && paramKey && isAssembleValue && ( + + )} + {toolNodeId && paramKey && !isAssembleValue && detectedAgentFromValue && sourceVariable && ( + + )} +
) } diff --git a/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/placeholder.tsx b/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/placeholder.tsx index 6e999975f1..02053861a3 100644 --- a/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/placeholder.tsx +++ b/web/app/components/workflow/nodes/tool/components/mixed-variable-text-input/placeholder.tsx @@ -7,9 +7,10 @@ import { CustomTextNode } from '@/app/components/base/prompt-editor/plugins/cust type PlaceholderProps = { disableVariableInsertion?: boolean + hasSelectedAgent?: boolean } -const Placeholder = ({ disableVariableInsertion = false }: PlaceholderProps) => { +const Placeholder = ({ disableVariableInsertion = false, hasSelectedAgent = false }: PlaceholderProps) => { const { t } = useTranslation() const [editor] = useLexicalComposerContext() @@ -44,6 +45,21 @@ const Placeholder = ({ disableVariableInsertion = false }: PlaceholderProps) => > {t('nodes.tool.insertPlaceholder2', { ns: 'workflow' })}
+ {!hasSelectedAgent && ( + <> +
@
+
{ + e.preventDefault() + e.stopPropagation() + handleInsert('@') + })} + > + {t('nodes.tool.insertPlaceholder3', { ns: 'workflow' })} +
+ + )} )}
diff --git a/web/app/components/workflow/nodes/tool/components/sub-graph-modal/index.tsx b/web/app/components/workflow/nodes/tool/components/sub-graph-modal/index.tsx new file mode 100644 index 0000000000..2fbe5b7fbe --- /dev/null +++ b/web/app/components/workflow/nodes/tool/components/sub-graph-modal/index.tsx @@ -0,0 +1,319 @@ +'use client' +import type { FC } from 'react' +import type { SubGraphModalProps } from './types' +import type { MentionConfig } from '@/app/components/workflow/nodes/_base/types' +import type { CodeNodeType } from '@/app/components/workflow/nodes/code/types' +import type { LLMNodeType } from '@/app/components/workflow/nodes/llm/types' +import type { ToolNodeType } from '@/app/components/workflow/nodes/tool/types' +import type { Node, PromptItem, PromptTemplateItem } from '@/app/components/workflow/types' +import { Dialog, DialogPanel, Transition, TransitionChild } from '@headlessui/react' +import { RiCloseLine } from '@remixicon/react' +import { noop } from 'es-toolkit/function' +import { Fragment, memo, useCallback, useEffect, useMemo } from 'react' +import { useTranslation } from 'react-i18next' +import { useStore as useReactFlowStore, useStoreApi } from 'reactflow' +import { AssembleVariablesAlt } from '@/app/components/base/icons/src/vender/line/general' +import { Agent } from '@/app/components/base/icons/src/vender/workflow' +import { useIsChatMode, useNodesSyncDraft, useWorkflow, useWorkflowVariables } from '@/app/components/workflow/hooks' +import { useHooksStore } from '@/app/components/workflow/hooks-store' +import { VarKindType } from '@/app/components/workflow/nodes/_base/types' +import { useStore as useWorkflowStore } from '@/app/components/workflow/store' +import { BlockEnum, EditionType, isPromptMessageContext, PromptRole, VarType } from '@/app/components/workflow/types' +import SubGraphCanvas from './sub-graph-canvas' + +const SubGraphModal: FC = (props) => { + const { t } = useTranslation() + const { isOpen, onClose, 
variant, toolNodeId, paramKey } = props + const isAgentVariant = variant === 'agent' + const resolvedAgentNodeId = isAgentVariant ? props.agentNodeId : '' + const agentName = isAgentVariant ? props.agentName : '' + const assembleTitle = !isAgentVariant ? props.title : '' + const modalTitle = useMemo(() => { + const baseTitle = isAgentVariant + ? agentName + : (assembleTitle || t('nodes.tool.assembleVariables', { ns: 'workflow' })) + const prefix = isAgentVariant && baseTitle ? '@' : '' + return `${prefix}${baseTitle} ${t('subGraphModal.title', { ns: 'workflow' })}`.trim() + }, [agentName, assembleTitle, isAgentVariant, t]) + const reactflowStore = useStoreApi() + const workflowNodes = useWorkflowStore(state => state.nodes) + const workflowEdges = useReactFlowStore(state => state.edges) + const setControlPromptEditorRerenderKey = useWorkflowStore(state => state.setControlPromptEditorRerenderKey) + const { handleSyncWorkflowDraft, doSyncWorkflowDraft } = useNodesSyncDraft() + const configsMap = useHooksStore(state => state.configsMap) + const { getBeforeNodesInSameBranch } = useWorkflow() + const { getNodeAvailableVars } = useWorkflowVariables() + const isChatMode = useIsChatMode() + + const extractorNodeId = `${toolNodeId}_ext_${paramKey}` + const extractorNode = useMemo(() => { + return workflowNodes.find(node => node.id === extractorNodeId) as Node | undefined + }, [extractorNodeId, workflowNodes]) + const toolNode = useMemo(() => { + return workflowNodes.find(node => node.id === toolNodeId) + }, [toolNodeId, workflowNodes]) + const toolParam = (toolNode?.data as ToolNodeType | undefined)?.tool_parameters?.[paramKey] + const toolParamValue = toolParam?.value as string | undefined + const assemblePlaceholder = useMemo(() => { + return `{{#${toolNodeId}_ext_${paramKey}.result#}}` + }, [paramKey, toolNodeId]) + + const parentBeforeNodes = useMemo(() => { + if (!isOpen) + return [] + return getBeforeNodesInSameBranch(toolNodeId, workflowNodes, workflowEdges) + }, 
[getBeforeNodesInSameBranch, isOpen, toolNodeId, workflowEdges, workflowNodes]) + + const parentContextNodes = useMemo(() => { + if (!parentBeforeNodes.length || !isAgentVariant) + return [] + return parentBeforeNodes.filter(node => node.data.type === BlockEnum.Agent || node.data.type === BlockEnum.LLM) + }, [isAgentVariant, parentBeforeNodes]) + + const parentAvailableNodes = useMemo(() => { + if (!isOpen) + return [] + return isAgentVariant ? parentContextNodes : parentBeforeNodes + }, [isAgentVariant, isOpen, parentBeforeNodes, parentContextNodes]) + + const parentAvailableVars = useMemo(() => { + if (!parentAvailableNodes.length) + return [] + const vars = getNodeAvailableVars({ + beforeNodes: parentAvailableNodes, + isChatMode, + filterVar: () => true, + }) + const availableNodeIds = new Set(parentAvailableNodes.map(node => node.id)) + return vars.filter(nodeVar => availableNodeIds.has(nodeVar.nodeId)) + }, [getNodeAvailableVars, isChatMode, parentAvailableNodes]) + + const mentionConfig = useMemo(() => { + const current = toolParam?.mention_config + const rawSelector = Array.isArray(current?.output_selector) ? current!.output_selector : [] + const outputSelector = rawSelector[0] === extractorNodeId ? rawSelector.slice(1) : rawSelector + const defaultOutputSelector = ['structured_output', paramKey] + + return { + extractor_node_id: current?.extractor_node_id || extractorNodeId, + output_selector: outputSelector.length > 0 ? outputSelector : defaultOutputSelector, + null_strategy: current?.null_strategy || 'use_default', + default_value: current?.default_value ?? 
'', + } + }, [extractorNodeId, paramKey, toolParam?.mention_config]) + + const handleMentionConfigChange = useCallback((config: MentionConfig) => { + if (!isAgentVariant) + return + + const { getNodes, setNodes } = reactflowStore.getState() + const nextNodes = getNodes().map((node) => { + if (node.id !== toolNodeId) + return node + + const toolData = node.data as ToolNodeType + const currentParam = toolData.tool_parameters?.[paramKey] + if (!currentParam) + return node + + return { + ...node, + data: { + ...toolData, + tool_parameters: { + ...toolData.tool_parameters, + [paramKey]: { + ...currentParam, + type: currentParam.type || VarKindType.mention, + mention_config: config, + }, + }, + }, + } + }) + setNodes(nextNodes) + handleSyncWorkflowDraft() + }, [handleSyncWorkflowDraft, isAgentVariant, paramKey, reactflowStore, toolNodeId]) + + useEffect(() => { + if (!isAgentVariant || !toolParam || (toolParam.type && toolParam.type !== VarKindType.mention)) + return + + const current = toolParam.mention_config + const needsExtractor = !current?.extractor_node_id + const needsNullStrategy = !current?.null_strategy + const needsOutputSelector = !Array.isArray(current?.output_selector) + const needsDefaultValue = current?.default_value === undefined + + if (needsExtractor || needsNullStrategy || needsOutputSelector || needsDefaultValue) + handleMentionConfigChange(mentionConfig) + }, [handleMentionConfigChange, isAgentVariant, mentionConfig, toolParam]) + + const getUserPromptText = useCallback((promptTemplate?: PromptTemplateItem[] | PromptItem) => { + if (!promptTemplate) + return '' + const resolveText = (item?: PromptItem) => { + if (!item) + return '' + if (item.edition_type === EditionType.jinja2) + return item.jinja2_text || item.text || '' + return item.text || '' + } + if (Array.isArray(promptTemplate)) { + for (const item of promptTemplate) { + if (!isPromptMessageContext(item) && item.role === PromptRole.user) + return resolveText(item) + } + return '' + } + 
return resolveText(promptTemplate) + }, []) + + // TODO: handle external workflow updates while sub-graph modal is open. + const handleSave = useCallback((subGraphNodes: Node[]) => { + const extractorNodeData = subGraphNodes.find(node => node.id === extractorNodeId) as Node | undefined + if (!extractorNodeData) + return + + const ensureAssembleOutputs = (payload: CodeNodeType) => { + const outputs = payload.outputs || {} + if (outputs.result) + return payload + return { + ...payload, + outputs: { + ...outputs, + result: { + type: VarType.string, + children: null, + }, + }, + } + } + + const userPromptText = isAgentVariant + ? getUserPromptText((extractorNodeData.data as LLMNodeType).prompt_template) + : '' + const placeholder = isAgentVariant && resolvedAgentNodeId ? `{{@${resolvedAgentNodeId}.context@}}` : '' + const nextValue = isAgentVariant + ? `${placeholder}${userPromptText}` + : assemblePlaceholder + + const { getNodes, setNodes } = reactflowStore.getState() + const nextNodes = getNodes().map((node) => { + if (node.id === extractorNodeId) { + const nextData = isAgentVariant + ? extractorNodeData.data + : ensureAssembleOutputs(extractorNodeData.data as CodeNodeType) + return { + ...node, + hidden: true, + data: { + ...node.data, + ...nextData, + parent_node_id: toolNodeId, + }, + } + } + if (node.id === toolNodeId) { + const toolData = node.data as ToolNodeType + if (!toolData.tool_parameters?.[paramKey]) + return node + + return { + ...node, + data: { + ...toolData, + tool_parameters: { + ...toolData.tool_parameters, + [paramKey]: { + ...toolData.tool_parameters[paramKey], + value: nextValue, + }, + }, + }, + } + } + return node + }) + setNodes(nextNodes) + setControlPromptEditorRerenderKey(Date.now()) + }, [assemblePlaceholder, extractorNodeId, getUserPromptText, isAgentVariant, paramKey, reactflowStore, resolvedAgentNodeId, setControlPromptEditorRerenderKey, toolNodeId]) + + return ( + + + +
+ +
+
+ + +
+
+
+ {isAgentVariant + ? + : } +
+ + {modalTitle} + +
+ +
+ +
+ {variant === 'agent' + ? ( + | undefined} + toolParamValue={toolParamValue} + parentAvailableNodes={parentAvailableNodes} + parentAvailableVars={parentAvailableVars} + onSave={handleSave} + onSyncWorkflowDraft={doSyncWorkflowDraft} + /> + ) + : ( + | undefined} + toolParamValue={toolParamValue} + parentAvailableNodes={parentAvailableNodes} + parentAvailableVars={parentAvailableVars} + onSave={handleSave} + onSyncWorkflowDraft={doSyncWorkflowDraft} + /> + )} +
+
+
+
+
+
+
+ ) +} + +export default memo(SubGraphModal) diff --git a/web/app/components/workflow/nodes/tool/components/sub-graph-modal/sub-graph-canvas.tsx b/web/app/components/workflow/nodes/tool/components/sub-graph-modal/sub-graph-canvas.tsx new file mode 100644 index 0000000000..a9e9e2565d --- /dev/null +++ b/web/app/components/workflow/nodes/tool/components/sub-graph-modal/sub-graph-canvas.tsx @@ -0,0 +1,15 @@ +'use client' +import type { FC } from 'react' +import type { SubGraphCanvasProps } from './types' +import { memo } from 'react' +import SubGraph from '@/app/components/sub-graph' + +const SubGraphCanvas: FC = (props) => { + return ( +
+ +
+ ) +} + +export default memo(SubGraphCanvas) diff --git a/web/app/components/workflow/nodes/tool/components/sub-graph-modal/types.ts b/web/app/components/workflow/nodes/tool/components/sub-graph-modal/types.ts new file mode 100644 index 0000000000..8a29b402d1 --- /dev/null +++ b/web/app/components/workflow/nodes/tool/components/sub-graph-modal/types.ts @@ -0,0 +1,25 @@ +import type { SubGraphProps } from '@/app/components/sub-graph/types' +import type { ValueSelector } from '@/app/components/workflow/types' + +type BaseSubGraphModalProps = { + isOpen: boolean + onClose: () => void + toolNodeId: string + paramKey: string +} + +type AgentSubGraphModalProps = BaseSubGraphModalProps & { + variant: 'agent' + sourceVariable: ValueSelector + agentName: string + agentNodeId: string +} + +type AssembleSubGraphModalProps = BaseSubGraphModalProps & { + variant: 'assemble' + title: string +} + +export type SubGraphModalProps = AgentSubGraphModalProps | AssembleSubGraphModalProps + +export type SubGraphCanvasProps = SubGraphProps diff --git a/web/app/components/workflow/nodes/tool/node.tsx b/web/app/components/workflow/nodes/tool/node.tsx index 0cf4f0ff58..0b20f30333 100644 --- a/web/app/components/workflow/nodes/tool/node.tsx +++ b/web/app/components/workflow/nodes/tool/node.tsx @@ -1,17 +1,49 @@ import type { FC } from 'react' import type { ToolNodeType } from './types' -import type { NodeProps } from '@/app/components/workflow/types' +import type { StrategyDetail, StrategyPluginDetail } from '@/app/components/plugins/types' +import type { AgentNodeType } from '@/app/components/workflow/nodes/agent/types' +import type { CommonNodeType, NodeProps, Node as WorkflowNode } from '@/app/components/workflow/types' import * as React from 'react' -import { useEffect } from 'react' +import { useEffect, useMemo } from 'react' +import { useTranslation } from 'react-i18next' +import { useNodes } from 'reactflow' +import AlertTriangle from 
'@/app/components/base/icons/src/vender/solid/alertsAndFeedback/AlertTriangle' import { FormTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' +import BlockIcon from '@/app/components/workflow/block-icon' +import { useNodesMetaData } from '@/app/components/workflow/hooks' import { useNodeDataUpdate } from '@/app/components/workflow/hooks/use-node-data-update' import { useNodePluginInstallation } from '@/app/components/workflow/hooks/use-node-plugin-installation' import { InstallPluginButton } from '@/app/components/workflow/nodes/_base/components/install-plugin-button' +import { BlockEnum } from '@/app/components/workflow/types' +import { useGetLanguage } from '@/context/i18n' +import { useStrategyProviders } from '@/service/use-strategy' +import { cn } from '@/utils/classnames' +import { VarType } from './types' + +const AGENT_CONTEXT_VAR_PATTERN = /\{\{@[^.@#]+\.context@\}\}/g +const AGENT_CONTEXT_VAR_PREFIX = '{{@' +const AGENT_CONTEXT_VAR_SUFFIX = '.context@}}' +const getAgentNodeIdFromContextVar = (placeholder: string) => { + if (!placeholder.startsWith(AGENT_CONTEXT_VAR_PREFIX) || !placeholder.endsWith(AGENT_CONTEXT_VAR_SUFFIX)) + return '' + return placeholder.slice(AGENT_CONTEXT_VAR_PREFIX.length, -AGENT_CONTEXT_VAR_SUFFIX.length) +} +type AgentCheckValidContext = { + provider?: StrategyPluginDetail + strategy?: StrategyDetail + language: string + isReadyForCheckValid: boolean +} const Node: FC> = ({ id, data, }) => { + const { t } = useTranslation() + const language = useGetLanguage() + const { nodesMap: nodesMetaDataMap } = useNodesMetaData() + const { data: strategyProviders } = useStrategyProviders() + const nodes = useNodes() const { tool_configurations, paramSchemas } = data const toolConfigs = Object.keys(tool_configurations || {}) const { @@ -38,9 +70,90 @@ const Node: FC> = ({ }) }, [data._pluginInstallLocked, data._dimmed, handleNodeDataUpdate, id, shouldDim, shouldLock]) - const hasConfigs = 
toolConfigs.length > 0 + const nodesById = useMemo(() => { + return nodes.reduce((acc, node) => { + acc[node.id] = node + return acc + }, {} as Record) + }, [nodes]) - if (!showInstallButton && !hasConfigs) + const mentionEntries = useMemo(() => { + const entries: Array<{ agentNodeId: string, extractorNodeId?: string, paramKey: string }> = [] + const seen = new Set() + const toolParams = data.tool_parameters || {} + Object.entries(toolParams).forEach(([paramKey, param]) => { + const value = param?.value + if (typeof value !== 'string') + return + const matches = value.matchAll(AGENT_CONTEXT_VAR_PATTERN) + for (const match of matches) { + const agentNodeId = getAgentNodeIdFromContextVar(match[0]) + if (!agentNodeId) + continue + const entryKey = `${paramKey}:${agentNodeId}` + if (seen.has(entryKey)) + continue + seen.add(entryKey) + entries.push({ + agentNodeId, + paramKey, + extractorNodeId: param?.mention_config?.extractor_node_id + || (param?.type === VarType.mention ? `${id}_ext_${paramKey}` : undefined), + }) + } + }) + return entries + }, [data.tool_parameters, id]) + + const referenceItems = useMemo(() => { + if (!mentionEntries.length) + return [] + + const getNodeWarning = (node?: WorkflowNode) => { + if (!node) + return true + const validator = nodesMetaDataMap?.[node.data.type as BlockEnum]?.checkValid + if (!validator) + return false + let moreDataForCheckValid: AgentCheckValidContext | undefined + if (node.data.type === BlockEnum.Agent) { + const agentData = node.data as AgentNodeType + const isReadyForCheckValid = !!strategyProviders + const provider = strategyProviders?.find(provider => provider.declaration.identity.name === agentData.agent_strategy_provider_name) + const strategy = provider?.declaration.strategies?.find(s => s.identity.name === agentData.agent_strategy_name) + moreDataForCheckValid = { + provider, + strategy, + language, + isReadyForCheckValid, + } + } + const { errorMessage } = validator(node.data, t, moreDataForCheckValid) + return 
Boolean(errorMessage) + } + + return mentionEntries.map(({ agentNodeId, extractorNodeId, paramKey }) => { + const agentNode = nodesById[agentNodeId] + const agentLabel = `@${agentNode?.data.title || agentNodeId}` + const agentWarning = getNodeWarning(agentNode) + + const extractorWarning = extractorNodeId + ? getNodeWarning(nodesById[extractorNodeId]) + : false + const hasWarning = agentWarning || extractorWarning + return { + key: `${paramKey}-${agentNodeId}-${extractorNodeId || 'no-extractor'}`, + label: agentLabel, + type: BlockEnum.Agent, + hasWarning, + } + }) + }, [mentionEntries, nodesById, nodesMetaDataMap, strategyProviders, language, t]) + + const hasConfigs = toolConfigs.length > 0 + const hasReferences = referenceItems.length > 0 + + if (!showInstallButton && !hasConfigs && !hasReferences) return null return ( @@ -86,6 +199,35 @@ const Node: FC> = ({ ))}
)} + {hasReferences && ( +
+ {referenceItems.map(item => ( +
+
+ + + {item.label} + +
+ {item.hasWarning && ( + + )} +
+ ))} +
+ )}
) } diff --git a/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts b/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts index ca1e61be37..f4a042d077 100644 --- a/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts +++ b/web/app/components/workflow/nodes/tool/use-single-run-form-params.ts @@ -32,7 +32,7 @@ const useSingleRunFormParams = ({ const { inputs } = useNodeCrud(id, payload) const hadVarParams = Object.keys(inputs.tool_parameters) - .filter(key => inputs.tool_parameters[key].type !== VarType.constant) + .filter(key => ![VarType.constant, VarType.mention].includes(inputs.tool_parameters[key].type)) .map(k => inputs.tool_parameters[k]) const hadVarSettings = Object.keys(inputs.tool_configurations) diff --git a/web/app/components/workflow/panel/index.tsx b/web/app/components/workflow/panel/index.tsx index 88ada8b11e..faaf764ee2 100644 --- a/web/app/components/workflow/panel/index.tsx +++ b/web/app/components/workflow/panel/index.tsx @@ -19,6 +19,7 @@ export type PanelProps = { right?: React.ReactNode } versionHistoryPanelProps?: VersionHistoryPanelProps + withHeader?: boolean } /** @@ -69,6 +70,7 @@ const useResizeObserver = ( const Panel: FC = ({ components, versionHistoryPanelProps, + withHeader = true, }) => { const selectedNode = useReactflow(useShallow((s) => { const nodes = s.getNodes() @@ -127,7 +129,10 @@ const Panel: FC = ({
{components?.left} diff --git a/web/app/components/workflow/selection-contextmenu.tsx b/web/app/components/workflow/selection-contextmenu.tsx index d789d98fe0..cb3faa68d7 100644 --- a/web/app/components/workflow/selection-contextmenu.tsx +++ b/web/app/components/workflow/selection-contextmenu.tsx @@ -1,3 +1,5 @@ +import type { FC, ReactElement } from 'react' +import type { I18nKeysByPrefix } from '@/types/i18n' import { RiAlignBottom, RiAlignCenter, @@ -17,9 +19,13 @@ import { } from 'react' import { useTranslation } from 'react-i18next' import { useStore as useReactFlowStore, useStoreApi } from 'reactflow' -import { useNodesReadOnly, useNodesSyncDraft } from './hooks' +import { shallow } from 'zustand/shallow' +import Tooltip from '@/app/components/base/tooltip' +import { useNodesInteractions, useNodesReadOnly, useNodesSyncDraft } from './hooks' +import { useMakeGroupAvailability } from './hooks/use-make-group' import { useSelectionInteractions } from './hooks/use-selection-interactions' import { useWorkflowHistory, WorkflowHistoryEvent } from './hooks/use-workflow-history' +import ShortcutsName from './shortcuts-name' import { useStore, useWorkflowStore } from './store' enum AlignType { @@ -33,21 +39,67 @@ enum AlignType { DistributeVertical = 'distributeVertical', } +type AlignButtonConfig = { + type: AlignType + icon: ReactElement + labelKey: I18nKeysByPrefix<'workflow', 'operator.'> +} + +type AlignButtonProps = { + config: AlignButtonConfig + label: string + onClick: (type: AlignType) => void + position?: 'top' | 'bottom' | 'left' | 'right' +} + +const AlignButton: FC = ({ config, label, onClick, position = 'bottom' }) => { + return ( + +
onClick(config.type)} + > + {config.icon} +
+
+ ) +} + +const ALIGN_BUTTONS: AlignButtonConfig[] = [ + { type: AlignType.Left, icon: , labelKey: 'alignLeft' }, + { type: AlignType.Center, icon: , labelKey: 'alignCenter' }, + { type: AlignType.Right, icon: , labelKey: 'alignRight' }, + { type: AlignType.DistributeHorizontal, icon: , labelKey: 'distributeHorizontal' }, + { type: AlignType.Top, icon: , labelKey: 'alignTop' }, + { type: AlignType.Middle, icon: , labelKey: 'alignMiddle' }, + { type: AlignType.Bottom, icon: , labelKey: 'alignBottom' }, + { type: AlignType.DistributeVertical, icon: , labelKey: 'distributeVertical' }, +] + const SelectionContextmenu = () => { const { t } = useTranslation() const ref = useRef(null) - const { getNodesReadOnly } = useNodesReadOnly() + const { getNodesReadOnly, nodesReadOnly } = useNodesReadOnly() const { handleSelectionContextmenuCancel } = useSelectionInteractions() + const { + handleNodesCopy, + handleNodesDuplicate, + handleNodesDelete, + handleMakeGroup, + } = useNodesInteractions() const selectionMenu = useStore(s => s.selectionMenu) // Access React Flow methods const store = useStoreApi() const workflowStore = useWorkflowStore() - // Get selected nodes for alignment logic - const selectedNodes = useReactFlowStore(state => - state.getNodes().filter(node => node.selected), - ) + const selectedNodeIds = useReactFlowStore((state) => { + const ids = state.getNodes().filter(node => node.selected).map(node => node.id) + ids.sort() + return ids + }, shallow) + + const { canMakeGroup } = useMakeGroupAvailability(selectedNodeIds) const { handleSyncWorkflowDraft } = useNodesSyncDraft() const { saveStateToHistory } = useWorkflowHistory() @@ -65,9 +117,9 @@ const SelectionContextmenu = () => { if (container) { const { width: containerWidth, height: containerHeight } = container.getBoundingClientRect() - const menuWidth = 240 + const menuWidth = 244 - const estimatedMenuHeight = 380 + const estimatedMenuHeight = 203 if (left + menuWidth > containerWidth) left = left - menuWidth 
@@ -87,9 +139,9 @@ const SelectionContextmenu = () => { }, ref) useEffect(() => { - if (selectionMenu && selectedNodes.length <= 1) + if (selectionMenu && selectedNodeIds.length <= 1) handleSelectionContextmenuCancel() - }, [selectionMenu, selectedNodes.length, handleSelectionContextmenuCancel]) + }, [selectionMenu, selectedNodeIds.length, handleSelectionContextmenuCancel]) // Handle align nodes logic const handleAlignNode = useCallback((currentNode: any, nodeToAlign: any, alignType: AlignType, minX: number, maxX: number, minY: number, maxY: number) => { @@ -248,7 +300,7 @@ const SelectionContextmenu = () => { }, []) const handleAlignNodes = useCallback((alignType: AlignType) => { - if (getNodesReadOnly() || selectedNodes.length <= 1) { + if (getNodesReadOnly() || selectedNodeIds.length <= 1) { handleSelectionContextmenuCancel() return } @@ -259,9 +311,6 @@ const SelectionContextmenu = () => { // Get all current nodes const nodes = store.getState().getNodes() - // Get all selected nodes - const selectedNodeIds = selectedNodes.map(node => node.id) - // Find container nodes and their children // Container nodes (like Iteration and Loop) have child nodes that should not be aligned independently // when the container is selected. This prevents child nodes from being moved outside their containers. @@ -367,7 +416,7 @@ const SelectionContextmenu = () => { catch (err) { console.error('Failed to update nodes:', err) } - }, [store, workflowStore, selectedNodes, getNodesReadOnly, handleSyncWorkflowDraft, saveStateToHistory, handleSelectionContextmenuCancel, handleAlignNode, handleDistributeNodes]) + }, [getNodesReadOnly, handleAlignNode, handleDistributeNodes, handleSelectionContextmenuCancel, handleSyncWorkflowDraft, saveStateToHistory, selectedNodeIds, store, workflowStore]) if (!selectionMenu) return null @@ -381,73 +430,75 @@ const SelectionContextmenu = () => { }} ref={ref} > -
-
-
- {t('operator.vertical', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.Top)} - > - - {t('operator.alignTop', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.Middle)} - > - - {t('operator.alignMiddle', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.Bottom)} - > - - {t('operator.alignBottom', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.DistributeVertical)} - > - - {t('operator.distributeVertical', { ns: 'workflow' })} -
-
-
-
-
- {t('operator.horizontal', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.Left)} - > - - {t('operator.alignLeft', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.Center)} - > - - {t('operator.alignCenter', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.Right)} - > - - {t('operator.alignRight', { ns: 'workflow' })} -
-
handleAlignNodes(AlignType.DistributeHorizontal)} - > - - {t('operator.distributeHorizontal', { ns: 'workflow' })} -
+
+ {!nodesReadOnly && ( + <> +
+
{ + if (!canMakeGroup) + return + handleMakeGroup() + handleSelectionContextmenuCancel() + }} + > + {t('operator.makeGroup', { ns: 'workflow' })} + +
+
+
+
+
{ + handleNodesCopy() + handleSelectionContextmenuCancel() + }} + > + {t('common.copy', { ns: 'workflow' })} + +
+
{ + handleNodesDuplicate() + handleSelectionContextmenuCancel() + }} + > + {t('common.duplicate', { ns: 'workflow' })} + +
+
+
+
+
{ + handleNodesDelete() + handleSelectionContextmenuCancel() + }} + > + {t('operation.delete', { ns: 'common' })} + +
+
+
+ + )} +
+ {ALIGN_BUTTONS.map(config => ( + + ))}
diff --git a/web/app/components/workflow/store/workflow/index.ts b/web/app/components/workflow/store/workflow/index.ts index 7d2eab84ff..b2ff8b13b1 100644 --- a/web/app/components/workflow/store/workflow/index.ts +++ b/web/app/components/workflow/store/workflow/index.ts @@ -16,6 +16,7 @@ import type { VersionSliceShape } from './version-slice' import type { WorkflowDraftSliceShape } from './workflow-draft-slice' import type { WorkflowSliceShape } from './workflow-slice' import type { RagPipelineSliceShape } from '@/app/components/rag-pipeline/store' +import type { SubGraphSliceShape } from '@/app/components/sub-graph/types' import type { WorkflowSliceShape as WorkflowAppSliceShape } from '@/app/components/workflow-app/store/workflow/workflow-slice' import { useContext } from 'react' import { @@ -41,6 +42,7 @@ import { createWorkflowSlice } from './workflow-slice' export type SliceFromInjection = Partial & Partial + & Partial export type Shape = ChatVariableSliceShape diff --git a/web/app/components/workflow/types.ts b/web/app/components/workflow/types.ts index d807334d86..fb757753ab 100644 --- a/web/app/components/workflow/types.ts +++ b/web/app/components/workflow/types.ts @@ -30,6 +30,7 @@ export enum BlockEnum { Code = 'code', TemplateTransform = 'template-transform', HttpRequest = 'http-request', + Group = 'group', VariableAssigner = 'variable-assigner', VariableAggregator = 'variable-aggregator', Tool = 'tool', @@ -77,9 +78,11 @@ export type CommonNodeType = { _isCandidate?: boolean _isBundled?: boolean _children?: { nodeId: string, nodeType: BlockEnum }[] + parent_node_id?: string _isEntering?: boolean _showAddVariablePopup?: boolean _holdAddVariablePopup?: boolean + _hiddenInGroupId?: string _iterationLength?: number _iterationIndex?: number _waitingRun?: boolean @@ -114,6 +117,7 @@ export type CommonEdgeType = { _connectedNodeIsHovering?: boolean _connectedNodeIsSelected?: boolean _isBundled?: boolean + _hiddenInGroupId?: string _sourceRunningStatus?: 
NodeRunningStatus _targetRunningStatus?: NodeRunningStatus _waitingRun?: boolean @@ -253,6 +257,17 @@ export type PromptItem = { jinja2_text?: string } +export type PromptMessageContext = { + id?: string + $context: ValueSelector +} + +export type PromptTemplateItem = PromptItem | PromptMessageContext + +export const isPromptMessageContext = (item: PromptTemplateItem): item is PromptMessageContext => { + return '$context' in item +} + export enum MemoryRole { user = 'user', assistant = 'assistant', diff --git a/web/app/components/workflow/utils/elk-layout.ts b/web/app/components/workflow/utils/elk-layout.ts index c3b37c8f16..c0cf8543df 100644 --- a/web/app/components/workflow/utils/elk-layout.ts +++ b/web/app/components/workflow/utils/elk-layout.ts @@ -13,6 +13,7 @@ import { } from '@/app/components/workflow/constants' import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants' import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants' +import { CUSTOM_SUB_GRAPH_START_NODE } from '@/app/components/workflow/nodes/sub-graph-start/constants' import { BlockEnum, } from '@/app/components/workflow/types' @@ -442,6 +443,7 @@ const normaliseChildLayout = ( const startNode = nodes.find(node => node.type === CUSTOM_ITERATION_START_NODE || node.type === CUSTOM_LOOP_START_NODE + || node.type === CUSTOM_SUB_GRAPH_START_NODE || node.data?.type === BlockEnum.LoopStart || node.data?.type === BlockEnum.IterationStart, ) diff --git a/web/app/components/workflow/utils/node.ts b/web/app/components/workflow/utils/node.ts index b26e350b2a..72f559835c 100644 --- a/web/app/components/workflow/utils/node.ts +++ b/web/app/components/workflow/utils/node.ts @@ -1,6 +1,8 @@ +import type { CodeNodeType, OutputVar } from '../nodes/code/types' import type { IterationNodeType } from '../nodes/iteration/types' import type { LoopNodeType } from '../nodes/loop/types' import type { + CommonNodeType, Node, } from '../types' 
import { @@ -20,7 +22,61 @@ import { BlockEnum, } from '../types' -export function generateNewNode({ data, position, id, zIndex, type, ...rest }: Omit & { id?: string }): { +type MergeNodeDefaultDataParams>> = { + nodeType: BlockEnum + metaDefault?: Partial + appDefault?: Partial + baseData?: Partial + overrideData?: Partial +} + +const pickNonEmptyArray = (value?: T[]) => { + return Array.isArray(value) && value.length > 0 ? value : undefined +} + +export const mergeNodeDefaultData = >>({ + nodeType, + metaDefault, + appDefault, + baseData, + overrideData, +}: MergeNodeDefaultDataParams) => { + const merged = { + ...(metaDefault || {}), + ...(appDefault || {}), + ...(baseData || {}), + ...(overrideData || {}), + } as Partial + + if (nodeType === BlockEnum.Code) { + const codeMetaDefault = (metaDefault || {}) as Partial + const codeAppDefault = (appDefault || {}) as Partial + const codeBase = (baseData || {}) as Partial + const codeOverride = (overrideData || {}) as Partial + const codeDefaults = { + ...codeMetaDefault, + ...codeAppDefault, + } + + const outputs: OutputVar = { + ...(codeDefaults.outputs || {}), + ...(codeBase.outputs || {}), + ...(codeOverride.outputs || {}), + } + if (Object.keys(outputs).length > 0) + (merged as Partial).outputs = outputs + + const resolvedVariables = pickNonEmptyArray(codeBase.variables) + ?? pickNonEmptyArray(codeOverride.variables) + ?? 
pickNonEmptyArray(codeDefaults.variables) + if (resolvedVariables) + (merged as Partial).variables = resolvedVariables + } + + return merged +} + +export function generateNewNode({ data, position, id, zIndex, type, ...rest }: Omit, 'id'> & { id?: string }): { newNode: Node newIterationStartNode?: Node newLoopStartNode?: Node diff --git a/web/app/components/workflow/utils/workflow-init.ts b/web/app/components/workflow/utils/workflow-init.ts index 77a2ccefac..9eab630cfe 100644 --- a/web/app/components/workflow/utils/workflow-init.ts +++ b/web/app/components/workflow/utils/workflow-init.ts @@ -1,21 +1,15 @@ +import type { CustomGroupNodeData } from '../custom-group-node' +import type { GroupNodeData } from '../nodes/group/types' import type { IfElseNodeType } from '../nodes/if-else/types' import type { IterationNodeType } from '../nodes/iteration/types' import type { LoopNodeType } from '../nodes/loop/types' import type { QuestionClassifierNodeType } from '../nodes/question-classifier/types' import type { ToolNodeType } from '../nodes/tool/types' -import type { - Edge, - Node, -} from '../types' +import type { Edge, Node } from '../types' import { cloneDeep } from 'es-toolkit/object' -import { - getConnectedEdges, -} from 'reactflow' +import { getConnectedEdges } from 'reactflow' +import { getIterationStartNode, getLoopStartNode } from '@/app/components/workflow/utils/node' import { correctModelProvider } from '@/utils' -import { - getIterationStartNode, - getLoopStartNode, -} from '.' 
import { CUSTOM_NODE, DEFAULT_RETRY_INTERVAL, @@ -25,18 +19,22 @@ import { NODE_WIDTH_X_OFFSET, START_INITIAL_POSITION, } from '../constants' +import { CUSTOM_GROUP_NODE, GROUP_CHILDREN_Z_INDEX } from '../custom-group-node' import { branchNameCorrect } from '../nodes/if-else/utils' import { CUSTOM_ITERATION_START_NODE } from '../nodes/iteration-start/constants' import { CUSTOM_LOOP_START_NODE } from '../nodes/loop-start/constants' -import { - BlockEnum, - ErrorHandleMode, -} from '../types' +import { BlockEnum, ErrorHandleMode } from '../types' const WHITE = 'WHITE' const GRAY = 'GRAY' const BLACK = 'BLACK' -const isCyclicUtil = (nodeId: string, color: Record, adjList: Record, stack: string[]) => { + +const isCyclicUtil = ( + nodeId: string, + color: Record, + adjList: Record, + stack: string[], +) => { color[nodeId] = GRAY stack.push(nodeId) @@ -47,8 +45,12 @@ const isCyclicUtil = (nodeId: string, color: Record, adjList: Re stack.push(childId) return true } - if (color[childId] === WHITE && isCyclicUtil(childId, color, adjList, stack)) + if ( + color[childId] === WHITE + && isCyclicUtil(childId, color, adjList, stack) + ) { return true + } } color[nodeId] = BLACK if (stack.length > 0 && stack[stack.length - 1] === nodeId) @@ -66,8 +68,7 @@ const getCycleEdges = (nodes: Node[], edges: Edge[]) => { adjList[node.id] = [] } - for (const edge of edges) - adjList[edge.source]?.push(edge.target) + for (const edge of edges) adjList[edge.source]?.push(edge.target) for (let i = 0; i < nodes.length; i++) { if (color[nodes[i].id] === WHITE) @@ -87,20 +88,34 @@ const getCycleEdges = (nodes: Node[], edges: Edge[]) => { } export const preprocessNodesAndEdges = (nodes: Node[], edges: Edge[]) => { - const hasIterationNode = nodes.some(node => node.data.type === BlockEnum.Iteration) + const hasIterationNode = nodes.some( + node => node.data.type === BlockEnum.Iteration, + ) const hasLoopNode = nodes.some(node => node.data.type === BlockEnum.Loop) + const hasGroupNode = 
nodes.some(node => node.type === CUSTOM_GROUP_NODE) + const hasBusinessGroupNode = nodes.some( + node => node.data.type === BlockEnum.Group, + ) - if (!hasIterationNode && !hasLoopNode) { + if ( + !hasIterationNode + && !hasLoopNode + && !hasGroupNode + && !hasBusinessGroupNode + ) { return { nodes, edges, } } - const nodesMap = nodes.reduce((prev, next) => { - prev[next.id] = next - return prev - }, {} as Record) + const nodesMap = nodes.reduce( + (prev, next) => { + prev[next.id] = next + return prev + }, + {} as Record, + ) const iterationNodesWithStartNode = [] const iterationNodesWithoutStartNode = [] @@ -112,8 +127,12 @@ export const preprocessNodesAndEdges = (nodes: Node[], edges: Edge[]) => { if (currentNode.data.type === BlockEnum.Iteration) { if (currentNode.data.start_node_id) { - if (nodesMap[currentNode.data.start_node_id]?.type !== CUSTOM_ITERATION_START_NODE) + if ( + nodesMap[currentNode.data.start_node_id]?.type + !== CUSTOM_ITERATION_START_NODE + ) { iterationNodesWithStartNode.push(currentNode) + } } else { iterationNodesWithoutStartNode.push(currentNode) @@ -122,8 +141,12 @@ export const preprocessNodesAndEdges = (nodes: Node[], edges: Edge[]) => { if (currentNode.data.type === BlockEnum.Loop) { if (currentNode.data.start_node_id) { - if (nodesMap[currentNode.data.start_node_id]?.type !== CUSTOM_LOOP_START_NODE) + if ( + nodesMap[currentNode.data.start_node_id]?.type + !== CUSTOM_LOOP_START_NODE + ) { loopNodesWithStartNode.push(currentNode) + } } else { loopNodesWithoutStartNode.push(currentNode) @@ -132,7 +155,10 @@ export const preprocessNodesAndEdges = (nodes: Node[], edges: Edge[]) => { } const newIterationStartNodesMap = {} as Record - const newIterationStartNodes = [...iterationNodesWithStartNode, ...iterationNodesWithoutStartNode].map((iterationNode, index) => { + const newIterationStartNodes = [ + ...iterationNodesWithStartNode, + ...iterationNodesWithoutStartNode, + ].map((iterationNode, index) => { const newNode = 
getIterationStartNode(iterationNode.id) newNode.id = newNode.id + index newIterationStartNodesMap[iterationNode.id] = newNode @@ -140,24 +166,34 @@ export const preprocessNodesAndEdges = (nodes: Node[], edges: Edge[]) => { }) const newLoopStartNodesMap = {} as Record - const newLoopStartNodes = [...loopNodesWithStartNode, ...loopNodesWithoutStartNode].map((loopNode, index) => { + const newLoopStartNodes = [ + ...loopNodesWithStartNode, + ...loopNodesWithoutStartNode, + ].map((loopNode, index) => { const newNode = getLoopStartNode(loopNode.id) newNode.id = newNode.id + index newLoopStartNodesMap[loopNode.id] = newNode return newNode }) - const newEdges = [...iterationNodesWithStartNode, ...loopNodesWithStartNode].map((nodeItem) => { + const newEdges = [ + ...iterationNodesWithStartNode, + ...loopNodesWithStartNode, + ].map((nodeItem) => { const isIteration = nodeItem.data.type === BlockEnum.Iteration - const newNode = (isIteration ? newIterationStartNodesMap : newLoopStartNodesMap)[nodeItem.id] + const newNode = ( + isIteration ? 
newIterationStartNodesMap : newLoopStartNodesMap + )[nodeItem.id] const startNode = nodesMap[nodeItem.data.start_node_id] const source = newNode.id const sourceHandle = 'source' const target = startNode.id const targetHandle = 'target' - const parentNode = nodes.find(node => node.id === startNode.parentId) || null - const isInIteration = !!parentNode && parentNode.data.type === BlockEnum.Iteration + const parentNode + = nodes.find(node => node.id === startNode.parentId) || null + const isInIteration + = !!parentNode && parentNode.data.type === BlockEnum.Iteration const isInLoop = !!parentNode && parentNode.data.type === BlockEnum.Loop return { @@ -180,21 +216,159 @@ export const preprocessNodesAndEdges = (nodes: Node[], edges: Edge[]) => { } }) nodes.forEach((node) => { - if (node.data.type === BlockEnum.Iteration && newIterationStartNodesMap[node.id]) - (node.data as IterationNodeType).start_node_id = newIterationStartNodesMap[node.id].id + if ( + node.data.type === BlockEnum.Iteration + && newIterationStartNodesMap[node.id] + ) { + (node.data as IterationNodeType).start_node_id + = newIterationStartNodesMap[node.id].id + } - if (node.data.type === BlockEnum.Loop && newLoopStartNodesMap[node.id]) - (node.data as LoopNodeType).start_node_id = newLoopStartNodesMap[node.id].id + if (node.data.type === BlockEnum.Loop && newLoopStartNodesMap[node.id]) { + (node.data as LoopNodeType).start_node_id + = newLoopStartNodesMap[node.id].id + } + }) + + // Derive Group internal edges (input → entries, leaves → exits) + const groupInternalEdges: Edge[] = [] + const groupNodes = nodes.filter(node => node.type === CUSTOM_GROUP_NODE) + + for (const groupNode of groupNodes) { + const groupData = groupNode.data as unknown as CustomGroupNodeData + const { group } = groupData + + if (!group) + continue + + const { inputNodeId, entryNodeIds, exitPorts } = group + + // Derive edges: input → each entry node + for (const entryId of entryNodeIds) { + const entryNode = nodesMap[entryId] + 
if (entryNode) { + groupInternalEdges.push({ + id: `group-internal-${inputNodeId}-source-${entryId}-target`, + type: 'custom', + source: inputNodeId, + sourceHandle: 'source', + target: entryId, + targetHandle: 'target', + data: { + sourceType: '' as any, // Group input has empty type + targetType: entryNode.data.type, + _isGroupInternal: true, + _groupId: groupNode.id, + }, + zIndex: GROUP_CHILDREN_Z_INDEX, + } as Edge) + } + } + + // Derive edges: each leaf node → exit port + for (const exitPort of exitPorts) { + const leafNode = nodesMap[exitPort.leafNodeId] + if (leafNode) { + groupInternalEdges.push({ + id: `group-internal-${exitPort.leafNodeId}-${exitPort.sourceHandle}-${exitPort.portNodeId}-target`, + type: 'custom', + source: exitPort.leafNodeId, + sourceHandle: exitPort.sourceHandle, + target: exitPort.portNodeId, + targetHandle: 'target', + data: { + sourceType: leafNode.data.type, + targetType: '' as string, // Exit port has empty type + _isGroupInternal: true, + _groupId: groupNode.id, + }, + zIndex: GROUP_CHILDREN_Z_INDEX, + } as Edge) + } + } + } + + // Rebuild isTemp edges for business Group nodes (BlockEnum.Group) + // These edges connect the group node to external nodes for visual display + const groupTempEdges: Edge[] = [] + const inboundEdgeIds = new Set() + + nodes.forEach((groupNode) => { + if (groupNode.data.type !== BlockEnum.Group) + return + + const groupData = groupNode.data as GroupNodeData + const { + members = [], + headNodeIds = [], + leafNodeIds = [], + handlers = [], + } = groupData + const memberSet = new Set(members.map(m => m.id)) + const headSet = new Set(headNodeIds) + const leafSet = new Set(leafNodeIds) + + edges.forEach((edge) => { + // Inbound edge: source outside group, target is a head node + // Use Set to dedupe since multiple head nodes may share same external source + if (!memberSet.has(edge.source) && headSet.has(edge.target)) { + const sourceHandle = edge.sourceHandle || 'source' + const edgeId = 
`${edge.source}-${sourceHandle}-${groupNode.id}-target` + if (!inboundEdgeIds.has(edgeId)) { + inboundEdgeIds.add(edgeId) + groupTempEdges.push({ + id: edgeId, + type: 'custom', + source: edge.source, + sourceHandle, + target: groupNode.id, + targetHandle: 'target', + data: { + sourceType: edge.data?.sourceType, + targetType: BlockEnum.Group, + _isTemp: true, + }, + } as Edge) + } + } + + // Outbound edge: source is a leaf node, target outside group + if (leafSet.has(edge.source) && !memberSet.has(edge.target)) { + const edgeSourceHandle = edge.sourceHandle || 'source' + const handler = handlers.find( + h => + h.nodeId === edge.source && h.sourceHandle === edgeSourceHandle, + ) + if (handler) { + groupTempEdges.push({ + id: `${groupNode.id}-${handler.id}-${edge.target}-${edge.targetHandle}`, + type: 'custom', + source: groupNode.id, + sourceHandle: handler.id, + target: edge.target!, + targetHandle: edge.targetHandle, + data: { + sourceType: BlockEnum.Group, + targetType: edge.data?.targetType, + _isTemp: true, + }, + } as Edge) + } + } + }) }) return { nodes: [...nodes, ...newIterationStartNodes, ...newLoopStartNodes], - edges: [...edges, ...newEdges], + edges: [...edges, ...newEdges, ...groupInternalEdges, ...groupTempEdges], } } export const initialNodes = (originNodes: Node[], originEdges: Edge[]) => { - const { nodes, edges } = preprocessNodesAndEdges(cloneDeep(originNodes), cloneDeep(originEdges)) + const { nodes, edges } = preprocessNodesAndEdges( + cloneDeep(originNodes), + cloneDeep(originEdges), + ) const firstNode = nodes[0] if (!firstNode?.position) { @@ -206,23 +380,35 @@ export const initialNodes = (originNodes: Node[], originEdges: Edge[]) => { }) } - const iterationOrLoopNodeMap = nodes.reduce((acc, node) => { - if (node.parentId) { - if (acc[node.parentId]) - acc[node.parentId].push({ nodeId: node.id, nodeType: node.data.type }) - else - acc[node.parentId] = [{ nodeId: node.id, nodeType: node.data.type }] - } - return acc - }, {} as Record) + const 
iterationOrLoopNodeMap = nodes.reduce( + (acc, node) => { + if (node.parentId) { + if (acc[node.parentId]) { + acc[node.parentId].push({ + nodeId: node.id, + nodeType: node.data.type, + }) + } + else { + acc[node.parentId] = [{ nodeId: node.id, nodeType: node.data.type }] + } + } + return acc + }, + {} as Record, + ) return nodes.map((node) => { if (!node.type) node.type = CUSTOM_NODE const connectedEdges = getConnectedEdges([node], edges) - node.data._connectedSourceHandleIds = connectedEdges.filter(edge => edge.source === node.id).map(edge => edge.sourceHandle || 'source') - node.data._connectedTargetHandleIds = connectedEdges.filter(edge => edge.target === node.id).map(edge => edge.targetHandle || 'target') + node.data._connectedSourceHandleIds = connectedEdges + .filter(edge => edge.source === node.id) + .map(edge => edge.sourceHandle || 'source') + node.data._connectedTargetHandleIds = connectedEdges + .filter(edge => edge.target === node.id) + .map(edge => edge.targetHandle || 'target') if (node.data.type === BlockEnum.IfElse) { const nodeData = node.data as IfElseNodeType @@ -237,49 +423,86 @@ export const initialNodes = (originNodes: Node[], originEdges: Edge[]) => { ] } node.data._targetBranches = branchNameCorrect([ - ...(node.data as IfElseNodeType).cases.map(item => ({ id: item.case_id, name: '' })), + ...(node.data as IfElseNodeType).cases.map(item => ({ + id: item.case_id, + name: '', + })), { id: 'false', name: '' }, ]) // delete conditions and logical_operator if cases is not empty - if (nodeData.cases.length > 0 && nodeData.conditions && nodeData.logical_operator) { + if ( + nodeData.cases.length > 0 + && nodeData.conditions + && nodeData.logical_operator + ) { delete nodeData.conditions delete nodeData.logical_operator } } if (node.data.type === BlockEnum.QuestionClassifier) { - node.data._targetBranches = (node.data as QuestionClassifierNodeType).classes.map((topic) => { + node.data._targetBranches = ( + node.data as QuestionClassifierNodeType + 
).classes.map((topic) => { return topic }) } + if (node.data.type === BlockEnum.Group) { + const groupData = node.data as GroupNodeData + if (groupData.handlers?.length) { + node.data._targetBranches = groupData.handlers.map(handler => ({ + id: handler.id, + name: handler.label || handler.id, + })) + } + } + if (node.data.type === BlockEnum.Iteration) { const iterationNodeData = node.data as IterationNodeType iterationNodeData._children = iterationOrLoopNodeMap[node.id] || [] iterationNodeData.is_parallel = iterationNodeData.is_parallel || false iterationNodeData.parallel_nums = iterationNodeData.parallel_nums || 10 - iterationNodeData.error_handle_mode = iterationNodeData.error_handle_mode || ErrorHandleMode.Terminated + iterationNodeData.error_handle_mode + = iterationNodeData.error_handle_mode || ErrorHandleMode.Terminated } // TODO: loop error handle mode if (node.data.type === BlockEnum.Loop) { const loopNodeData = node.data as LoopNodeType loopNodeData._children = iterationOrLoopNodeMap[node.id] || [] - loopNodeData.error_handle_mode = loopNodeData.error_handle_mode || ErrorHandleMode.Terminated + loopNodeData.error_handle_mode + = loopNodeData.error_handle_mode || ErrorHandleMode.Terminated } // legacy provider handle - if (node.data.type === BlockEnum.LLM) - (node as any).data.model.provider = correctModelProvider((node as any).data.model.provider) + if (node.data.type === BlockEnum.LLM) { + (node as any).data.model.provider = correctModelProvider( + (node as any).data.model.provider, + ) + } - if (node.data.type === BlockEnum.KnowledgeRetrieval && (node as any).data.multiple_retrieval_config?.reranking_model) - (node as any).data.multiple_retrieval_config.reranking_model.provider = correctModelProvider((node as any).data.multiple_retrieval_config?.reranking_model.provider) + if ( + node.data.type === BlockEnum.KnowledgeRetrieval + && (node as any).data.multiple_retrieval_config?.reranking_model + ) { + (node as 
any).data.multiple_retrieval_config.reranking_model.provider + = correctModelProvider( + (node as any).data.multiple_retrieval_config?.reranking_model.provider, + ) + } - if (node.data.type === BlockEnum.QuestionClassifier) - (node as any).data.model.provider = correctModelProvider((node as any).data.model.provider) + if (node.data.type === BlockEnum.QuestionClassifier) { + (node as any).data.model.provider = correctModelProvider( + (node as any).data.model.provider, + ) + } - if (node.data.type === BlockEnum.ParameterExtractor) - (node as any).data.model.provider = correctModelProvider((node as any).data.model.provider) + if (node.data.type === BlockEnum.ParameterExtractor) { + (node as any).data.model.provider = correctModelProvider( + (node as any).data.model.provider, + ) + } if (node.data.type === BlockEnum.HttpRequest && !node.data.retry_config) { node.data.retry_config = { @@ -289,14 +512,21 @@ export const initialNodes = (originNodes: Node[], originEdges: Edge[]) => { } } - if (node.data.type === BlockEnum.Tool && !(node as Node).data.version && !(node as Node).data.tool_node_version) { + if ( + node.data.type === BlockEnum.Tool + && !(node as Node).data.version + && !(node as Node).data.tool_node_version + ) { (node as Node).data.tool_node_version = '2' const toolConfigurations = (node as Node).data.tool_configurations if (toolConfigurations && Object.keys(toolConfigurations).length > 0) { const newValues = { ...toolConfigurations } Object.keys(toolConfigurations).forEach((key) => { - if (typeof toolConfigurations[key] !== 'object' || toolConfigurations[key] === null) { + if ( + typeof toolConfigurations[key] !== 'object' + || toolConfigurations[key] === null + ) { newValues[key] = { type: 'constant', value: toolConfigurations[key], @@ -312,50 +542,62 @@ export const initialNodes = (originNodes: Node[], originEdges: Edge[]) => { } export const initialEdges = (originEdges: Edge[], originNodes: Node[]) => { - const { nodes, edges } = 
preprocessNodesAndEdges(cloneDeep(originNodes), cloneDeep(originEdges)) + const { nodes, edges } = preprocessNodesAndEdges( + cloneDeep(originNodes), + cloneDeep(originEdges), + ) let selectedNode: Node | null = null - const nodesMap = nodes.reduce((acc, node) => { - acc[node.id] = node + const nodesMap = nodes.reduce( + (acc, node) => { + acc[node.id] = node - if (node.data?.selected) - selectedNode = node + if (node.data?.selected) + selectedNode = node - return acc - }, {} as Record) + return acc + }, + {} as Record, + ) const cycleEdges = getCycleEdges(nodes, edges) - return edges.filter((edge) => { - return !cycleEdges.find(cycEdge => cycEdge.source === edge.source && cycEdge.target === edge.target) - }).map((edge) => { - edge.type = 'custom' + return edges + .filter((edge) => { + return !cycleEdges.find( + cycEdge => + cycEdge.source === edge.source && cycEdge.target === edge.target, + ) + }) + .map((edge) => { + edge.type = 'custom' - if (!edge.sourceHandle) - edge.sourceHandle = 'source' + if (!edge.sourceHandle) + edge.sourceHandle = 'source' - if (!edge.targetHandle) - edge.targetHandle = 'target' + if (!edge.targetHandle) + edge.targetHandle = 'target' - if (!edge.data?.sourceType && edge.source && nodesMap[edge.source]) { - edge.data = { - ...edge.data, - sourceType: nodesMap[edge.source].data.type!, - } as any - } + if (!edge.data?.sourceType && edge.source && nodesMap[edge.source]) { + edge.data = { + ...edge.data, + sourceType: nodesMap[edge.source].data.type!, + } as any + } - if (!edge.data?.targetType && edge.target && nodesMap[edge.target]) { - edge.data = { - ...edge.data, - targetType: nodesMap[edge.target].data.type!, - } as any - } + if (!edge.data?.targetType && edge.target && nodesMap[edge.target]) { + edge.data = { + ...edge.data, + targetType: nodesMap[edge.target].data.type!, + } as any + } - if (selectedNode) { - edge.data = { - ...edge.data, - _connectedNodeIsSelected: edge.source === selectedNode.id || edge.target === selectedNode.id, 
- } as any - } + if (selectedNode) { + edge.data = { + ...edge.data, + _connectedNodeIsSelected: + edge.source === selectedNode.id || edge.target === selectedNode.id, + } as any + } - return edge - }) + return edge + }) } diff --git a/web/app/components/workflow/utils/workflow.ts b/web/app/components/workflow/utils/workflow.ts index ea92ba76e0..2f45e07475 100644 --- a/web/app/components/workflow/utils/workflow.ts +++ b/web/app/components/workflow/utils/workflow.ts @@ -158,6 +158,95 @@ export const getValidTreeNodes = (nodes: Node[], edges: Edge[]) => { } } +export const getCommonPredecessorNodeIds = (selectedNodeIds: string[], edges: Edge[]) => { + const uniqSelectedNodeIds = Array.from(new Set(selectedNodeIds)) + if (uniqSelectedNodeIds.length <= 1) + return [] + + const selectedNodeIdSet = new Set(uniqSelectedNodeIds) + const predecessorNodeIdsMap = new Map>() + + edges.forEach((edge) => { + if (!selectedNodeIdSet.has(edge.target)) + return + + const predecessors = predecessorNodeIdsMap.get(edge.target) ?? new Set() + predecessors.add(edge.source) + predecessorNodeIdsMap.set(edge.target, predecessors) + }) + + let commonPredecessorNodeIds: Set | null = null + + uniqSelectedNodeIds.forEach((nodeId) => { + const predecessors = predecessorNodeIdsMap.get(nodeId) ?? new Set() + + if (!commonPredecessorNodeIds) { + commonPredecessorNodeIds = new Set(predecessors) + return + } + + Array.from(commonPredecessorNodeIds).forEach((predecessorNodeId) => { + if (!predecessors.has(predecessorNodeId)) + commonPredecessorNodeIds!.delete(predecessorNodeId) + }) + }) + + return Array.from(commonPredecessorNodeIds ?? 
[]).sort() +} + +export type PredecessorHandle = { + nodeId: string + handleId: string +} + +export const getCommonPredecessorHandles = (targetNodeIds: string[], edges: Edge[]): PredecessorHandle[] => { + const uniqTargetNodeIds = Array.from(new Set(targetNodeIds)) + if (uniqTargetNodeIds.length === 0) + return [] + + // Get the "direct predecessor handler", which is: + // - edge.source (predecessor node) + // - edge.sourceHandle (the specific output handle of the predecessor; defaults to 'source' if not set) + // Used to handle multi-handle branch scenarios like If-Else / Classifier. + const targetNodeIdSet = new Set(uniqTargetNodeIds) + const predecessorHandleMap = new Map>() // targetNodeId -> Set<`${source}\0${handleId}`> + const delimiter = '\u0000' + + edges.forEach((edge) => { + if (!targetNodeIdSet.has(edge.target)) + return + + const predecessors = predecessorHandleMap.get(edge.target) ?? new Set() + const handleId = edge.sourceHandle || 'source' + predecessors.add(`${edge.source}${delimiter}${handleId}`) + predecessorHandleMap.set(edge.target, predecessors) + }) + + // Intersect predecessor handlers of all targets, keeping only handlers common to all targets. + let commonKeys: Set | null = null + + uniqTargetNodeIds.forEach((nodeId) => { + const keys = predecessorHandleMap.get(nodeId) ?? new Set() + + if (!commonKeys) { + commonKeys = new Set(keys) + return + } + + Array.from(commonKeys).forEach((key) => { + if (!keys.has(key)) + commonKeys!.delete(key) + }) + }) + + return Array.from(commonKeys ?? 
[]) + .map((key) => { + const [nodeId, handleId] = key.split(delimiter) + return { nodeId, handleId } + }) + .sort((a, b) => a.nodeId.localeCompare(b.nodeId) || a.handleId.localeCompare(b.handleId)) +} + export const changeNodesAndEdgesId = (nodes: Node[], edges: Edge[]) => { const idMap = nodes.reduce((acc, node) => { acc[node.id] = uuid4() diff --git a/web/app/components/workflow/workflow-preview/components/nodes/sub-graph-start/index.tsx b/web/app/components/workflow/workflow-preview/components/nodes/sub-graph-start/index.tsx new file mode 100644 index 0000000000..aa0082b8c1 --- /dev/null +++ b/web/app/components/workflow/workflow-preview/components/nodes/sub-graph-start/index.tsx @@ -0,0 +1,60 @@ +import type { NodeProps } from 'reactflow' +import type { CommonNodeType } from '@/app/components/workflow/types' +import { memo } from 'react' +import { useTranslation } from 'react-i18next' +import { AssembleVariablesAlt } from '@/app/components/base/icons/src/vender/line/general' +import { Agent } from '@/app/components/base/icons/src/vender/workflow' +import Tooltip from '@/app/components/base/tooltip' +import { cn } from '@/utils/classnames' +import { NodeSourceHandle } from '../../node-handle' + +type SubGraphStartNodeData = CommonNodeType<{ + tooltip?: string + iconType?: string +}> + +type IconComponent = typeof Agent + +const iconMap: Record = { + agent: Agent, + assemble: AssembleVariablesAlt, +} + +const SubGraphStartNode = ({ id, data }: NodeProps) => { + const { t } = useTranslation() + const iconType = data?.iconType || 'agent' + const Icon = iconMap[iconType] || Agent + const rawTitle = data?.title?.trim() || '' + const showTitle = iconType === 'agent' && !!rawTitle + const displayTitle = showTitle && (rawTitle.startsWith('@') ? rawTitle : `@${rawTitle}`) + const tooltip = data?.tooltip + || (iconType === 'assemble' ? t('blocks.start', { ns: 'workflow' }) : (data?.title || t('blocks.start', { ns: 'workflow' }))) + + return ( +
+ +
+ +
+
+ {showTitle && ( + + {displayTitle} + + )} + +
+ ) +} + +export default memo(SubGraphStartNode) diff --git a/web/app/components/workflow/workflow-preview/index.tsx b/web/app/components/workflow/workflow-preview/index.tsx index 8f61c2cfb6..7e7b2c271c 100644 --- a/web/app/components/workflow/workflow-preview/index.tsx +++ b/web/app/components/workflow/workflow-preview/index.tsx @@ -29,6 +29,7 @@ import { import CustomConnectionLine from '@/app/components/workflow/custom-connection-line' import { CUSTOM_ITERATION_START_NODE } from '@/app/components/workflow/nodes/iteration-start/constants' import { CUSTOM_LOOP_START_NODE } from '@/app/components/workflow/nodes/loop-start/constants' +import { CUSTOM_SUB_GRAPH_START_NODE } from '@/app/components/workflow/nodes/sub-graph-start/constants' import { CUSTOM_NOTE_NODE } from '@/app/components/workflow/note-node/constants' import { CUSTOM_SIMPLE_NODE } from '@/app/components/workflow/simple-node/constants' import { @@ -40,6 +41,7 @@ import CustomEdge from './components/custom-edge' import CustomNode from './components/nodes' import IterationStartNode from './components/nodes/iteration-start' import LoopStartNode from './components/nodes/loop-start' +import SubGraphStartNode from './components/nodes/sub-graph-start' import CustomNoteNode from './components/note-node' import ZoomInOut from './components/zoom-in-out' import 'reactflow/dist/style.css' @@ -49,6 +51,7 @@ const nodeTypes = { [CUSTOM_NODE]: CustomNode, [CUSTOM_NOTE_NODE]: CustomNoteNode, [CUSTOM_SIMPLE_NODE]: CustomNode, + [CUSTOM_SUB_GRAPH_START_NODE]: SubGraphStartNode, [CUSTOM_ITERATION_START_NODE]: IterationStartNode, [CUSTOM_LOOP_START_NODE]: LoopStartNode, } diff --git a/web/config/index.spec.ts b/web/config/index.spec.ts index 7b1d91186d..e03ee92dfd 100644 --- a/web/config/index.spec.ts +++ b/web/config/index.spec.ts @@ -70,6 +70,8 @@ describe('config test', () => { // rag variables '{{#rag.1748945155129.a#}}', '{{#rag.shared.bbb#}}', + '{{@1749783300519.llm.a@}}', + '{{@sys.query@}}', ] 
vars.forEach((variable) => { expect(VAR_REGEX.test(variable)).toBe(true) diff --git a/web/config/index.ts b/web/config/index.ts index 08ce14b264..d443accc76 100644 --- a/web/config/index.ts +++ b/web/config/index.ts @@ -337,7 +337,7 @@ Thought: {{agent_scratchpad}} } export const VAR_REGEX - = /\{\{(#[\w-]{1,50}(\.\d+)?(\.[a-z_]\w{0,29}){1,10}#)\}\}/gi + = /\{\{([#@])[\w-]{1,50}(\.\d+)?(\.[a-z_]\w{0,29}){1,10}\1\}\}/gi export const resetReg = () => (VAR_REGEX.lastIndex = 0) diff --git a/web/eslint-suppressions.json b/web/eslint-suppressions.json index f53a175b27..a0e62ffee2 100644 --- a/web/eslint-suppressions.json +++ b/web/eslint-suppressions.json @@ -3631,11 +3631,6 @@ "count": 7 } }, - "app/components/workflow/nodes/tool/components/mixed-variable-text-input/index.tsx": { - "ts/no-explicit-any": { - "count": 1 - } - }, "app/components/workflow/nodes/tool/components/mixed-variable-text-input/placeholder.tsx": { "ts/no-explicit-any": { "count": 1 @@ -4538,4 +4533,4 @@ "count": 2 } } -} +} \ No newline at end of file diff --git a/web/i18n/en-US/workflow.json b/web/i18n/en-US/workflow.json index 2b4f351fe7..4a04f5ac63 100644 --- a/web/i18n/en-US/workflow.json +++ b/web/i18n/en-US/workflow.json @@ -8,6 +8,7 @@ "blocks.datasource-empty": "Empty Data Source", "blocks.document-extractor": "Doc Extractor", "blocks.end": "Output", + "blocks.group": "Group", "blocks.http-request": "HTTP Request", "blocks.if-else": "IF/ELSE", "blocks.iteration": "Iteration", @@ -39,6 +40,7 @@ "blocksAbout.datasource-empty": "Empty Data Source placeholder", "blocksAbout.document-extractor": "Used to parse uploaded documents into text content that is easily understandable by LLM.", "blocksAbout.end": "Define the output and result type of a workflow", + "blocksAbout.group": "Group multiple nodes together for better organization", "blocksAbout.http-request": "Allow server requests to be sent over the HTTP protocol", "blocksAbout.if-else": "Allows you to split the workflow into two branches 
based on if/else conditions", "blocksAbout.iteration": "Perform multiple steps on a list object until all results are outputted.", @@ -173,6 +175,7 @@ "common.needConnectTip": "This step is not connected to anything", "common.needOutputNode": "The Output node must be added", "common.needStartNode": "At least one start node must be added", + "common.noAgentNodes": "No agent nodes available", "common.noHistory": "No History", "common.noVar": "No variable", "common.notRunning": "Not running yet", @@ -204,6 +207,7 @@ "common.runApp": "Run App", "common.runHistory": "Run History", "common.running": "Running", + "common.searchAgent": "Search agent...", "common.searchVar": "Search variable", "common.setVarValuePlaceholder": "Set variable", "common.showRunHistory": "Show Run History", @@ -215,6 +219,7 @@ "common.variableNamePlaceholder": "Variable name", "common.versionHistory": "Version History", "common.viewDetailInTracingPanel": "View details", + "common.viewInternals": "View Internals", "common.viewOnly": "View Only", "common.viewRunHistory": "View run history", "common.workflowAsTool": "Workflow as Tool", @@ -633,9 +638,11 @@ "nodes.listFilter.outputVars.last_record": "Last record", "nodes.listFilter.outputVars.result": "Filter result", "nodes.listFilter.selectVariableKeyPlaceholder": "Select sub variable key", + "nodes.llm.addContext": "Add Context", "nodes.llm.addMessage": "Add Message", "nodes.llm.advancedSettings": "Advanced Settings", "nodes.llm.context": "context", + "nodes.llm.contextBlock": "Context Block", "nodes.llm.contextTooltip": "You can import Knowledge as context", "nodes.llm.files": "Files", "nodes.llm.jsonSchema.addChildField": "Add Child Field", @@ -672,6 +679,7 @@ "nodes.llm.reasoningFormat.tagged": "Keep think tags", "nodes.llm.reasoningFormat.title": "Enable reasoning tag separation", "nodes.llm.reasoningFormat.tooltip": "Extract content from think tags and store it in the reasoning_content field.", + "nodes.llm.removeContext": "Remove context", 
"nodes.llm.resolution.high": "High", "nodes.llm.resolution.low": "Low", "nodes.llm.resolution.name": "Resolution", @@ -773,10 +781,13 @@ "nodes.templateTransform.codeSupportTip": "Only supports Jinja2", "nodes.templateTransform.inputVars": "Input Variables", "nodes.templateTransform.outputVars.output": "Transformed content", + "nodes.tool.agentPlaceholder": "Tell me the {{paramKey}}...", + "nodes.tool.assembleVariables": "Assemble variables", "nodes.tool.authorize": "Authorize", "nodes.tool.inputVars": "Input Variables", "nodes.tool.insertPlaceholder1": "Type or press", "nodes.tool.insertPlaceholder2": "insert variable", + "nodes.tool.insertPlaceholder3": "add agent", "nodes.tool.outputVars.files.title": "tool generated files", "nodes.tool.outputVars.files.transfer_method": "Transfer method.Value is remote_url or local_file", "nodes.tool.outputVars.files.type": "Support type. Now only support image", @@ -951,6 +962,7 @@ "operator.distributeHorizontal": "Space Horizontally", "operator.distributeVertical": "Space Vertically", "operator.horizontal": "Horizontal", + "operator.makeGroup": "Make Group", "operator.selectionAlignment": "Selection Alignment", "operator.vertical": "Vertical", "operator.zoomIn": "Zoom In", @@ -979,6 +991,7 @@ "panel.scrollToSelectedNode": "Scroll to selected node", "panel.selectNextStep": "Select Next Step", "panel.startNode": "Start Node", + "panel.ungroup": "Ungroup", "panel.userInputField": "User Input Field", "publishLimit.startNodeDesc": "You’ve reached the limit of 2 triggers per workflow for this plan. Upgrade to publish this workflow.", "publishLimit.startNodeTitlePrefix": "Upgrade to", @@ -1039,6 +1052,22 @@ "skillSidebar.unsavedChanges.content": "You have unsaved changes. 
Do you want to discard them?", "skillSidebar.unsavedChanges.title": "Unsaved changes", "skillSidebar.uploading": "Uploading…", + "subGraphModal.canvasPlaceholder": "Click to configure the internal structure", + "subGraphModal.defaultValueHint": "Returns the value below", + "subGraphModal.internalStructure": "Internal structure", + "subGraphModal.internalStructureDesc": "Internal structure of @{{name}}", + "subGraphModal.lastRun": "LAST RUN", + "subGraphModal.noRunHistory": "No run history yet", + "subGraphModal.outputVariables": "OUTPUT VARIABLES", + "subGraphModal.settings": "SETTINGS", + "subGraphModal.sourceNode": "SOURCE", + "subGraphModal.title": "INTERNAL STRUCTURE", + "subGraphModal.whenOutputIsNone": "WHEN OUTPUT IS NONE", + "subGraphModal.whenOutputNone.default": "Default value", + "subGraphModal.whenOutputNone.defaultDesc": "Returns the value below", + "subGraphModal.whenOutputNone.error": "Raise an error", + "subGraphModal.whenOutputNone.errorDesc": "Pass the error to the outer workflow", + "subGraphModal.whenOutputNone.skip": "Skip this step", "tabs.-": "Default", "tabs.addAll": "Add all", "tabs.agent": "Agent Strategy", diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts new file mode 100644 index 0000000000..76050edabb --- /dev/null +++ b/web/i18n/en-US/workflow.ts @@ -0,0 +1,1298 @@ +const translation = { + common: { + undo: 'Undo', + redo: 'Redo', + editing: 'Editing', + autoSaved: 'Auto-Saved', + unpublished: 'Unpublished', + published: 'Published', + publish: 'Publish', + update: 'Update', + publishUpdate: 'Publish Update', + run: 'Test Run', + running: 'Running', + listening: 'Listening', + chooseStartNodeToRun: 'Choose the start node to run', + runAllTriggers: 'Run all triggers', + inRunMode: 'In Run Mode', + inPreview: 'In Preview', + inPreviewMode: 'In Preview Mode', + preview: 'Preview', + viewRunHistory: 'View run history', + runHistory: 'Run History', + goBackToEdit: 'Go back to editor', + conversationLog: 'Conversation 
Log', + features: 'Features', + featuresDescription: 'Enhance web app user experience', + ImageUploadLegacyTip: 'You can now create file type variables in the start form. We will no longer support the image upload feature in the future. ', + fileUploadTip: 'Image upload features have been upgraded to file upload. ', + featuresDocLink: 'Learn more', + debugAndPreview: 'Preview', + restart: 'Restart', + currentDraft: 'Current Draft', + currentDraftUnpublished: 'Current Draft Unpublished', + latestPublished: 'Latest Published', + publishedAt: 'Published', + restore: 'Restore', + versionHistory: 'Version History', + exitVersions: 'Exit Versions', + runApp: 'Run App', + batchRunApp: 'Batch Run App', + openInExplore: 'Open in Explore', + accessAPIReference: 'Access API Reference', + embedIntoSite: 'Embed Into Site', + addTitle: 'Add title...', + addDescription: 'Add description...', + noVar: 'No variable', + searchVar: 'Search variable', + variableNamePlaceholder: 'Variable name', + setVarValuePlaceholder: 'Set variable', + needConnectTip: 'This step is not connected to anything', + maxTreeDepth: 'Maximum limit of {{depth}} nodes per branch', + needAdd: '{{node}} node must be added', + needOutputNode: 'The Output node must be added', + needStartNode: 'At least one start node must be added', + needAnswerNode: 'The Answer node must be added', + workflowProcess: 'Workflow Process', + notRunning: 'Not running yet', + previewPlaceholder: 'Enter content in the box below to start debugging the Chatbot', + effectVarConfirm: { + title: 'Remove Variable', + content: 'The variable is used in other nodes. 
Do you still want to remove it?', + }, + insertVarTip: 'Press the \'/\' key to insert quickly', + processData: 'Process Data', + input: 'Input', + output: 'Output', + jinjaEditorPlaceholder: 'Type \'/\' or \'{\' to insert variable', + viewOnly: 'View Only', + showRunHistory: 'Show Run History', + enableJinja: 'Enable Jinja template support', + learnMore: 'Learn More', + copy: 'Copy', + duplicate: 'Duplicate', + addBlock: 'Add Node', + pasteHere: 'Paste Here', + pointerMode: 'Pointer Mode', + handMode: 'Hand Mode', + exportImage: 'Export Image', + exportPNG: 'Export as PNG', + exportJPEG: 'Export as JPEG', + exportSVG: 'Export as SVG', + currentView: 'Current View', + currentWorkflow: 'Current Workflow', + moreActions: 'More Actions', + model: 'Model', + workflowAsTool: 'Workflow as Tool', + configureRequired: 'Configure Required', + configure: 'Configure', + manageInTools: 'Manage in Tools', + workflowAsToolTip: 'Tool reconfiguration is required after the workflow update.', + workflowAsToolDisabledHint: 'Publish the latest workflow and ensure a connected User Input node before configuring it as a tool.', + viewDetailInTracingPanel: 'View details', + syncingData: 'Syncing data, just a few seconds.', + importDSL: 'Import DSL', + importDSLTip: 'Current draft will be overwritten.\nExport workflow as backup before importing.', + backupCurrentDraft: 'Backup Current Draft', + chooseDSL: 'Choose DSL file', + overwriteAndImport: 'Overwrite and Import', + importFailure: 'Import Failed', + importWarning: 'Caution', + importWarningDetails: 'DSL version difference may affect certain features', + importSuccess: 'Import Successfully', + parallelTip: { + click: { + title: 'Click', + desc: ' to add', + }, + drag: { + title: 'Drag', + desc: ' to connect', + }, + limit: 'Parallelism is limited to {{num}} branches.', + depthLimit: 'Parallel nesting layer limit of {{num}} layers', + }, + disconnect: 'Disconnect', + jumpToNode: 'Jump to this node', + addParallelNode: 'Add Parallel 
Node', + parallel: 'PARALLEL', + branch: 'BRANCH', + onFailure: 'On Failure', + addFailureBranch: 'Add Fail Branch', + loadMore: 'Load More', + noHistory: 'No History', + tagBound: 'Number of apps using this tag', + }, + publishLimit: { + startNodeTitlePrefix: 'Upgrade to', + startNodeTitleSuffix: 'unlock unlimited triggers per workflow', + startNodeDesc: 'You’ve reached the limit of 2 triggers per workflow for this plan. Upgrade to publish this workflow.', + }, + env: { + envPanelTitle: 'Environment Variables', + envDescription: 'Environment variables can be used to store private information and credentials. They are read-only and can be separated from the DSL file during export.', + envPanelButton: 'Add Variable', + modal: { + title: 'Add Environment Variable', + editTitle: 'Edit Environment Variable', + type: 'Type', + name: 'Name', + namePlaceholder: 'env name', + value: 'Value', + valuePlaceholder: 'env value', + secretTip: 'Used to define sensitive information or data, with DSL settings configured for leak prevention.', + description: 'Description', + descriptionPlaceholder: 'Describe the variable', + }, + export: { + title: 'Export Secret environment variables?', + checkbox: 'Export secret values', + ignore: 'Export DSL', + export: 'Export DSL with secret values ', + }, + }, + globalVar: { + title: 'System Variables', + description: 'System variables are global variables that can be referenced by any node without wiring when the type is correct, such as end-user ID and workflow ID.', + fieldsDescription: { + conversationId: 'Conversation ID', + dialogCount: 'Conversation Count', + userId: 'User ID', + triggerTimestamp: 'Application start timestamp', + appId: 'Application ID', + workflowId: 'Workflow ID', + workflowRunId: 'Workflow run ID', + }, + }, + sidebar: { + exportWarning: 'Export Current Saved Version', + exportWarningDesc: 'This will export the current saved version of your workflow. 
If you have unsaved changes in the editor, please save them first by using the export option in the workflow canvas.', + }, + chatVariable: { + panelTitle: 'Conversation Variables', + panelDescription: 'Conversation Variables are used to store interactive information that LLM needs to remember, including conversation history, uploaded files, user preferences. They are read-write. ', + docLink: 'Visit our docs to learn more.', + button: 'Add Variable', + modal: { + title: 'Add Conversation Variable', + editTitle: 'Edit Conversation Variable', + name: 'Name', + namePlaceholder: 'Variable name', + type: 'Type', + value: 'Default Value', + valuePlaceholder: 'Default value, leave blank to not set', + description: 'Description', + descriptionPlaceholder: 'Describe the variable', + editInJSON: 'Edit in JSON', + oneByOne: 'Add one by one', + editInForm: 'Edit in Form', + arrayValue: 'Value', + addArrayValue: 'Add Value', + objectKey: 'Key', + objectType: 'Type', + objectValue: 'Default Value', + }, + storedContent: 'Stored content', + updatedAt: 'Updated at ', + }, + changeHistory: { + title: 'Change History', + placeholder: 'You haven\'t changed anything yet', + clearHistory: 'Clear History', + hint: 'Hint', + hintText: 'Your editing actions are tracked in a change history, which is stored on your device for the duration of this session. 
This history will be cleared when you leave the editor.', + stepBackward_one: '{{count}} step backward', + stepBackward_other: '{{count}} steps backward', + stepForward_one: '{{count}} step forward', + stepForward_other: '{{count}} steps forward', + sessionStart: 'Session Start', + currentState: 'Current State', + nodeTitleChange: 'Node title changed', + nodeDescriptionChange: 'Node description changed', + nodeDragStop: 'Node moved', + nodeChange: 'Node changed', + nodeConnect: 'Node connected', + nodePaste: 'Node pasted', + nodeDelete: 'Node deleted', + nodeAdd: 'Node added', + nodeResize: 'Node resized', + noteAdd: 'Note added', + noteChange: 'Note changed', + noteDelete: 'Note deleted', + edgeDelete: 'Node disconnected', + }, + errorMsg: { + fieldRequired: '{{field}} is required', + rerankModelRequired: 'A configured Rerank Model is required', + authRequired: 'Authorization is required', + invalidJson: '{{field}} is invalid JSON', + fields: { + variable: 'Variable Name', + variableValue: 'Variable Value', + code: 'Code', + model: 'Model', + rerankModel: 'A configured Rerank Model', + visionVariable: 'Vision Variable', + }, + invalidVariable: 'Invalid variable', + noValidTool: '{{field}} no valid tool selected', + toolParameterRequired: '{{field}}: parameter [{{param}}] is required', + startNodeRequired: 'Please add a start node first before {{operation}}', + }, + error: { + startNodeRequired: 'Please add a start node first before {{operation}}', + operations: { + connectingNodes: 'connecting nodes', + addingNodes: 'adding nodes', + modifyingWorkflow: 'modifying workflow', + updatingWorkflow: 'updating workflow', + }, + }, + singleRun: { + testRun: 'Test Run', + startRun: 'Start Run', + preparingDataSource: 'Preparing Data Source', + reRun: 'Re-run', + running: 'Running', + testRunIteration: 'Test Run Iteration', + back: 'Back', + iteration: 'Iteration', + loop: 'Loop', + }, + tabs: { + 'searchBlock': 'Search node', + 'start': 'Start', + 'blocks': 'Nodes', + 
'searchTool': 'Search tool', + 'searchTrigger': 'Search triggers...', + 'allTriggers': 'All triggers', + 'tools': 'Tools', + 'allTool': 'All', + 'plugin': 'Plugin', + 'customTool': 'Custom', + 'workflowTool': 'Workflow', + 'question-understand': 'Question Understand', + 'logic': 'Logic', + 'transform': 'Transform', + 'utilities': 'Utilities', + 'noResult': 'No match found', + 'noPluginsFound': 'No plugins were found', + 'requestToCommunity': 'Requests to the community', + 'agent': 'Agent Strategy', + 'allAdded': 'All added', + 'addAll': 'Add all', + 'sources': 'Sources', + 'searchDataSource': 'Search Data Source', + 'featuredTools': 'Featured', + 'showMoreFeatured': 'Show more', + 'showLessFeatured': 'Show less', + 'installed': 'Installed', + 'pluginByAuthor': 'By {{author}}', + 'usePlugin': 'Select tool', + 'hideActions': 'Hide tools', + 'noFeaturedPlugins': 'Discover more tools in Marketplace', + 'noFeaturedTriggers': 'Discover more triggers in Marketplace', + 'startDisabledTip': 'Trigger node and user input node are mutually exclusive.', + }, + blocks: { + 'start': 'User Input', + 'originalStartNode': 'original start node', + 'end': 'Output', + 'answer': 'Answer', + 'llm': 'LLM', + 'knowledge-retrieval': 'Knowledge Retrieval', + 'question-classifier': 'Question Classifier', + 'if-else': 'IF/ELSE', + 'code': 'Code', + 'template-transform': 'Template', + 'http-request': 'HTTP Request', + 'variable-assigner': 'Variable Aggregator', + 'variable-aggregator': 'Variable Aggregator', + 'assigner': 'Variable Assigner', + 'iteration-start': 'Iteration Start', + 'iteration': 'Iteration', + 'parameter-extractor': 'Parameter Extractor', + 'document-extractor': 'Doc Extractor', + 'list-operator': 'List Operator', + 'agent': 'Agent', + 'loop-start': 'Loop Start', + 'loop': 'Loop', + 'loop-end': 'Exit Loop', + 'knowledge-index': 'Knowledge Base', + 'datasource': 'Data Source', + 'trigger-schedule': 'Schedule Trigger', + 'trigger-webhook': 'Webhook Trigger', + 'trigger-plugin': 
'Plugin Trigger', + }, + customWebhook: 'Custom Webhook', + blocksAbout: { + 'start': 'Define the initial parameters for launching a workflow', + 'end': 'Define the output and result type of a workflow', + 'answer': 'Define the reply content of a chat conversation', + 'llm': 'Invoking large language models to answer questions or process natural language', + 'knowledge-retrieval': 'Allows you to query text content related to user questions from the Knowledge', + 'question-classifier': 'Define the classification conditions of user questions, LLM can define how the conversation progresses based on the classification description', + 'if-else': 'Allows you to split the workflow into two branches based on if/else conditions', + 'code': 'Execute a piece of Python or NodeJS code to implement custom logic', + 'template-transform': 'Convert data to string using Jinja template syntax', + 'http-request': 'Allow server requests to be sent over the HTTP protocol', + 'variable-assigner': 'Aggregate multi-branch variables into a single variable for unified configuration of downstream nodes.', + 'assigner': 'The variable assignment node is used for assigning values to writable variables(like conversation variables).', + 'variable-aggregator': 'Aggregate multi-branch variables into a single variable for unified configuration of downstream nodes.', + 'iteration': 'Perform multiple steps on a list object until all results are outputted.', + 'loop': 'Execute a loop of logic until the termination condition is met or the maximum loop count is reached.', + 'loop-end': 'Equivalent to "break". This node has no configuration items. 
When the loop body reaches this node, the loop terminates.', + 'parameter-extractor': 'Use LLM to extract structured parameters from natural language for tool invocations or HTTP requests.', + 'document-extractor': 'Used to parse uploaded documents into text content that is easily understandable by LLM.', + 'list-operator': 'Used to filter or sort array content.', + 'agent': 'Invoking large language models to answer questions or process natural language', + 'knowledge-index': 'Knowledge Base About', + 'datasource': 'Data Source About', + 'trigger-schedule': 'Time-based workflow trigger that starts workflows on a schedule', + 'trigger-webhook': 'Webhook Trigger receives HTTP pushes from third-party systems to automatically trigger workflows.', + 'trigger-plugin': 'Third-party integration trigger that starts workflows from external platform events', + 'group': 'Group multiple nodes together for better organization and reusability.', + }, + difyTeam: 'Dify Team', + operator: { + zoomIn: 'Zoom In', + zoomOut: 'Zoom Out', + zoomTo50: 'Zoom to 50%', + zoomTo100: 'Zoom to 100%', + zoomToFit: 'Zoom to Fit', + makeGroup: 'Make group', + alignNodes: 'Align Nodes', + alignLeft: 'Left', + alignCenter: 'Center', + alignRight: 'Right', + alignTop: 'Top', + alignMiddle: 'Middle', + alignBottom: 'Bottom', + vertical: 'Vertical', + horizontal: 'Horizontal', + distributeHorizontal: 'Space Horizontally', + distributeVertical: 'Space Vertically', + selectionAlignment: 'Selection Alignment', + }, + variableReference: { + noAvailableVars: 'No available variables', + noVarsForOperation: 'There are no variables available for assignment with the selected operation.', + noAssignedVars: 'No available assigned variables', + assignedVarsDescription: 'Assigned variables must be writable variables, such as ', + conversationVars: 'conversation variables', + }, + panel: { + userInputField: 'User Input Field', + changeBlock: 'Change Node', + helpLink: 'View Docs', + openWorkflow: 'Open Workflow', + 
about: 'About', + createdBy: 'Created By ', + nextStep: 'Next Step', + addNextStep: 'Add the next step in this workflow', + selectNextStep: 'Select Next Step', + runThisStep: 'Run this step', + checklist: 'Checklist', + checklistTip: 'Make sure all issues are resolved before publishing', + checklistResolved: 'All issues are resolved', + goTo: 'Go to', + startNode: 'Start Node', + organizeBlocks: 'Organize nodes', + change: 'Change', + optional: '(optional)', + maximize: 'Maximize Canvas', + minimize: 'Exit Full Screen', + scrollToSelectedNode: 'Scroll to selected node', + optional_and_hidden: '(optional & hidden)', + }, + nodes: { + common: { + outputVars: 'Output Variables', + insertVarTip: 'Insert Variable', + memory: { + memory: 'Memory', + memoryTip: 'Chat memory settings', + windowSize: 'Window Size', + conversationRoleName: 'Conversation Role Name', + user: 'User prefix', + assistant: 'Assistant prefix', + }, + memories: { + title: 'Memories', + tip: 'Chat memory', + builtIn: 'Built-in', + }, + errorHandle: { + title: 'Error Handling', + tip: 'Exception handling strategy, triggered when a node encounters an exception.', + none: { + title: 'None', + desc: 'The node will stop running if an exception occurs and is not handled', + }, + defaultValue: { + title: 'Default Value', + desc: 'When an error occurs, specify a static output content.', + tip: 'On error, will return below value.', + inLog: 'Node exception, outputting according to default values.', + output: 'Output Default Value', + }, + failBranch: { + title: 'Fail Branch', + desc: 'When an error occurs, it will execute the exception branch', + customize: 'Go to the canvas to customize the fail branch logic.', + customizeTip: 'When the fail branch is activated, exceptions thrown by nodes will not terminate the process. 
Instead, it will automatically execute the predefined fail branch, allowing you to flexibly provide error messages, reports, fixes, or skip actions.', + inLog: 'Node exception, will automatically execute the fail branch. The node output will return an error type and error message and pass them to downstream.', + }, + partialSucceeded: { + tip: 'There are {{num}} nodes in the process running abnormally, please go to tracing to check the logs.', + }, + }, + retry: { + retry: 'Retry', + retryOnFailure: 'retry on failure', + maxRetries: 'max retries', + retryInterval: 'retry interval', + retryTimes: 'Retry {{times}} times on failure', + retrying: 'Retrying...', + retrySuccessful: 'Retry successful', + retryFailed: 'Retry failed', + retryFailedTimes: '{{times}} retries failed', + times: 'times', + ms: 'ms', + retries: '{{num}} Retries', + }, + typeSwitch: { + input: 'Input value', + variable: 'Use variable', + }, + inputVars: 'Input Variables', + pluginNotInstalled: 'Plugin is not installed', + }, + start: { + required: 'required', + inputField: 'Input Field', + builtInVar: 'Built-in Variables', + outputVars: { + query: 'User input', + memories: { + des: 'Conversation history', + type: 'message type', + content: 'message content', + }, + files: 'File list', + }, + noVarTip: 'Set inputs that can be used in the Workflow', + }, + end: { + outputs: 'Outputs', + output: { + type: 'output type', + variable: 'output variable', + }, + type: { + 'none': 'None', + 'plain-text': 'Plain Text', + 'structured': 'Structured', + }, + }, + answer: { + answer: 'Answer', + outputVars: 'Output Variables', + }, + llm: { + model: 'model', + variables: 'variables', + context: 'context', + contextTooltip: 'You can import Knowledge as context', + notSetContextInPromptTip: 'To enable the context feature, please fill in the context variable in PROMPT.', + prompt: 'prompt', + roleDescription: { + system: 'Give high level instructions for the conversation', + user: 'Provide instructions, queries, 
or any text-based input to the model', + assistant: 'The model’s responses based on the user messages', + }, + addMessage: 'Add Message', + vision: 'vision', + files: 'Files', + resolution: { + name: 'Resolution', + high: 'High', + low: 'Low', + }, + outputVars: { + output: 'Generate content', + reasoning_content: 'Reasoning Content', + usage: 'Model Usage Information', + }, + singleRun: { + variable: 'Variable', + }, + sysQueryInUser: 'sys.query in user message is required', + reasoningFormat: { + title: 'Enable reasoning tag separation', + tagged: 'Keep think tags', + separated: 'Separate think tags', + tooltip: 'Extract content from think tags and store it in the reasoning_content field.', + }, + jsonSchema: { + title: 'Structured Output Schema', + instruction: 'Instruction', + promptTooltip: 'Convert the text description into a standardized JSON Schema structure.', + promptPlaceholder: 'Describe your JSON Schema...', + generate: 'Generate', + import: 'Import from JSON', + generateJsonSchema: 'Generate JSON Schema', + generationTip: 'You can use natural language to quickly create a JSON Schema.', + generating: 'Generating JSON Schema...', + generatedResult: 'Generated Result', + resultTip: 'Here is the generated result. 
If you\'re not satisfied, you can go back and modify your prompt.', + back: 'Back', + regenerate: 'Regenerate', + apply: 'Apply', + doc: 'Learn more about structured output', + resetDefaults: 'Reset', + required: 'required', + addField: 'Add Field', + addChildField: 'Add Child Field', + showAdvancedOptions: 'Show advanced options', + stringValidations: 'String Validations', + fieldNamePlaceholder: 'Field Name', + descriptionPlaceholder: 'Add description', + warningTips: { + saveSchema: 'Please finish editing the current field before saving the schema', + }, + }, + }, + knowledgeRetrieval: { + queryVariable: 'Query Variable', + queryText: 'Query Text', + queryAttachment: 'Query Images', + knowledge: 'Knowledge', + outputVars: { + output: 'Retrieval segmented data', + content: 'Segmented content', + title: 'Segmented title', + icon: 'Segmented icon', + url: 'Segmented URL', + metadata: 'Other metadata', + files: 'Retrieved files', + }, + metadata: { + title: 'Metadata Filtering', + tip: 'Metadata filtering is the process of using metadata attributes (such as tags, categories, or access permissions) to refine and control the retrieval of relevant information within a system.', + options: { + disabled: { + title: 'Disabled', + subTitle: 'Not enabling metadata filtering', + }, + automatic: { + title: 'Automatic', + subTitle: 'Automatically generate metadata filtering conditions based on user query', + desc: 'Automatically generate metadata filtering conditions based on Query Variable', + }, + manual: { + title: 'Manual', + subTitle: 'Manually add metadata filtering conditions', + }, + }, + panel: { + title: 'Metadata Filter Conditions', + conditions: 'Conditions', + add: 'Add Condition', + search: 'Search metadata', + placeholder: 'Enter value', + datePlaceholder: 'Choose a time...', + select: 'Select variable...', + }, + }, + }, + http: { + inputVars: 'Input Variables', + api: 'API', + apiPlaceholder: 'Enter URL, type ‘/’ insert variable', + extractListPlaceholder: 
'Enter list item index, type ‘/’ insert variable', + notStartWithHttp: 'API should start with http:// or https://', + key: 'Key', + type: 'Type', + value: 'Value', + bulkEdit: 'Bulk Edit', + keyValueEdit: 'Key-Value Edit', + headers: 'Headers', + params: 'Params', + body: 'Body', + binaryFileVariable: 'Binary File Variable', + outputVars: { + body: 'Response Content', + statusCode: 'Response Status Code', + headers: 'Response Header List JSON', + files: 'Files List', + }, + authorization: { + 'authorization': 'Authorization', + 'authorizationType': 'Authorization Type', + 'no-auth': 'None', + 'api-key': 'API-Key', + 'auth-type': 'Auth Type', + 'basic': 'Basic', + 'bearer': 'Bearer', + 'custom': 'Custom', + 'api-key-title': 'API Key', + 'header': 'Header', + }, + insertVarPlaceholder: 'type \'/\' to insert variable', + timeout: { + title: 'Timeout', + connectLabel: 'Connection Timeout', + connectPlaceholder: 'Enter connection timeout in seconds', + readLabel: 'Read Timeout', + readPlaceholder: 'Enter read timeout in seconds', + writeLabel: 'Write Timeout', + writePlaceholder: 'Enter write timeout in seconds', + }, + curl: { + title: 'Import from cURL', + placeholder: 'Paste cURL string here', + }, + verifySSL: { + title: 'Verify SSL Certificate', + warningTooltip: 'Disabling SSL verification is not recommended for production environments. 
This should only be used in development or testing, as it makes the connection vulnerable to security threats like man-in-the-middle attacks.', + }, + }, + code: { + inputVars: 'Input Variables', + outputVars: 'Output Variables', + advancedDependencies: 'Advanced Dependencies', + advancedDependenciesTip: 'Add some preloaded dependencies that take more time to consume or are not default built-in here', + searchDependencies: 'Search Dependencies', + syncFunctionSignature: 'Sync function signature to code', + }, + templateTransform: { + inputVars: 'Input Variables', + code: 'Code', + codeSupportTip: 'Only supports Jinja2', + outputVars: { + output: 'Transformed content', + }, + }, + ifElse: { + if: 'If', + else: 'Else', + elseDescription: 'Used to define the logic that should be executed when the if condition is not met.', + and: 'and', + or: 'or', + operator: 'Operator', + notSetVariable: 'Please set variable first', + comparisonOperator: { + 'contains': 'contains', + 'not contains': 'not contains', + 'start with': 'start with', + 'end with': 'end with', + 'is': 'is', + 'is not': 'is not', + 'empty': 'is empty', + 'not empty': 'is not empty', + 'null': 'is null', + 'not null': 'is not null', + 'in': 'in', + 'not in': 'not in', + 'all of': 'all of', + 'exists': 'exists', + 'not exists': 'not exists', + 'before': 'before', + 'after': 'after', + }, + optionName: { + image: 'Image', + doc: 'Doc', + audio: 'Audio', + video: 'Video', + localUpload: 'Local Upload', + url: 'URL', + }, + enterValue: 'Enter value', + addCondition: 'Add Condition', + conditionNotSetup: 'Condition NOT setup', + selectVariable: 'Select variable...', + addSubVariable: 'Sub Variable', + select: 'Select', + }, + variableAssigner: { + title: 'Assign variables', + outputType: 'Output Type', + varNotSet: 'Variable not set', + noVarTip: 'Add the variables to be assigned', + type: { + string: 'String', + number: 'Number', + object: 'Object', + array: 'Array', + }, + aggregationGroup: 'Aggregation Group', 
+ aggregationGroupTip: 'Enabling this feature allows the variable aggregator to aggregate multiple sets of variables.', + addGroup: 'Add Group', + outputVars: { + varDescribe: '{{groupName}} output', + }, + setAssignVariable: 'Set assign variable', + }, + assigner: { + 'assignedVariable': 'Assigned Variable', + 'varNotSet': 'Variable NOT Set', + 'variables': 'Variables', + 'noVarTip': 'Click the "+" button to add variables', + 'writeMode': 'Write Mode', + 'writeModeTip': 'Append mode: Available for array variables only.', + 'over-write': 'Overwrite', + 'append': 'Append', + 'plus': 'Plus', + 'clear': 'Clear', + 'setVariable': 'Set Variable', + 'selectAssignedVariable': 'Select assigned variable...', + 'setParameter': 'Set parameter...', + 'operations': { + 'title': 'Operation', + 'over-write': 'Overwrite', + 'overwrite': 'Overwrite', + 'set': 'Set', + 'clear': 'Clear', + 'extend': 'Extend', + 'append': 'Append', + 'remove-first': 'Remove First', + 'remove-last': 'Remove Last', + '+=': '+=', + '-=': '-=', + '*=': '*=', + '/=': '/=', + }, + 'variable': 'Variable', + 'noAssignedVars': 'No available assigned variables', + 'assignedVarsDescription': 'Assigned variables must be writable variables, such as conversation variables.', + }, + tool: { + authorize: 'Authorize', + inputVars: 'Input Variables', + settings: 'Settings', + insertPlaceholder1: 'Type or press', + insertPlaceholder2: 'insert variable', + outputVars: { + text: 'tool generated content', + files: { + title: 'tool generated files', + type: 'Support type. 
Now only support image', + transfer_method: 'Transfer method.Value is remote_url or local_file', + url: 'Image url', + upload_file_id: 'Upload file id', + }, + json: 'tool generated json', + }, + }, + triggerPlugin: { + authorized: 'Authorized', + notConfigured: 'Not Configured', + notAuthorized: 'Not Authorized', + selectSubscription: 'Select Subscription', + availableSubscriptions: 'Available Subscriptions', + addSubscription: 'Add New Subscription', + removeSubscription: 'Remove Subscription', + subscriptionRemoved: 'Subscription removed successfully', + error: 'Error', + configuration: 'Configuration', + remove: 'Remove', + or: 'OR', + useOAuth: 'Use OAuth', + useApiKey: 'Use API Key', + authenticationFailed: 'Authentication failed', + authenticationSuccess: 'Authentication successful', + oauthConfigFailed: 'OAuth configuration failed', + configureOAuthClient: 'Configure OAuth Client', + oauthClientDescription: 'Configure OAuth client credentials to enable authentication', + oauthClientSaved: 'OAuth client configuration saved successfully', + configureApiKey: 'Configure API Key', + apiKeyDescription: 'Configure API key credentials for authentication', + apiKeyConfigured: 'API key configured successfully', + configurationFailed: 'Configuration failed', + failedToStart: 'Failed to start authentication flow', + credentialsVerified: 'Credentials verified successfully', + credentialVerificationFailed: 'Credential verification failed', + verifyAndContinue: 'Verify & Continue', + configureParameters: 'Configure Parameters', + parametersDescription: 'Configure trigger parameters and properties', + configurationComplete: 'Configuration Complete', + configurationCompleteDescription: 'Your trigger has been configured successfully', + configurationCompleteMessage: 'Your trigger configuration is now complete and ready to use.', + parameters: 'Parameters', + properties: 'Properties', + propertiesDescription: 'Additional configuration properties for this trigger', + 
noConfigurationRequired: 'No additional configuration required for this trigger.', + subscriptionName: 'Subscription Name', + subscriptionNameDescription: 'Enter a unique name for this trigger subscription', + subscriptionNamePlaceholder: 'Enter subscription name...', + subscriptionNameRequired: 'Subscription name is required', + subscriptionRequired: 'Subscription is required', + }, + questionClassifiers: { + model: 'model', + inputVars: 'Input Variables', + outputVars: { + className: 'Class Name', + usage: 'Model Usage Information', + }, + class: 'Class', + classNamePlaceholder: 'Write your class name', + advancedSetting: 'Advanced Setting', + topicName: 'Topic Name', + topicPlaceholder: 'Write your topic name', + addClass: 'Add Class', + instruction: 'Instruction', + instructionTip: 'Input additional instructions to help the question classifier better understand how to categorize questions.', + instructionPlaceholder: 'Write your instruction', + }, + parameterExtractor: { + inputVar: 'Input Variable', + outputVars: { + isSuccess: 'Is Success.On success the value is 1, on failure the value is 0.', + errorReason: 'Error Reason', + usage: 'Model Usage Information', + }, + extractParameters: 'Extract Parameters', + importFromTool: 'Import from tools', + addExtractParameter: 'Add Extract Parameter', + addExtractParameterContent: { + name: 'Name', + namePlaceholder: 'Extract Parameter Name', + type: 'Type', + typePlaceholder: 'Extract Parameter Type', + description: 'Description', + descriptionPlaceholder: 'Extract Parameter Description', + required: 'Required', + requiredContent: 'Required is only used as a reference for model inference, and not for mandatory validation of parameter output.', + }, + extractParametersNotSet: 'Extract Parameters not setup', + instruction: 'Instruction', + instructionTip: 'Input additional instructions to help the parameter extractor understand how to extract parameters.', + advancedSetting: 'Advanced Setting', + reasoningMode: 
'Reasoning Mode', + reasoningModeTip: 'You can choose the appropriate reasoning mode based on the model\'s ability to respond to instructions for function calling or prompts.', + }, + iteration: { + deleteTitle: 'Delete Iteration Node?', + deleteDesc: 'Deleting the iteration node will delete all child nodes', + input: 'Input', + output: 'Output Variables', + iteration_one: '{{count}} Iteration', + iteration_other: '{{count}} Iterations', + currentIteration: 'Current Iteration', + comma: ', ', + error_one: '{{count}} Error', + error_other: '{{count}} Errors', + parallelMode: 'Parallel Mode', + parallelModeUpper: 'PARALLEL MODE', + parallelModeEnableTitle: 'Parallel Mode Enabled', + parallelModeEnableDesc: 'In parallel mode, tasks within iterations support parallel execution. You can configure this in the properties panel on the right.', + parallelPanelDesc: 'In parallel mode, tasks in the iteration support parallel execution.', + MaxParallelismTitle: 'Maximum parallelism', + MaxParallelismDesc: 'The maximum parallelism is used to control the number of tasks executed simultaneously in a single iteration.', + errorResponseMethod: 'Error response method', + ErrorMethod: { + operationTerminated: 'Terminated', + continueOnError: 'Continue on Error', + removeAbnormalOutput: 'Remove Abnormal Output', + }, + answerNodeWarningDesc: 'Parallel mode warning: Answer nodes, conversation variable assignments, and persistent read/write operations within iterations may cause exceptions.', + flattenOutput: 'Flatten Output', + flattenOutputDesc: 'When enabled, if all iteration outputs are arrays, they will be flattened into a single array. 
When disabled, outputs will maintain a nested array structure.', + }, + loop: { + deleteTitle: 'Delete Loop Node?', + deleteDesc: 'Deleting the loop node will remove all child nodes', + input: 'Input', + output: 'Output Variable', + loop_one: '{{count}} Loop', + loop_other: '{{count}} Loops', + currentLoop: 'Current Loop', + comma: ', ', + error_one: '{{count}} Error', + error_other: '{{count}} Errors', + breakCondition: 'Loop Termination Condition', + breakConditionTip: 'Only variables within loops with termination conditions and conversation variables can be referenced.', + loopMaxCount: 'Maximum Loop Count', + loopMaxCountError: 'Please enter a valid maximum loop count, ranging from 1 to {{maxCount}}', + errorResponseMethod: 'Error Response Method', + ErrorMethod: { + operationTerminated: 'Terminated', + continueOnError: 'Continue on Error', + removeAbnormalOutput: 'Remove Abnormal Output', + }, + loopVariables: 'Loop Variables', + initialLoopVariables: 'Initial Loop Variables', + finalLoopVariables: 'Final Loop Variables', + setLoopVariables: 'Set variables within the loop scope', + variableName: 'Variable Name', + inputMode: 'Input Mode', + exitConditionTip: 'A loop node needs at least one exit condition', + loopNode: 'Loop Node', + currentLoopCount: 'Current loop count: {{count}}', + totalLoopCount: 'Total loop count: {{count}}', + }, + note: { + addNote: 'Add Note', + editor: { + placeholder: 'Write your note...', + small: 'Small', + medium: 'Medium', + large: 'Large', + bold: 'Bold', + italic: 'Italic', + strikethrough: 'Strikethrough', + link: 'Link', + openLink: 'Open', + unlink: 'Unlink', + enterUrl: 'Enter URL...', + invalidUrl: 'Invalid URL', + bulletList: 'Bullet List', + showAuthor: 'Show Author', + }, + }, + docExtractor: { + inputVar: 'Input Variable', + outputVars: { + text: 'Extracted text', + }, + supportFileTypes: 'Support file types: {{types}}.', + learnMore: 'Learn more', + }, + listFilter: { + inputVar: 'Input Variable', + filterCondition: 
'Filter Condition', + filterConditionKey: 'Filter Condition Key', + extractsCondition: 'Extract the N item', + filterConditionComparisonOperator: 'Filter Condition Comparison Operator', + filterConditionComparisonValue: 'Filter Condition value', + selectVariableKeyPlaceholder: 'Select sub variable key', + limit: 'Top N', + orderBy: 'Order by', + asc: 'ASC', + desc: 'DESC', + outputVars: { + result: 'Filter result', + first_record: 'First record', + last_record: 'Last record', + }, + }, + agent: { + strategy: { + label: 'Agentic Strategy', + tooltip: 'Different Agentic strategies determine how the system plans and executes multi-step tool calls', + shortLabel: 'Strategy', + configureTip: 'Please configure agentic strategy.', + configureTipDesc: 'After configuring the agentic strategy, this node will automatically load the remaining configurations. The strategy will affect the mechanism of multi-step tool reasoning. ', + selectTip: 'Select agentic strategy', + searchPlaceholder: 'Search agentic strategy', + }, + learnMore: 'Learn more', + pluginNotInstalled: 'This plugin is not installed', + pluginNotInstalledDesc: 'This plugin is installed from GitHub. Please go to Plugins to reinstall', + linkToPlugin: 'Link to Plugins', + pluginInstaller: { + install: 'Install', + installing: 'Installing', + }, + modelNotInMarketplace: { + title: 'Model not installed', + desc: 'This model is installed from Local or GitHub repository. Please use after installation.', + manageInPlugins: 'Manage in Plugins', + }, + modelNotSupport: { + title: 'Unsupported Model', + desc: 'The installed plugin version does not provide this model.', + descForVersionSwitch: 'The installed plugin version does not provide this model. 
Click to switch version.', + }, + configureModel: 'Configure Model', + notAuthorized: 'Not Authorized', + model: 'model', + toolbox: 'toolbox', + strategyNotSet: 'Agentic strategy Not Set', + tools: 'Tools', + maxIterations: 'Max Iterations', + modelNotSelected: 'Model not selected', + modelNotInstallTooltip: 'This model is not installed', + toolNotInstallTooltip: '{{tool}} is not installed', + toolNotAuthorizedTooltip: '{{tool}} Not Authorized', + strategyNotInstallTooltip: '{{strategy}} is not installed', + unsupportedStrategy: 'Unsupported strategy', + pluginNotFoundDesc: 'This plugin is installed from GitHub. Please go to Plugins to reinstall', + strategyNotFoundDesc: 'The installed plugin version does not provide this strategy.', + strategyNotFoundDescAndSwitchVersion: 'The installed plugin version does not provide this strategy. Click to switch version.', + modelSelectorTooltips: { + deprecated: 'This model is deprecated', + }, + outputVars: { + text: 'agent generated content', + usage: 'Model Usage Information', + files: { + title: 'agent generated files', + type: 'Support type. Now only support image', + transfer_method: 'Transfer method.Value is remote_url or local_file', + url: 'Image url', + upload_file_id: 'Upload file id', + }, + json: 'agent generated json', + }, + checkList: { + strategyNotSelected: 'Strategy not selected', + }, + installPlugin: { + title: 'Install Plugin', + desc: 'About to install the following plugin', + changelog: 'Change log', + install: 'Install', + cancel: 'Cancel', + }, + clickToViewParameterSchema: 'Click to view parameter schema', + parameterSchema: 'Parameter Schema', + }, + dataSource: { + supportedFileFormats: 'Supported file formats', + supportedFileFormatsPlaceholder: 'File extension, e.g. 
doc', + add: 'Add data source', + }, + knowledgeBase: { + chunkStructure: 'Chunk Structure', + chooseChunkStructure: 'Choose a chunk structure', + chunkStructureTip: { + title: 'Please choose a chunk structure', + message: 'The Dify Knowledge Base supports three chunking structures: General, Parent-child, and Q&A. Each knowledge base can have only one structure. The output from the preceding node must align with the selected chunk structure. Note that the choice of chunking structure affects the available index methods.', + learnMore: 'Learn more', + }, + changeChunkStructure: 'Change Chunk Structure', + chunksInput: 'Chunks', + chunksInputTip: 'The input variable of the knowledge base node is Chunks. The variable type is an object with a specific JSON Schema which must be consistent with the selected chunk structure.', + aboutRetrieval: 'about retrieval method.', + chunkIsRequired: 'Chunk structure is required', + indexMethodIsRequired: 'Index method is required', + chunksVariableIsRequired: 'Chunks variable is required', + embeddingModelIsRequired: 'Embedding model is required', + embeddingModelIsInvalid: 'Embedding model is invalid', + retrievalSettingIsRequired: 'Retrieval setting is required', + rerankingModelIsRequired: 'Reranking model is required', + rerankingModelIsInvalid: 'Reranking model is invalid', + }, + triggerSchedule: { + title: 'Schedule', + nodeTitle: 'Schedule Trigger', + notConfigured: 'Not configured', + useCronExpression: 'Use cron expression', + useVisualPicker: 'Use visual picker', + frequency: { + label: 'FREQUENCY', + hourly: 'Hourly', + daily: 'Daily', + weekly: 'Weekly', + monthly: 'Monthly', + }, + selectFrequency: 'Select frequency', + frequencyLabel: 'Frequency', + nextExecution: 'Next execution', + weekdays: 'Week days', + time: 'Time', + cronExpression: 'Cron expression', + nextExecutionTime: 'NEXT EXECUTION TIME', + nextExecutionTimes: 'Next 5 execution times', + startTime: 'Start Time', + executeNow: 'Execution now', + 
selectDateTime: 'Select Date & Time', + hours: 'Hours', + minutes: 'Minutes', + onMinute: 'On Minute', + days: 'Days', + lastDay: 'Last day', + lastDayTooltip: 'Not all months have 31 days. Use the \'last day\' option to select each month\'s final day.', + mode: 'Mode', + timezone: 'Timezone', + visualConfig: 'Visual Configuration', + monthlyDay: 'Monthly Day', + executionTime: 'Execution Time', + invalidTimezone: 'Invalid timezone', + invalidCronExpression: 'Invalid cron expression', + noValidExecutionTime: 'No valid execution time can be calculated', + executionTimeCalculationError: 'Failed to calculate execution times', + invalidFrequency: 'Invalid frequency', + invalidStartTime: 'Invalid start time', + startTimeMustBeFuture: 'Start time must be in the future', + invalidTimeFormat: 'Invalid time format (expected HH:MM AM/PM)', + invalidWeekday: 'Invalid weekday: {{weekday}}', + invalidMonthlyDay: 'Monthly day must be between 1-31 or "last"', + invalidOnMinute: 'On minute must be between 0-59', + invalidExecutionTime: 'Invalid execution time', + executionTimeMustBeFuture: 'Execution time must be in the future', + }, + triggerWebhook: { + title: 'Webhook Trigger', + nodeTitle: '🔗 Webhook Trigger', + configPlaceholder: 'Webhook trigger configuration will be implemented here', + webhookUrl: 'Webhook URL', + webhookUrlPlaceholder: 'Click generate to create webhook URL', + generate: 'Generate', + copy: 'Copy', + test: 'Test', + urlGenerated: 'Webhook URL generated successfully', + urlGenerationFailed: 'Failed to generate webhook URL', + urlCopied: 'URL copied to clipboard', + method: 'Method', + contentType: 'Content Type', + queryParameters: 'Query Parameters', + headerParameters: 'Header Parameters', + requestBodyParameters: 'Request Body Parameters', + parameterName: 'Variable name', + varName: 'Variable name', + varType: 'Type', + varNamePlaceholder: 'Enter variable name...', + required: 'Required', + addParameter: 'Add', + addHeader: 'Add', + noParameters: 'No 
parameters configured', + noQueryParameters: 'No query parameters configured', + noHeaders: 'No headers configured', + noBodyParameters: 'No body parameters configured', + debugUrlTitle: 'For test runs, always use this URL', + debugUrlCopy: 'Click to copy', + debugUrlCopied: 'Copied!', + debugUrlPrivateAddressWarning: 'This URL appears to be an internal address, which may cause webhook requests to fail. You may change TRIGGER_URL to a public address.', + errorHandling: 'Error Handling', + errorStrategy: 'Error Handling', + responseConfiguration: 'Response', + asyncMode: 'Async Mode', + statusCode: 'Status Code', + responseBody: 'Response Body', + responseBodyPlaceholder: 'Write your response body here', + headers: 'Headers', + validation: { + webhookUrlRequired: 'Webhook URL is required', + invalidParameterType: 'Invalid parameter type "{{type}}" for parameter "{{name}}"', + }, + }, + }, + triggerStatus: { + enabled: 'TRIGGER', + disabled: 'TRIGGER • DISABLED', + }, + entryNodeStatus: { + enabled: 'START', + disabled: 'START • DISABLED', + }, + tracing: { + stopBy: 'Stop by {{user}}', + }, + versionHistory: { + title: 'Versions', + currentDraft: 'Current Draft', + latest: 'Latest', + filter: { + all: 'All', + onlyYours: 'Only yours', + onlyShowNamedVersions: 'Only show named versions', + reset: 'Reset Filter', + empty: 'No matching version history found', + }, + defaultName: 'Untitled Version', + nameThisVersion: 'Name this version', + editVersionInfo: 'Edit version info', + copyId: 'Copy ID', + editField: { + title: 'Title', + releaseNotes: 'Release Notes', + titleLengthLimit: 'Title can\'t exceed {{limit}} characters', + releaseNotesLengthLimit: 'Release notes can\'t exceed {{limit}} characters', + }, + releaseNotesPlaceholder: 'Describe what changed', + restorationTip: 'After version restoration, the current draft will be overwritten.', + deletionTip: 'Deletion is irreversible, please confirm.', + action: { + restoreSuccess: 'Version restored', + restoreFailure: 
'Failed to restore version', + deleteSuccess: 'Version deleted', + deleteFailure: 'Failed to delete version', + updateSuccess: 'Version updated', + updateFailure: 'Failed to update version', + copyIdSuccess: 'ID copied to clipboard', + }, + }, + debug: { + settingsTab: 'Settings', + lastRunTab: 'Last Run', + relationsTab: 'Relations', + copyLastRun: 'Copy Last Run', + noLastRunFound: 'No previous run found', + noMatchingInputsFound: 'No matching inputs found from last run', + lastRunInputsCopied: '{{count}} input(s) copied from last run', + copyLastRunError: 'Failed to copy last run inputs', + noData: { + description: 'The results of the last run will be displayed here', + runThisNode: 'Run this node', + }, + variableInspect: { + title: 'Variable Inspect', + emptyTip: 'After stepping through a node on the canvas or running a node step by step, you can view the current value of the node variable in Variable Inspect', + emptyLink: 'Learn more', + clearAll: 'Reset all', + clearNode: 'Clear cached variable', + resetConversationVar: 'Reset conversation variable to default value', + view: 'View log', + edited: 'Edited', + reset: 'Reset to last run value', + listening: { + title: 'Listening for events from triggers...', + tip: 'You can now simulate event triggers by sending test requests to HTTP {{nodeName}} endpoint or use it as a callback URL for live event debugging. All outputs can be viewed directly in the Variable Inspector.', + tipPlugin: 'Now you can create events in {{- pluginName}}, and retrieve outputs from these events in the Variable Inspector.', + tipSchedule: 'Listening for events from schedule triggers.\nNext scheduled run: {{nextTriggerTime}}', + tipFallback: 'Await incoming trigger events. 
Outputs will appear here.', + defaultNodeName: 'this trigger', + defaultPluginName: 'this plugin trigger', + defaultScheduleTime: 'Not configured', + selectedTriggers: 'selected triggers', + stopButton: 'Stop', + }, + trigger: { + normal: 'Variable Inspect', + running: 'Caching running status', + stop: 'Stop run', + cached: 'View cached variables', + clear: 'Clear', + }, + envNode: 'Environment', + chatNode: 'Conversation', + systemNode: 'System', + exportToolTip: 'Export Variable as File', + largeData: 'Large data, read-only preview. Export to view all.', + largeDataNoExport: 'Large data - partial preview only', + export: 'export', + }, + lastOutput: 'Last Output', + relations: { + dependencies: 'Dependencies', + dependents: 'Dependents', + dependenciesDescription: 'Nodes that this node relies on', + dependentsDescription: 'Nodes that rely on this node', + noDependencies: 'No dependencies', + noDependents: 'No dependents', + }, + }, + onboarding: { + title: 'Select a start node to begin', + description: 'Different start nodes have different capabilities. 
Don\'t worry, you can always change them later.', + userInputFull: 'User Input (original start node)', + userInputDescription: 'Start node that allows setting user input variables, with web app, service API, MCP server, and workflow as tool capabilities.', + trigger: 'Trigger', + triggerDescription: 'Triggers can serve as the start node of a workflow, such as scheduled tasks, custom webhooks, or integrations with other apps.', + back: 'Back', + learnMore: 'Learn more', + aboutStartNode: 'about start node.', + escTip: { + press: 'Press', + key: 'esc', + toDismiss: 'to dismiss', + }, + }, +} + +export default translation diff --git a/web/i18n/ja-JP/workflow.json b/web/i18n/ja-JP/workflow.json index df8fb56dd0..b38ccfc8ad 100644 --- a/web/i18n/ja-JP/workflow.json +++ b/web/i18n/ja-JP/workflow.json @@ -171,6 +171,7 @@ "common.needConnectTip": "接続されていないステップがあります", "common.needOutputNode": "出力ノードを追加する必要があります", "common.needStartNode": "少なくとも1つのスタートノードを追加する必要があります", + "common.noAgentNodes": "利用可能なエージェントノードがありません", "common.noHistory": "履歴がありません", "common.noVar": "変数がありません", "common.notRunning": "まだ実行されていません", @@ -202,6 +203,7 @@ "common.runApp": "アプリを実行", "common.runHistory": "実行履歴", "common.running": "実行中", + "common.searchAgent": "エージェントを検索...", "common.searchVar": "変数を検索", "common.setVarValuePlaceholder": "変数値を設定", "common.showRunHistory": "実行履歴を表示", @@ -213,6 +215,7 @@ "common.variableNamePlaceholder": "変数名を入力", "common.versionHistory": "バージョン履歴", "common.viewDetailInTracingPanel": "詳細を表示", + "common.viewInternals": "内部構造を表示", "common.viewOnly": "閲覧のみ", "common.viewRunHistory": "実行履歴を表示", "common.workflowAsTool": "ワークフローをツールとして公開する", @@ -620,8 +623,10 @@ "nodes.listFilter.outputVars.last_record": "最後のレコード", "nodes.listFilter.outputVars.result": "フィルター結果", "nodes.listFilter.selectVariableKeyPlaceholder": "サブ変数キーを選択する", + "nodes.llm.addContext": "コンテキスト追加", "nodes.llm.addMessage": "メッセージ追加", "nodes.llm.context": "コンテキスト", + "nodes.llm.contextBlock": "コンテキストブロック", 
"nodes.llm.contextTooltip": "ナレッジベースをコンテキストとして利用", "nodes.llm.files": "ファイル", "nodes.llm.jsonSchema.addChildField": "サブフィールドを追加", @@ -658,6 +663,7 @@ "nodes.llm.reasoningFormat.tagged": "タグを考え続けてください", "nodes.llm.reasoningFormat.title": "推論タグの分離を有効にする", "nodes.llm.reasoningFormat.tooltip": "thinkタグから内容を抽出し、それをreasoning_contentフィールドに保存します。", + "nodes.llm.removeContext": "コンテキストを削除", "nodes.llm.resolution.high": "高", "nodes.llm.resolution.low": "低", "nodes.llm.resolution.name": "解像度", @@ -758,10 +764,13 @@ "nodes.templateTransform.codeSupportTip": "Jinja2 のみをサポートしています", "nodes.templateTransform.inputVars": "入力変数", "nodes.templateTransform.outputVars.output": "変換されたコンテンツ", + "nodes.tool.agentPlaceholder": "{{paramKey}} を教えてください...", + "nodes.tool.assembleVariables": "変数を組み立てる", "nodes.tool.authorize": "認証する", "nodes.tool.inputVars": "入力変数", "nodes.tool.insertPlaceholder1": "タイプするか押してください", "nodes.tool.insertPlaceholder2": "変数を挿入する", + "nodes.tool.insertPlaceholder3": "エージェントを追加", "nodes.tool.outputVars.files.title": "ツールが生成したファイル", "nodes.tool.outputVars.files.transfer_method": "転送方法。値は remote_url または local_file です", "nodes.tool.outputVars.files.type": "サポートタイプ。現在は画像のみサポートされています", @@ -964,6 +973,7 @@ "panel.scrollToSelectedNode": "選択したノードまでスクロール", "panel.selectNextStep": "次ノード選択", "panel.startNode": "開始ノード", + "panel.ungroup": "グループ解除", "panel.userInputField": "ユーザー入力欄", "publishLimit.startNodeDesc": "このプランでは、各ワークフローのトリガー数は最大 2 個まで設定できます。公開するにはアップグレードが必要です。", "publishLimit.startNodeTitlePrefix": "アップグレードして、", @@ -980,6 +990,22 @@ "singleRun.testRun": "テスト実行", "singleRun.testRunIteration": "テスト実行(イテレーション)", "singleRun.testRunLoop": "テスト実行ループ", + "subGraphModal.canvasPlaceholder": "クリックして内部構造を設定", + "subGraphModal.defaultValueHint": "以下の値を返す", + "subGraphModal.internalStructure": "内部構造", + "subGraphModal.internalStructureDesc": "@{{name}} の内部構造", + "subGraphModal.lastRun": "前回の実行", + "subGraphModal.noRunHistory": "実行履歴がありません", + "subGraphModal.outputVariables": "出力変数", 
+ "subGraphModal.settings": "設定", + "subGraphModal.sourceNode": "ソース", + "subGraphModal.title": "内部構造", + "subGraphModal.whenOutputIsNone": "出力が空の場合", + "subGraphModal.whenOutputNone.default": "デフォルト値", + "subGraphModal.whenOutputNone.defaultDesc": "以下の値を返す", + "subGraphModal.whenOutputNone.error": "エラーを発生させる", + "subGraphModal.whenOutputNone.errorDesc": "エラーを外部ワークフローに渡す", + "subGraphModal.whenOutputNone.skip": "このステップをスキップ", "tabs.-": "デフォルト", "tabs.addAll": "すべてを追加する", "tabs.agent": "エージェント戦略", diff --git a/web/i18n/ja-JP/workflow.ts b/web/i18n/ja-JP/workflow.ts new file mode 100644 index 0000000000..1a5283f5b3 --- /dev/null +++ b/web/i18n/ja-JP/workflow.ts @@ -0,0 +1,1298 @@ +const translation = { + common: { + undo: '元に戻す', + redo: 'やり直し', + editing: '編集中', + autoSaved: '自動保存済み', + unpublished: '未公開', + published: '公開済み', + publish: '公開する', + update: '更新', + publishUpdate: '更新を公開', + run: 'テスト実行', + running: '実行中', + chooseStartNodeToRun: '実行する開始ノードを選択', + runAllTriggers: 'すべてのトリガーを実行', + inRunMode: '実行モード中', + inPreview: 'プレビュー中', + inPreviewMode: 'プレビューモード中', + preview: 'プレビュー', + viewRunHistory: '実行履歴を表示', + runHistory: '実行履歴', + goBackToEdit: '編集に戻る', + conversationLog: '会話ログ', + ImageUploadLegacyTip: '開始フォームでファイル型変数が作成可能になりました。画像アップロード機能は今後サポート終了となります。', + fileUploadTip: '画像アップロード機能がファイルアップロードに拡張されました', + debugAndPreview: 'プレビュー', + restart: '再起動', + currentDraft: '現在の下書き', + currentDraftUnpublished: '現在の下書き(未公開)', + latestPublished: '最新公開版', + publishedAt: '公開日時', + restore: '復元', + versionHistory: 'バージョン履歴', + exitVersions: 'バージョン履歴を閉じる', + runApp: 'アプリを実行', + batchRunApp: 'アプリを一括実行', + openInExplore: '探索ページで開く', + accessAPIReference: 'API リファレンス', + embedIntoSite: 'サイトに埋め込む', + addTitle: 'タイトルを追加...', + addDescription: '説明を追加...', + noVar: '変数がありません', + searchVar: '変数を検索', + variableNamePlaceholder: '変数名を入力', + setVarValuePlaceholder: '変数値を設定', + needConnectTip: '接続されていないステップがあります', + maxTreeDepth: '1 ブランチあたりの最大ノード数:{{depth}}', + needAdd: 
'{{node}}ノードを追加する必要があります', + needOutputNode: '出力ノードを追加する必要があります', + needStartNode: '少なくとも1つのスタートノードを追加する必要があります', + needAnswerNode: '回答ブロックを追加する必要があります', + workflowProcess: 'ワークフロー処理', + notRunning: 'まだ実行されていません', + previewPlaceholder: '入力欄にテキストを入力してチャットボットのデバッグを開始', + effectVarConfirm: { + title: '変数の削除', + content: '他のノードで変数が使用されています。それでも削除しますか?', + }, + insertVarTip: '"/"キーで変数を挿入', + processData: 'データ処理', + input: '入力', + output: '出力', + jinjaEditorPlaceholder: '「/」または「{」で変数挿入', + viewOnly: '閲覧のみ', + showRunHistory: '実行履歴を表示', + enableJinja: 'Jinja テンプレートを有効化', + learnMore: '詳細を見る', + copy: 'コピー', + duplicate: '複製', + addBlock: 'ブロックを追加', + pasteHere: 'ここに貼り付け', + pointerMode: 'ポインターモード', + handMode: 'ハンドモード', + exportImage: '画像を出力', + exportPNG: 'PNG で出力', + exportJPEG: 'JPEG で出力', + exportSVG: 'SVG で出力', + currentView: '現在のビュー', + currentWorkflow: '現在のワークフロー', + model: 'モデル', + workflowAsTool: 'ワークフローをツールとして公開する', + configureRequired: '設定が必要', + configure: '設定', + manageInTools: 'ツールページで管理', + workflowAsToolTip: 'ワークフロー更新後はツールの再設定が必要です', + workflowAsToolDisabledHint: '最新のワークフローを公開し、接続済みの User Input ノードを用意してからツールとして設定してください。', + viewDetailInTracingPanel: '詳細を表示', + syncingData: 'データ同期中。。。', + importDSL: 'DSL をインポート', + importDSLTip: '現在の下書きは上書きされます。インポート前にワークフローをエクスポートしてバックアップしてください', + backupCurrentDraft: '現在の下書きをバックアップ', + chooseDSL: 'DSL(yml) ファイルを選択', + overwriteAndImport: '上書きしてインポート', + importFailure: 'インポート失敗', + importWarning: '注意事項', + importWarningDetails: 'DSL バージョンの違いにより機能に影響が出る可能性があります', + importSuccess: 'インポート成功', + parallelTip: { + click: { + title: 'クリック', + desc: 'で追加', + }, + drag: { + title: 'ドラッグ', + desc: 'で接続', + }, + limit: '並列処理可能ブランチ数:{{num}}', + depthLimit: '並列ネスト最大階層数:{{num}}', + }, + disconnect: '接続解除', + jumpToNode: 'このノードに移動', + addParallelNode: '並列ノードを追加', + parallel: '並列', + branch: 'ブランチ', + onFailure: '失敗時', + addFailureBranch: '失敗ブランチを追加', + loadMore: 'さらに読み込む', + noHistory: '履歴がありません', + tagBound: 'このタグを使用しているアプリの数', + 
moreActions: 'さらにアクション', + listening: 'リッスン中', + features: '機能', + featuresDescription: 'ウェブアプリのユーザー体験を向上させる', + featuresDocLink: 'もっと詳しく知る', + }, + publishLimit: { + startNodeTitlePrefix: 'アップグレードして、', + startNodeTitleSuffix: '各ワークフローのトリガーを制限なしで使用できます。', + startNodeDesc: 'このプランでは、各ワークフローのトリガー数は最大 2 個まで設定できます。公開するにはアップグレードが必要です。', + }, + env: { + envPanelTitle: '環境変数', + envDescription: '環境変数は、個人情報や認証情報を格納するために使用することができます。これらは読み取り専用であり、DSL ファイルからエクスポートする際には分離されます。', + envPanelButton: '環境変数を追加', + modal: { + title: '環境変数を追加', + editTitle: '環境変数を編集', + type: 'タイプ', + name: '変数名', + namePlaceholder: '変数名を入力', + value: '値', + valuePlaceholder: '変数値を入力', + secretTip: 'この変数は機密情報やデータを定義するために使用されます。DSL をエクスポートするときに漏洩防止メカニズムを設定されます。', + description: '説明', + descriptionPlaceholder: '変数の説明を入力', + }, + export: { + title: 'シークレット環境変数をエクスポートしますか?', + checkbox: 'シークレット値を含む', + ignore: 'DSL をエクスポート', + export: 'シークレット値付きでエクスポート', + }, + }, + globalVar: { + title: 'システム変数', + description: 'システム変数は、タイプが適合していれば配線なしで任意のノードから参照できるグローバル変数です。エンドユーザーIDやワークフローIDなどが含まれます。', + fieldsDescription: { + conversationId: '会話ID', + dialogCount: '会話数', + userId: 'ユーザーID', + triggerTimestamp: 'アプリケーションの起動タイムスタンプ', + appId: 'アプリケーションID', + workflowId: 'ワークフローID', + workflowRunId: 'ワークフロー実行ID', + }, + }, + sidebar: { + exportWarning: '現在保存されているバージョンをエクスポート', + exportWarningDesc: 'これは現在保存されているワークフローのバージョンをエクスポートします。エディターで未保存の変更がある場合は、まずワークフローキャンバスのエクスポートオプションを使用して保存してください。', + }, + chatVariable: { + panelTitle: '会話変数', + panelDescription: '対話情報を保存・管理(会話履歴/ファイル/ユーザー設定など)。書き換えができます。', + docLink: '詳細ドキュメント', + button: '変数を追加', + modal: { + title: '会話変数を追加', + editTitle: '会話変数を編集', + name: '変数名', + namePlaceholder: '変数名を入力', + type: 'タイプ', + value: 'デフォルト値', + valuePlaceholder: 'デフォルト値、設定しない場合は空白にしてください', + description: '説明', + descriptionPlaceholder: '変数の説明を入力', + editInJSON: 'JSON で編集', + oneByOne: '個別追加', + editInForm: 'フォームで編集', + arrayValue: '値', + addArrayValue: '値を追加', + objectKey: 'キー', + 
objectType: 'タイプ', + objectValue: 'デフォルト値', + }, + storedContent: '保存内容', + updatedAt: '最終更新:', + }, + changeHistory: { + title: '変更履歴', + placeholder: 'まだ何も変更されていません', + clearHistory: '履歴をクリア', + hint: 'ヒント', + hintText: 'エディターでの編集操作は、エディターを離れるまで、お使いのデバイスに記録されます。この履歴は、エディターを離れると消去されます。', + stepBackward_one: '{{count}} ステップ戻る', + stepBackward_other: '{{count}} ステップ戻る', + stepForward_one: '{{count}} ステップ進む', + stepForward_other: '{{count}} ステップ進む', + sessionStart: 'セッション開始', + currentState: '現在の状態', + nodeTitleChange: 'ブロックのタイトルが変更されました', + nodeDescriptionChange: 'ブロックの説明が変更されました', + nodeDragStop: 'ブロックが移動されました', + nodeChange: 'ブロックが変更されました', + nodeConnect: 'ブロックが接続されました', + nodePaste: 'ブロックが貼り付けられました', + nodeDelete: 'ブロックが削除されました', + nodeAdd: 'ブロックが追加されました', + nodeResize: 'ブロックのサイズが変更されました', + noteAdd: '注釈が追加されました', + noteChange: '注釈が変更されました', + noteDelete: '注釈が削除されました', + edgeDelete: 'ブロックの接続が解除されました', + }, + errorMsg: { + fieldRequired: '{{field}} は必須です', + rerankModelRequired: 'Rerank モデルが設定されていません', + authRequired: '認証が必要です', + invalidJson: '{{field}} は無効な JSON です', + fields: { + variable: '変数名', + variableValue: '変数値', + code: 'コード', + model: 'モデル', + rerankModel: 'Rerank モデル', + visionVariable: 'ビジョン変数', + }, + invalidVariable: '無効な変数です', + noValidTool: '{{field}} に利用可能なツールがありません', + toolParameterRequired: '{{field}}: パラメータ [{{param}}] は必須です', + startNodeRequired: '{{operation}}前に開始ノードを追加してください', + }, + error: { + startNodeRequired: '{{operation}}前に開始ノードを追加してください', + operations: { + connectingNodes: 'ノード接続', + addingNodes: 'ノード追加', + modifyingWorkflow: 'ワークフロー変更', + updatingWorkflow: 'ワークフロー更新', + }, + }, + singleRun: { + testRun: 'テスト実行', + startRun: '実行開始', + running: '実行中', + testRunIteration: 'テスト実行(イテレーション)', + back: '戻る', + iteration: 'イテレーション', + loop: 'ループ', + reRun: '再実行', + preparingDataSource: 'データソースの準備', + }, + tabs: { + 'searchBlock': 'ブロック検索', + 'blocks': 'ブロック', + 'searchTool': 'ツール検索', + 'searchTrigger': 'トリガー検索...', + 'tools': 'ツール', + 
'allTriggers': 'すべてのトリガー', + 'allTool': 'すべて', + 'customTool': 'カスタム', + 'workflowTool': 'ワークフロー', + 'question-understand': '問題理解', + 'logic': 'ロジック', + 'transform': '変換', + 'utilities': 'ツール', + 'noResult': '該当なし', + 'noPluginsFound': 'プラグインが見つかりません', + 'requestToCommunity': 'コミュニティにリクエスト', + 'plugin': 'プラグイン', + 'agent': 'エージェント戦略', + 'noFeaturedPlugins': 'マーケットプレイスでさらにツールを見つける', + 'noFeaturedTriggers': 'マーケットプレイスでさらにトリガーを見つける', + 'addAll': 'すべてを追加する', + 'allAdded': 'すべて追加されました', + 'searchDataSource': 'データソースを検索', + 'sources': 'ソース', + 'start': '始める', + 'startDisabledTip': 'トリガーノードとユーザー入力ノードは互いに排他です。', + 'featuredTools': '特集', + 'showMoreFeatured': 'もっと見る', + 'showLessFeatured': '表示を減らす', + 'installed': 'インストール済み', + 'pluginByAuthor': '{{author}} によって', + 'usePlugin': 'ツールを選択', + 'hideActions': 'ツールを隠す', + }, + blocks: { + 'start': 'ユーザー入力', + 'originalStartNode': '元の開始ノード', + 'end': '出力', + 'answer': '回答', + 'llm': 'LLM', + 'knowledge-retrieval': '知識検索', + 'question-classifier': '質問分類器', + 'if-else': 'IF/ELSE', + 'code': 'コード実行', + 'template-transform': 'テンプレート', + 'http-request': 'HTTP リクエスト', + 'variable-assigner': '変数代入器', + 'variable-aggregator': '変数集約器', + 'assigner': '変数代入', + 'iteration-start': 'イテレーション開始', + 'iteration': 'イテレーション', + 'parameter-extractor': 'パラメータ抽出', + 'document-extractor': 'テキスト抽出', + 'list-operator': 'リスト処理', + 'agent': 'エージェント', + 'loop-start': 'ループ開始', + 'loop': 'ループ', + 'loop-end': 'ループ完了', + 'knowledge-index': '知識ベース', + 'datasource': 'データソース', + 'trigger-plugin': 'プラグイントリガー', + 'trigger-webhook': 'Webhook トリガー', + 'trigger-schedule': 'スケジュールトリガー', + }, + customWebhook: 'カスタムWebhook', + blocksAbout: { + 'start': 'ワークフロー開始時の初期パラメータを定義します。', + 'end': 'ワークフローの出力と結果のタイプを定義します', + 'answer': 'チャットダイアログの返答内容を定義します。', + 'llm': '大規模言語モデルを呼び出して質問回答や自然言語処理を実行します。', + 'knowledge-retrieval': 'ナレッジベースからユーザー質問に関連するテキストを検索します。', + 'question-classifier': '質問の分類条件を定義し、LLM が分類に基づいて対話フローを制御します。', + 'if-else': 'if/else 条件でワークフローを 2 つの分岐に分割します。', + 
'code': 'Python/NodeJS コードを実行してカスタムロジックを実装します。', + 'template-transform': 'Jinja テンプレート構文でデータを文字列に変換します。', + 'http-request': 'HTTP リクエストを送信できます。', + 'variable-assigner': '複数分岐の変数を集約し、下流ノードの設定を統一します。', + 'assigner': '書き込み可能な変数(例:会話変数)への値の割り当てを行います。', + 'variable-aggregator': '複数分岐の変数を集約し、下流ノードの設定を統一します。', + 'iteration': 'リスト要素に対して反復処理を実行し全結果を出力します。', + 'loop': '終了条件達成まで、または最大反復回数までロジックを繰り返します。', + 'loop-end': '「break」相当の機能です。このノードに設定項目はなく、ループ処理中にこのノードに到達すると即時終了します。', + 'parameter-extractor': '自然言語から構造化パラメータを抽出し、後続処理で利用します。', + 'document-extractor': 'アップロード文書を LLM 処理用に最適化されたテキストに変換します。', + 'list-operator': '配列のフィルタリングやソート処理を行います。', + 'agent': '大規模言語モデルを活用した質問応答や自然言語処理を実行します。', + 'knowledge-index': '知識ベースについて', + 'datasource': 'データソースについて', + 'trigger-schedule': 'スケジュールに基づいてワークフローを開始する時間ベースのトリガー', + 'trigger-webhook': 'Webhook トリガーは第三者システムからの HTTP プッシュを受信してワークフローを自動的に開始します。', + 'trigger-plugin': 'サードパーティ統合トリガー、外部プラットフォームのイベントによってワークフローを開始します', + 'group': '複数のノードをグループ化して整理・管理しやすくします', + }, + difyTeam: 'Dify チーム', + operator: { + zoomIn: '拡大', + zoomOut: '縮小', + zoomTo50: '50% サイズ', + zoomTo100: '等倍表示', + zoomToFit: '画面に合わせる', + makeGroup: 'グループ化', + horizontal: '水平', + alignBottom: '下', + alignNodes: 'ノードを整列', + vertical: '垂直', + alignLeft: '左', + alignTop: '上', + alignRight: '右', + alignMiddle: '中央', + distributeVertical: '垂直方向に等間隔配置', + alignCenter: '中央', + selectionAlignment: '選択の整列', + distributeHorizontal: '水平方向に等間隔配置', + }, + variableReference: { + noAvailableVars: '利用可能な変数がありません', + noVarsForOperation: 'この操作に割り当て可能な変数が存在しません。', + noAssignedVars: '割り当て可能な変数がありません', + assignedVarsDescription: '書き込み可能な変数(例:', + conversationVars: '会話変数', + }, + panel: { + userInputField: 'ユーザー入力欄', + changeBlock: 'ノード変更', + helpLink: 'ドキュメントを見る', + about: '詳細', + createdBy: '作成者', + nextStep: '次のステップ', + addNextStep: 'このワークフローで次ノードを追加', + selectNextStep: '次ノード選択', + runThisStep: 'このステップ実行', + checklist: 'チェックリスト', + checklistTip: '公開前に全ての項目を確認してください', + checklistResolved: 
'全てのチェックが完了しました', + goTo: '移動', + startNode: '開始ノード', + organizeBlocks: 'ノード整理', + change: '変更', + optional: '(任意)', + maximize: 'キャンバスを最大化する', + minimize: '全画面を終了する', + scrollToSelectedNode: '選択したノードまでスクロール', + optional_and_hidden: '(オプションおよび非表示)', + openWorkflow: 'ワークフローを開く', + }, + nodes: { + common: { + outputVars: '出力変数', + insertVarTip: '変数を挿入', + memory: { + memory: 'メモリ', + memoryTip: 'チャットメモリ設定', + windowSize: 'メモリウィンドウサイズ', + conversationRoleName: '会話ロール名', + user: 'ユーザー接頭辞', + assistant: 'アシスタント接頭辞', + }, + memories: { + title: 'メモリ', + tip: 'チャットの記憶管理', + builtIn: '組み込み', + }, + errorHandle: { + title: '例外処理', + tip: 'ノード例外発生時の処理ポリシーを設定', + none: { + title: '処理なし', + desc: '例外発生時に処理を停止', + }, + defaultValue: { + title: 'デフォルト値', + desc: '例外発生時のデフォルト出力', + tip: '例外発生時に返される値:', + inLog: 'ノード例外 - デフォルト値を出力', + output: 'デフォルト値出力', + }, + failBranch: { + title: '例外分岐', + desc: '例外発生時に分岐を実行', + customize: '失敗分岐ロジックをカスタマイズ', + customizeTip: '例外発生時、失敗分岐でエラー処理を柔軟に設定可能(エラーログ表示/修復処理/操作スキップ等)', + inLog: 'ノード例外 - 失敗分岐を実行。エラー情報を下流に伝播', + }, + partialSucceeded: { + tip: '{{num}}個のノードで異常発生。ログはトレース画面で確認可能', + }, + }, + retry: { + retry: '再試行', + retryOnFailure: '失敗時再試行', + maxRetries: '最大試行回数', + retryInterval: '再試行間隔', + retryTimes: '失敗時 {{times}}回再試行', + retrying: '再試行中...', + retrySuccessful: '再試行成功', + retryFailed: '再試行失敗', + retryFailedTimes: '{{times}}回再試行失敗', + times: '回', + ms: 'ミリ秒', + retries: '再試行回数:{{num}}', + }, + typeSwitch: { + input: '入力値', + variable: '変数を使用する', + }, + inputVars: '入力変数', + pluginNotInstalled: 'プラグインがインストールされていません', + }, + start: { + required: '必須', + inputField: '入力フィールド', + builtInVar: '組み込み変数', + outputVars: { + query: 'ユーザー入力', + memories: { + des: '会話履歴', + type: 'メッセージ種別', + content: 'メッセージ内容', + }, + files: 'ファイル一覧', + }, + noVarTip: '入力設定はワークフロー内で利用可能', + }, + end: { + outputs: '出力設定', + output: { + type: '出力形式', + variable: '出力変数', + }, + type: { + 'none': 'なし', + 'plain-text': 'プレーンテキスト', + 'structured': '構造化', + }, + }, + 
answer: { + answer: '応答', + outputVars: '出力変数', + }, + llm: { + model: 'AI モデル', + variables: '変数', + context: 'コンテキスト', + contextTooltip: 'ナレッジベースをコンテキストとして利用', + notSetContextInPromptTip: 'コンテキスト利用時はプロンプトに変数を明記してください', + prompt: 'プロンプト', + addMessage: 'メッセージ追加', + roleDescription: { + system: '対話の基本動作を定義', + user: '指示/質問を入力', + assistant: 'ユーザー入力への応答', + }, + vision: 'ビジョン', + files: 'ファイル', + resolution: { + name: '解像度', + high: '高', + low: '低', + }, + outputVars: { + output: '生成内容', + reasoning_content: '推論内容', + usage: 'モデル使用量', + }, + singleRun: { + variable: '変数', + }, + sysQueryInUser: 'ユーザーメッセージに sys.query を含めてください', + jsonSchema: { + title: '構造化データスキーマ', + instruction: '指示', + promptTooltip: 'テキスト説明から標準 JSON スキーマを自動生成できます。', + promptPlaceholder: 'JSON スキーマを入力...', + generate: '生成', + import: 'JSON インポート', + generateJsonSchema: 'スキーマ生成', + generationTip: '自然言語で簡単に JSON スキーマを作成可能です。', + generating: 'JSON スキーマを生成中...', + generatedResult: '生成結果', + resultTip: 'こちらが生成された結果です。ご満足いただけない場合は、前の画面に戻ってプロンプトを修正できます。', + back: '前に戻る', + regenerate: '再生成する', + apply: '適用', + doc: '構造化出力の詳細を見る', + resetDefaults: '初期化', + required: '必須項目', + addField: 'フィールドを追加', + addChildField: 'サブフィールドを追加', + showAdvancedOptions: '詳細設定', + stringValidations: '文字列検証', + fieldNamePlaceholder: 'フィールド名', + descriptionPlaceholder: '説明を入力', + warningTips: { + saveSchema: '編集中のフィールドを確定してから保存してください。', + }, + }, + reasoningFormat: { + tagged: 'タグを考え続けてください', + separated: '思考タグを分ける', + title: '推論タグの分離を有効にする', + tooltip: 'thinkタグから内容を抽出し、それをreasoning_contentフィールドに保存します。', + }, + }, + knowledgeRetrieval: { + queryVariable: '検索変数', + knowledge: 'ナレッジベース', + outputVars: { + output: '検索結果セグメント', + content: 'セグメント内容', + title: 'セグメントタイトル', + icon: 'セグメントアイコン', + url: 'セグメント URL', + metadata: 'メタデータ', + files: '取得したファイル', + }, + metadata: { + title: 'メタデータフィルタ', + tip: 'タグ/カテゴリ等の属性で検索を絞り込み', + options: { + disabled: { + title: '無効', + subTitle: 'フィルタリング不使用', + }, + automatic: { + title: '自動生成', + 
subTitle: '検索履歴からフィルタ条件を自動生成', + desc: 'Query Variable(検索変数)に基づきフィルタ条件を自動生成', + }, + manual: { + title: '手動設定', + subTitle: 'メタデータの条件を手動で追加', + }, + }, + panel: { + title: 'メタデータのフィルタ条件', + conditions: '条件一覧', + add: '条件追加', + search: 'メタデータ検索', + placeholder: '値を入力', + datePlaceholder: '日付選択...', + select: '変数選択...', + }, + }, + queryText: 'クエリテキスト', + queryAttachment: '画像を検索', + }, + http: { + inputVars: '入力変数', + api: 'API', + apiPlaceholder: 'URL を入力(変数使用時は"/"を入力)', + extractListPlaceholder: 'リスト番号を入力(変数使用時は"/"を入力)', + notStartWithHttp: 'API は http:// または https:// で始まってください', + key: 'キー', + type: 'タイプ', + value: '値', + bulkEdit: '一括編集', + keyValueEdit: 'キーバリュー編集', + headers: 'ヘッダー', + params: 'パラメータ', + body: 'ボディ', + binaryFileVariable: 'バイナリファイル変数', + outputVars: { + body: 'レスポンスコンテンツ', + statusCode: 'レスポンスステータスコード', + headers: 'レスポンスヘッダ(JSON)', + files: 'ファイル一覧', + }, + authorization: { + 'authorization': '認証', + 'authorizationType': '認証タイプ', + 'no-auth': 'なし', + 'api-key': 'API キー', + 'auth-type': 'API 認証タイプ', + 'basic': 'ベーシック', + 'bearer': 'Bearer', + 'custom': 'カスタム', + 'api-key-title': 'API キー', + 'header': 'ヘッダー', + }, + insertVarPlaceholder: '変数を挿入するには\'/\'を入力してください', + timeout: { + title: 'タイムアウト設定', + connectLabel: '接続タイムアウト', + connectPlaceholder: '接続タイムアウト(秒)', + readLabel: '読み取りタイムアウト', + readPlaceholder: '読み取りタイムアウト(秒)', + writeLabel: '書き込みタイムアウト', + writePlaceholder: '書き込みタイムアウト(秒)', + }, + curl: { + title: 'cURL からインポート', + placeholder: 'ここに cURL 文字列を貼り付けます', + }, + verifySSL: { + title: 'SSL証明書を確認する', + warningTooltip: 'SSL検証を無効にすることは、本番環境では推奨されません。これは開発またはテストのみに使用すべきであり、中間者攻撃などのセキュリティ脅威に対して接続を脆弱にするためです。', + }, + }, + code: { + inputVars: '入力変数', + outputVars: '出力変数', + advancedDependencies: '高度な依存関係', + advancedDependenciesTip: '消費に時間がかかる、またはデフォルトで組み込まれていない事前ロードされた依存関係を追加します', + searchDependencies: '依存関係を検索', + syncFunctionSignature: 'コードの関数署名を同期', + }, + templateTransform: { + inputVars: '入力変数', + code: 'コード', + codeSupportTip: 'Jinja2 
のみをサポートしています', + outputVars: { + output: '変換されたコンテンツ', + }, + }, + ifElse: { + if: 'もし', + else: 'それ以外', + elseDescription: 'IF 条件が満たされない場合に実行するロジックを定義します。', + and: 'かつ', + or: 'または', + operator: '演算子', + notSetVariable: 'まず変数を設定してください', + comparisonOperator: { + 'contains': '含む', + 'not contains': '含まない', + 'start with': 'で始まる', + 'end with': 'で終わる', + 'is': 'である', + 'is not': 'でない', + 'empty': '空', + 'not empty': '空でない', + 'null': 'null', + 'not null': 'null でない', + 'in': '含まれている', + 'not in': '含まれていない', + 'all of': 'すべての', + 'exists': '存在します', + 'not exists': '存在しません', + 'before': '前に', + 'after': '後', + }, + enterValue: '値を入力', + addCondition: '条件を追加', + conditionNotSetup: '条件が設定されていません', + selectVariable: '変数を選択...', + optionName: { + audio: '音声', + localUpload: 'ローカルアップロード', + image: '画像', + video: '映像', + doc: 'ドキュメント', + url: 'URL', + }, + select: '選ぶ', + addSubVariable: 'サブ変数', + }, + variableAssigner: { + title: '変数を代入する', + outputType: '出力タイプ', + varNotSet: '変数が設定されていません', + noVarTip: '代入された変数を追加してください', + type: { + string: '文字列', + number: '数値', + object: 'オブジェクト', + array: '配列', + }, + aggregationGroup: 'グループ', + aggregationGroupTip: 'この機能を有効にすると、変数集約器は複数のセットの変数を集約できます。', + addGroup: 'グループを追加', + outputVars: { + varDescribe: '{{groupName}} 出力', + }, + setAssignVariable: '代入された変数を設定', + }, + assigner: { + 'assignedVariable': '代入された変数', + 'writeMode': '書き込みモード', + 'writeModeTip': '代入された変数が配列の場合,末尾に追記モードを追加する。', + 'over-write': '上書き', + 'append': '追記', + 'plus': 'プラス', + 'clear': 'クリア', + 'setVariable': '変数を設定する', + 'variable': '変数', + 'operations': { + 'title': '操作', + 'set': 'セット', + 'clear': 'クリア', + 'overwrite': '上書き', + 'append': '追加', + '-=': '-=', + '/=': '/=', + '+=': '+=', + 'over-write': '上書き', + 'extend': '延ばす', + '*=': '*=', + 'remove-last': '最後を削除する', + 'remove-first': '最初を削除する', + }, + 'setParameter': 'パラメータを設定...', + 'selectAssignedVariable': '代入変数を選択...', + 'varNotSet': '変数が設定されていません', + 'variables': '変数', + 'noVarTip': 
'「+」ボタンをクリックして変数を追加します', + 'noAssignedVars': '使用可能な代入変数がありません', + 'assignedVarsDescription': '代入される変数は、会話変数などの書き込み可能な変数である必要があります。', + }, + tool: { + inputVars: '入力変数', + outputVars: { + text: 'ツールが生成したコンテンツ', + files: { + title: 'ツールが生成したファイル', + type: 'サポートタイプ。現在は画像のみサポートされています', + transfer_method: '転送方法。値は remote_url または local_file です', + url: '画像 URL', + upload_file_id: 'アップロードファイル ID', + }, + json: 'ツールで生成された JSON', + }, + authorize: '認証する', + settings: '設定', + insertPlaceholder1: 'タイプするか押してください', + insertPlaceholder2: '変数を挿入する', + }, + questionClassifiers: { + model: 'モデル', + inputVars: '入力変数', + outputVars: { + className: 'クラス名', + usage: 'モデル使用量', + }, + class: 'クラス', + classNamePlaceholder: 'クラス名を入力してください', + advancedSetting: '高度な設定', + topicName: 'トピック名', + topicPlaceholder: 'トピック名を入力してください', + addClass: 'クラスを追加', + instruction: '指示', + instructionTip: '質問分類器が質問をどのように分類するかをよりよく理解するための追加の指示を入力します。', + instructionPlaceholder: '指示を入力してください', + }, + parameterExtractor: { + inputVar: '入力変数', + outputVars: { + isSuccess: '成功。成功した場合の値は 1、失敗した場合の値は 0 です。', + errorReason: 'エラーの理由', + usage: 'モデル使用量', + }, + extractParameters: 'パラメーターを抽出', + importFromTool: 'ツールからインポート', + addExtractParameter: '抽出パラメーターを追加', + addExtractParameterContent: { + name: '名前', + namePlaceholder: '抽出パラメーター名', + type: 'タイプ', + typePlaceholder: '抽出パラメータータイプ', + description: '説明', + descriptionPlaceholder: '抽出パラメーターの説明', + required: '必須', + requiredContent: '必須はモデル推論の参考としてのみ使用され、パラメーター出力の必須検証には使用されません。', + }, + extractParametersNotSet: '抽出パラメーターが設定されていません', + instruction: '指示', + instructionTip: 'パラメーター抽出器がパラメーターを抽出する方法を理解するのに役立つ追加の指示を入力します。', + advancedSetting: '高度な設定', + reasoningMode: '推論モード', + reasoningModeTip: '関数呼び出しやプロンプトの指示に応答するモデルの能力に基づいて、適切な推論モードを選択できます。', + }, + iteration: { + deleteTitle: 'イテレーションノードを削除しますか?', + deleteDesc: 'イテレーションノードを削除すると、すべての子ノードが削除されます', + input: '入力', + output: '出力変数', + iteration_one: '{{count}} イテレーション', + iteration_other: '{{count}} イテレーション', + 
currentIteration: '現在のイテレーション', + ErrorMethod: { + operationTerminated: '終了', + continueOnError: 'エラー時に続行', + removeAbnormalOutput: 'アブノーマルアウトプットの削除', + }, + comma: ',', + error_other: '{{count}}エラー', + error_one: '{{count}}エラー', + parallelModeUpper: 'パラレルモード', + parallelMode: 'パラレルモード', + MaxParallelismTitle: '最大並列処理', + errorResponseMethod: 'エラー応答方式', + parallelPanelDesc: '並列モードでは、イテレーションのタスクは並列実行をサポートします。', + parallelModeEnableDesc: '並列モードでは、イテレーション内のタスクは並列実行をサポートします。これは、右側のプロパティパネルで構成できます。', + parallelModeEnableTitle: 'パラレルモード有効', + MaxParallelismDesc: '最大並列処理は、1 回の反復で同時に実行されるタスクの数を制御するために使用されます。', + answerNodeWarningDesc: '並列モードの警告:応答ノード、会話変数の割り当て、およびイテレーション内の永続的な読み取り/書き込み操作により、例外が発生する可能性があります。', + flattenOutput: '出力をフラット化', + flattenOutputDesc: '有効にすると、すべての反復出力が配列の場合、1つの配列にまとめてフラット化されます。無効の場合はネストされた配列構造のままです。', + }, + loop: { + deleteTitle: 'ループノードを削除しますか?', + deleteDesc: 'ループノードを削除すると、全ての子ノードが削除されます。', + input: '入力', + output: '出力変数', + loop_one: '{{count}}回', + loop_other: '{{count}}回', + currentLoop: '現在のループ', + breakCondition: 'ループ終了条件', + breakConditionTip: 'ループ内の変数やセッション変数を参照し、終了条件を設定できます。', + loopMaxCount: '最大ループ回数', + loopMaxCountError: '最大ループ回数は 1 から{{maxCount}}の範囲で正しく入力してください。', + errorResponseMethod: 'エラー対応方法', + ErrorMethod: { + operationTerminated: 'エラー時に処理を終了', + continueOnError: 'エラーを無視して継続', + removeAbnormalOutput: '異常出力を除外', + }, + loopVariables: 'ループ変数', + initialLoopVariables: '初期ループ変数', + finalLoopVariables: '最終ループ変数', + setLoopVariables: 'ループスコープ内で変数を設定', + variableName: '変数名', + inputMode: '入力モード', + exitConditionTip: 'ループノードには少なくとも 1 つの終了条件が必要です', + loopNode: 'ループノード', + currentLoopCount: '現在のループ回数:{{count}}', + totalLoopCount: '総ループ回数:{{count}}', + error_other: '{{count}} エラー', + error_one: '{{count}} エラー', + comma: ',', + }, + note: { + addNote: 'コメントを追加', + editor: { + placeholder: 'メモを書く...', + small: '小', + medium: '中', + large: '大', + bold: '太字', + italic: '斜体', + strikethrough: '打ち消し線', + link: 'リンク', + openLink: '開く', + unlink:
'リンクをキャンセル', + enterUrl: 'リンク入力中...', + invalidUrl: 'リンク無効', + bulletList: 'リスト', + showAuthor: '著者を表示する', + }, + }, + docExtractor: { + outputVars: { + text: '抽出されたテキスト', + }, + inputVar: '入力変数', + learnMore: '詳細はこちら', + supportFileTypes: 'サポートするファイルタイプ:{{types}}。', + }, + listFilter: { + outputVars: { + last_record: '最後のレコード', + first_record: '最初のレコード', + result: 'フィルター結果', + }, + limit: 'トップ N', + asc: 'ASC', + filterCondition: 'フィルター条件', + filterConditionKey: 'フィルター条件キー', + orderBy: '並べる順番', + filterConditionComparisonValue: 'フィルター条件の値', + selectVariableKeyPlaceholder: 'サブ変数キーを選択する', + filterConditionComparisonOperator: 'フィルター条件を比較オペレーター', + inputVar: '入力変数', + desc: 'DESC', + extractsCondition: 'N 個のアイテムを抽出します', + }, + agent: { + strategy: { + label: 'エージェンティック戦略', + configureTipDesc: 'エージェント戦略を設定した後、このノードは残りの設定を自動的に読み込みます。この戦略は、マルチステップツール推論のメカニズムに影響を与えます。', + searchPlaceholder: 'エージェンティック戦略を検索する', + configureTip: 'エージェンティック戦略を設定してください。', + shortLabel: '戦略', + tooltip: '異なるエージェンティック戦略が、システムがマルチステップのツール呼び出しを計画し実行する方法を決定します。', + selectTip: 'エージェンシー戦略を選択する', + }, + pluginInstaller: { + install: 'インストール', + installing: 'インストール中', + }, + modelNotInMarketplace: { + manageInPlugins: 'プラグインを管理する', + title: 'モデルがインストールされていません', + desc: 'このモデルはローカルまたは GitHub リポジトリからインストールされます。インストール後にご利用ください。', + }, + modelNotSupport: { + title: 'サポートされていないモデル', + descForVersionSwitch: 'インストールされたプラグインのバージョンはこのモデルを提供していません。バージョンを切り替えるにはクリックしてください。', + desc: 'インストールされたプラグインのバージョンは、このモデルを提供していません。', + }, + modelSelectorTooltips: { + deprecated: 'このモデルは廃止されました', + }, + outputVars: { + files: { + url: '画像の URL', + type: 'サポートタイプ。現在はサポート画像のみ', + upload_file_id: 'ファイル ID をアップロード', + transfer_method: '転送方法。値は remote_url または local_file です。', + title: 'エージェント生成ファイル', + }, + text: 'エージェント生成コンテンツ', + json: 'エージェント生成の JSON', + usage: 'モデル使用量', + }, + checkList: { + strategyNotSelected: '戦略が選択されていません', + }, + installPlugin: { + install: 'インストール', + changelog: '変更ログ', + cancel: 'キャンセル', + desc: 
'次のプラグインをインストールしようとしています', + title: 'プラグインをインストールする', + }, + strategyNotSet: 'エージェンティック戦略は設定されていません', + strategyNotInstallTooltip: '{{strategy}}はインストールされていません', + modelNotSelected: 'モデルが選択されていません', + toolNotAuthorizedTooltip: '{{tool}} 認可されていません', + toolNotInstallTooltip: '{{tool}}はインストールされていません', + tools: 'ツール', + learnMore: 'もっと学ぶ', + configureModel: 'モデルを設定する', + model: 'モデル', + linkToPlugin: 'プラグインへのリンク', + notAuthorized: '権限がありません', + modelNotInstallTooltip: 'このモデルはインストールされていません', + maxIterations: '最大反復回数', + toolbox: 'ツールボックス', + pluginNotInstalled: 'このプラグインはインストールされていません', + strategyNotFoundDescAndSwitchVersion: 'インストールされたプラグインのバージョンはこの戦略を提供していません。バージョンを切り替えるにはクリックしてください。', + pluginNotInstalledDesc: 'このプラグインは GitHub からインストールされています。再インストールするにはプラグインに移動してください。', + unsupportedStrategy: 'サポートされていない戦略', + pluginNotFoundDesc: 'このプラグインは GitHub からインストールされています。再インストールするにはプラグインに移動してください。', + strategyNotFoundDesc: 'インストールされたプラグインのバージョンは、この戦略を提供していません。', + parameterSchema: 'パラメータスキーマ', + clickToViewParameterSchema: 'パラメータースキーマを見るにはクリックしてください', + }, + dataSource: { + add: 'データソースを追加', + supportedFileFormats: 'サポートされているファイル形式', + supportedFileFormatsPlaceholder: 'ファイル拡張子、例:doc', + }, + knowledgeBase: { + chunkStructureTip: { + title: 'チャンク構造を選択してください', + learnMore: 'もっと学ぶ', + message: 'Difyナレッジベースは、一般的な、親子関係、Q&Aの3つのチャンク構造をサポートしています。各ナレッジベースには一つの構造のみが持てます。前のノードからの出力は、選択されたチャンク構造と一致する必要があります。チャンク構造の選択が利用可能なインデックス方式に影響を与えることに注意してください。', + }, + aboutRetrieval: '取得方法について。', + chooseChunkStructure: 'チャンク構造を選択する', + chunkStructure: 'チャンク構造', + chunkIsRequired: 'チャンク構造が必要です', + retrievalSettingIsRequired: 'リトリーバル設定が必要です', + changeChunkStructure: 'チャンク構造を変更する', + indexMethodIsRequired: 'インデックスメソッドが必要です', + chunksInput: 'チャンク', + chunksInputTip: '知識ベースノードの入力変数はチャンクです。変数のタイプは、選択されたチャンク構造と一貫性のある特定のJSONスキーマを持つオブジェクトです。', + chunksVariableIsRequired: 'Chunks変数は必須です', + embeddingModelIsRequired: '埋め込みモデルが必要です', + rerankingModelIsRequired: '再ランキングモデルが必要です', + embeddingModelIsInvalid: 
'埋め込みモデルが無効です', + rerankingModelIsInvalid: 'リランキングモデルは無効です', + }, + triggerSchedule: { + frequency: { + label: '頻度', + monthly: '毎月', + weekly: '毎週', + daily: '毎日', + hourly: '毎時', + }, + frequencyLabel: '頻度', + days: '日', + title: 'スケジュール', + minutes: '分', + time: '時刻', + useCronExpression: 'Cron 式を使用', + nextExecutionTimes: '次の5回の実行時刻', + nextExecution: '次回実行', + notConfigured: '未設定', + startTime: '開始時刻', + hours: '時間', + onMinute: '分', + executeNow: '今すぐ実行', + weekdays: '曜日', + selectDateTime: '日時を選択', + cronExpression: 'Cron 式', + selectFrequency: '頻度を選択', + lastDay: '月末', + nextExecutionTime: '次回実行時刻', + lastDayTooltip: 'すべての月に31日があるわけではありません。「月末」オプションを使用して各月の最終日を選択してください。', + useVisualPicker: 'ビジュアル設定を使用', + nodeTitle: 'スケジュールトリガー', + mode: 'モード', + timezone: 'タイムゾーン', + visualConfig: 'ビジュアル設定', + monthlyDay: '月の日', + executionTime: '実行時間', + invalidTimezone: '無効なタイムゾーン', + invalidCronExpression: '無効なCron式', + noValidExecutionTime: '有効な実行時間を計算できません', + executionTimeCalculationError: '実行時間の計算に失敗しました', + invalidFrequency: '無効な頻度', + invalidStartTime: '無効な開始時間', + startTimeMustBeFuture: '開始時間は未来の時間である必要があります', + invalidTimeFormat: '無効な時間形式(期待される形式:HH:MM AM/PM)', + invalidWeekday: '無効な曜日:{{weekday}}', + invalidMonthlyDay: '月の日は1-31の間または"last"である必要があります', + invalidOnMinute: '分は0-59の間である必要があります', + invalidExecutionTime: '無効な実行時間', + executionTimeMustBeFuture: '実行時間は未来の時間である必要があります', + }, + triggerWebhook: { + title: 'Webhook トリガー', + nodeTitle: '🔗 Webhook トリガー', + configPlaceholder: 'Webhook トリガーの設定がここに実装されます', + webhookUrl: 'Webhook URL', + webhookUrlPlaceholder: '生成をクリックして Webhook URL を作成', + generate: '生成', + copy: 'コピー', + test: 'テスト', + urlGenerated: 'Webhook URL を生成しました', + urlGenerationFailed: 'Webhook URL の生成に失敗しました', + urlCopied: 'URL をクリップボードにコピーしました', + method: 'メソッド', + contentType: 'コンテンツタイプ', + queryParameters: 'クエリパラメータ', + headerParameters: 'ヘッダーパラメータ', + requestBodyParameters: 'リクエストボディパラメータ', + parameterName: '変数名', + varName: '変数名', + varType: 
'タイプ', + varNamePlaceholder: '変数名を入力...', + required: '必須', + addParameter: '追加', + addHeader: '追加', + noParameters: '設定されたパラメータはありません', + noQueryParameters: 'クエリパラメータは設定されていません', + noHeaders: 'ヘッダーは設定されていません', + noBodyParameters: 'ボディパラメータは設定されていません', + debugUrlTitle: 'テスト実行には、常にこのURLを使用してください', + debugUrlCopy: 'クリックしてコピー', + debugUrlCopied: 'コピーしました!', + errorHandling: 'エラー処理', + errorStrategy: 'エラー処理', + responseConfiguration: 'レスポンス', + asyncMode: '非同期モード', + statusCode: 'ステータスコード', + responseBody: 'レスポンスボディ', + responseBodyPlaceholder: 'ここにレスポンスボディを入力してください', + headers: 'ヘッダー', + validation: { + webhookUrlRequired: 'Webhook URLが必要です', + invalidParameterType: 'パラメータ"{{name}}"の無効なパラメータタイプ"{{type}}"です', + }, + debugUrlPrivateAddressWarning: 'このURLは内部アドレスのようです。Webhookリクエストが失敗する可能性があります。TRIGGER_URL を公開アドレスに変更できます。', + }, + triggerPlugin: { + authorized: '認可された', + notConfigured: '設定されていません', + error: 'エラー', + configuration: '構成', + remove: '削除する', + or: 'または', + useOAuth: 'OAuth を使用', + useApiKey: 'API キーを使用', + authenticationFailed: '認証に失敗しました', + authenticationSuccess: '認証に成功しました', + oauthConfigFailed: 'OAuth 設定に失敗しました', + configureOAuthClient: 'OAuth クライアントを設定', + oauthClientDescription: '認証を有効にするために OAuth クライアント認証情報を設定してください', + oauthClientSaved: 'OAuth クライアント設定が正常に保存されました', + configureApiKey: 'API キーを設定', + apiKeyDescription: '認証のための API キー認証情報を設定してください', + apiKeyConfigured: 'API キーが正常に設定されました', + configurationFailed: '設定に失敗しました', + failedToStart: '認証フローの開始に失敗しました', + credentialsVerified: '認証情報が正常に検証されました', + credentialVerificationFailed: '認証情報の検証に失敗しました', + verifyAndContinue: '検証して続行', + configureParameters: 'パラメーターを設定', + parametersDescription: 'トリガーのパラメーターとプロパティを設定してください', + configurationComplete: '設定完了', + configurationCompleteDescription: 'トリガーが正常に設定されました', + configurationCompleteMessage: 'トリガーの設定が完了し、使用する準備ができました。', + parameters: 'パラメーター', + properties: 'プロパティ', + propertiesDescription: 'このトリガーの追加設定プロパティ', + noConfigurationRequired: 
'このトリガーには追加の設定は必要ありません。', + subscriptionName: 'サブスクリプション名', + subscriptionNameDescription: 'このトリガーサブスクリプションの一意な名前を入力してください', + subscriptionNamePlaceholder: 'サブスクリプション名を入力...', + subscriptionNameRequired: 'サブスクリプション名は必須です', + notAuthorized: '認可されていません', + selectSubscription: 'サブスクリプションを選択', + availableSubscriptions: '利用可能なサブスクリプション', + addSubscription: '新しいサブスクリプションを追加', + removeSubscription: 'サブスクリプションを解除', + subscriptionRemoved: 'サブスクリプションが正常に削除されました', + subscriptionRequired: 'サブスクリプションが必要です', + }, + }, + tracing: { + stopBy: '{{user}}によって停止', + }, + versionHistory: { + title: 'バージョン', + currentDraft: '現在の下書き', + latest: '最新版', + filter: { + all: 'すべて', + onlyYours: '自分のみ', + onlyShowNamedVersions: '名前付きバージョンのみ', + reset: 'リセット', + empty: '該当するバージョンがありません', + }, + defaultName: '名称未設定', + nameThisVersion: 'バージョン名を付ける', + editVersionInfo: 'バージョン情報を編集', + editField: { + title: 'タイトル', + releaseNotes: 'リリースノート', + titleLengthLimit: 'タイトルは{{limit}}文字以内で入力してください', + releaseNotesLengthLimit: 'リリースノートは{{limit}}文字以内で入力してください', + }, + releaseNotesPlaceholder: '変更内容を入力してください', + restorationTip: 'バージョンを復元すると、現在の下書きが上書きされます', + deletionTip: '削除したデータは復元できません。よろしいですか?', + action: { + restoreSuccess: '復元が完了しました', + restoreFailure: '復元に失敗しました', + deleteSuccess: '削除が完了しました', + deleteFailure: '削除に失敗しました', + updateSuccess: '更新が完了しました', + updateFailure: '更新に失敗しました', + copyIdSuccess: 'IDがクリップボードにコピーされました', + }, + copyId: 'IDをコピー', + }, + debug: { + noData: { + runThisNode: 'このノードを実行してください', + description: '最後の実行の結果がここに表示されます', + }, + variableInspect: { + listening: { + title: 'トリガーからのイベントを待機中…', + tip: 'HTTP {{nodeName}} エンドポイントにテストリクエストを送信するか、ライブイベントデバッグ用のコールバック URL として利用してイベントトリガーをシミュレートできます。すべての出力は Variable Inspector で直接確認できます。', + tipPlugin: '{{- pluginName}} でイベントを作成し、これらのイベントの出力を Variable Inspector で取得できます。', + tipSchedule: 'スケジュールトリガーからのイベントを待機しています。\n次回の予定実行: {{nextTriggerTime}}', + tipFallback: 'トリガーイベントを待機しています。出力はここに表示されます。', + defaultNodeName: 'このトリガー', + defaultPluginName: 
'このプラグイントリガー', + defaultScheduleTime: '未設定', + selectedTriggers: '選択したトリガー', + stopButton: '停止', + }, + trigger: { + clear: 'クリア', + running: 'キャッシング実行状況', + cached: 'キャッシュされた変数を表示', + stop: '走るのを止めて', + normal: '変数検査', + }, + clearAll: 'すべてリセット', + emptyLink: 'もっと学ぶ', + systemNode: 'システム', + view: 'ログを表示', + resetConversationVar: '会話の変数をデフォルト値にリセットする', + chatNode: '会話', + reset: '最後の実行値にリセットする', + clearNode: 'キャッシュされた変数をクリアする', + edited: '編集された', + title: '変数検査', + envNode: '環境', + emptyTip: 'キャンバス上でノードをステップ実行するか、ノードを一歩ずつ実行した後、変数インスペクトでノード変数の現在の値を確認できます。', + export: '輸出', + largeDataNoExport: '大規模データ - 一部プレビューのみ', + exportToolTip: '変数をファイルとしてエクスポートする', + largeData: '大きなデータ、読み取り専用のプレビュー。すべてを表示するにはエクスポートしてください。', + }, + settingsTab: '設定', + lastRunTab: '最後の実行', + relationsTab: '関係', + relations: { + dependencies: '依存元', + dependents: '依存先', + dependenciesDescription: 'このノードが依存している他のノード', + dependentsDescription: 'このノードに依存している他のノード', + noDependencies: '依存元なし', + noDependents: '依存先なし', + }, + copyLastRun: '最後の実行をコピー', + noLastRunFound: '以前の実行が見つかりませんでした。', + copyLastRunError: '最後の実行の入力をコピーできませんでした', + noMatchingInputsFound: '前回の実行から一致する入力が見つかりませんでした。', + lastRunInputsCopied: '前回の実行から{{count}}個の入力をコピーしました', + lastOutput: '最後の出力', + }, + triggerStatus: { + enabled: 'トリガー', + disabled: 'トリガー • 無効', + }, + entryNodeStatus: { + enabled: 'スタート', + disabled: '開始 • 無効', + }, + onboarding: { + title: '開始するには開始ノードを選択してください', + description: '異なる開始ノードには異なる機能があります。心配しないでください、いつでも変更できます。', + userInputFull: 'ユーザー入力(元の開始ノード)', + userInputDescription: 'ユーザー入力変数の設定を可能にする開始ノードで、Webアプリ、サービスAPI、MCPサーバー、およびツールとしてのワークフロー機能を持ちます。', + trigger: 'トリガー', + triggerDescription: 'トリガーは、スケジュールされたタスク、カスタムwebhook、または他のアプリとの統合など、ワークフローの開始ノードとして機能できます。', + back: '戻る', + learnMore: '詳細を見る', + aboutStartNode: '開始ノードについて。', + escTip: { + press: '', + key: 'esc', + toDismiss: 'キーで閉じる', + }, + }, +} + +export default translation diff --git a/web/i18n/zh-Hans/workflow.json b/web/i18n/zh-Hans/workflow.json 
index c8217614a6..356aea5656 100644 --- a/web/i18n/zh-Hans/workflow.json +++ b/web/i18n/zh-Hans/workflow.json @@ -173,6 +173,7 @@ "common.needConnectTip": "此节点尚未连接到其他节点", "common.needOutputNode": "必须添加输出节点", "common.needStartNode": "必须添加至少一个开始节点", + "common.noAgentNodes": "没有可用的 Agent 节点", "common.noHistory": "没有历史版本", "common.noVar": "没有变量", "common.notRunning": "尚未运行", @@ -204,6 +205,7 @@ "common.runApp": "运行", "common.runHistory": "运行历史", "common.running": "运行中", + "common.searchAgent": "搜索 Agent...", "common.searchVar": "搜索变量", "common.setVarValuePlaceholder": "设置变量值", "common.showRunHistory": "显示运行历史", @@ -215,6 +217,7 @@ "common.variableNamePlaceholder": "变量名", "common.versionHistory": "版本历史", "common.viewDetailInTracingPanel": "查看详细信息", + "common.viewInternals": "查看内部结构", "common.viewOnly": "只读", "common.viewRunHistory": "查看运行历史", "common.workflowAsTool": "发布为工具", @@ -629,8 +632,10 @@ "nodes.listFilter.outputVars.last_record": "最后一条记录", "nodes.listFilter.outputVars.result": "过滤结果", "nodes.listFilter.selectVariableKeyPlaceholder": "选择子变量的 Key", + "nodes.llm.addContext": "添加上下文", "nodes.llm.addMessage": "添加消息", "nodes.llm.context": "上下文", + "nodes.llm.contextBlock": "上下文块", "nodes.llm.contextTooltip": "您可以导入知识库作为上下文", "nodes.llm.files": "文件", "nodes.llm.jsonSchema.addChildField": "添加子字段", @@ -667,6 +672,7 @@ "nodes.llm.reasoningFormat.tagged": "保持思考标签", "nodes.llm.reasoningFormat.title": "启用推理标签分离", "nodes.llm.reasoningFormat.tooltip": "从think标签中提取内容,并将其存储在reasoning_content字段中。", + "nodes.llm.removeContext": "删除上下文", "nodes.llm.resolution.high": "高", "nodes.llm.resolution.low": "低", "nodes.llm.resolution.name": "分辨率", @@ -767,10 +773,13 @@ "nodes.templateTransform.codeSupportTip": "只支持 Jinja2", "nodes.templateTransform.inputVars": "输入变量", "nodes.templateTransform.outputVars.output": "转换后内容", + "nodes.tool.agentPlaceholder": "告诉我 {{paramKey}}...", + "nodes.tool.assembleVariables": "组装变量", "nodes.tool.authorize": "授权", "nodes.tool.inputVars": "输入变量", 
"nodes.tool.insertPlaceholder1": "键入", "nodes.tool.insertPlaceholder2": "插入变量", + "nodes.tool.insertPlaceholder3": "添加代理", "nodes.tool.outputVars.files.title": "工具生成的文件", "nodes.tool.outputVars.files.transfer_method": "传输方式。值为 remote_url 或 local_file", "nodes.tool.outputVars.files.type": "支持类型。现在只支持图片", @@ -945,6 +954,7 @@ "operator.distributeHorizontal": "水平等间距", "operator.distributeVertical": "垂直等间距", "operator.horizontal": "水平方向", + "operator.makeGroup": "建立群组", "operator.selectionAlignment": "选择对齐", "operator.vertical": "垂直方向", "operator.zoomIn": "放大", @@ -973,6 +983,7 @@ "panel.scrollToSelectedNode": "滚动至选中节点", "panel.selectNextStep": "选择下一个节点", "panel.startNode": "开始节点", + "panel.ungroup": "取消编组", "panel.userInputField": "用户输入字段", "publishLimit.startNodeDesc": "您已达到此计划上每个工作流最多 2 个触发器的限制。请升级后再发布此工作流。", "publishLimit.startNodeTitlePrefix": "升级以", @@ -1031,6 +1042,22 @@ "skillSidebar.unsavedChanges.content": "您有未保存的更改,是否放弃?", "skillSidebar.unsavedChanges.title": "未保存的更改", "skillSidebar.uploading": "上传中...", + "subGraphModal.canvasPlaceholder": "点击配置内部结构", + "subGraphModal.defaultValueHint": "返回以下值", + "subGraphModal.internalStructure": "内部结构", + "subGraphModal.internalStructureDesc": "@{{name}} 的内部结构", + "subGraphModal.lastRun": "上次运行", + "subGraphModal.noRunHistory": "暂无运行记录", + "subGraphModal.outputVariables": "输出变量", + "subGraphModal.settings": "设置", + "subGraphModal.sourceNode": "来源", + "subGraphModal.title": "内部结构", + "subGraphModal.whenOutputIsNone": "当输出为空时", + "subGraphModal.whenOutputNone.default": "默认值", + "subGraphModal.whenOutputNone.defaultDesc": "返回以下值", + "subGraphModal.whenOutputNone.error": "抛出错误", + "subGraphModal.whenOutputNone.errorDesc": "将错误传递给外部工作流", + "subGraphModal.whenOutputNone.skip": "跳过此步骤", "tabs.-": "默认", "tabs.addAll": "添加全部", "tabs.agent": "Agent 策略", diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts new file mode 100644 index 0000000000..6d00f279a3 --- /dev/null +++ b/web/i18n/zh-Hans/workflow.ts @@ -0,0 
+1,1298 @@ +const translation = { + common: { + undo: '撤销', + redo: '重做', + editing: '编辑中', + autoSaved: '自动保存', + unpublished: '未发布', + published: '已发布', + publish: '发布', + update: '更新', + publishUpdate: '发布更新', + run: '测试运行', + running: '运行中', + listening: '监听中', + chooseStartNodeToRun: '选择启动节点进行运行', + runAllTriggers: '运行所有触发器', + inRunMode: '在运行模式中', + inPreview: '预览中', + inPreviewMode: '预览中', + preview: '预览', + viewRunHistory: '查看运行历史', + runHistory: '运行历史', + goBackToEdit: '返回编辑模式', + conversationLog: '对话记录', + features: '功能', + featuresDescription: '增强 web app 用户体验', + ImageUploadLegacyTip: '现在可以在 start 表单中创建文件类型变量。未来我们将不继续支持图片上传功能。', + fileUploadTip: '图片上传功能已扩展为文件上传。', + featuresDocLink: '了解更多', + debugAndPreview: '预览', + restart: '重新开始', + currentDraft: '当前草稿', + currentDraftUnpublished: '当前草稿未发布', + latestPublished: '最新发布', + publishedAt: '发布于', + restore: '恢复', + versionHistory: '版本历史', + exitVersions: '退出版本历史', + runApp: '运行', + batchRunApp: '批量运行', + accessAPIReference: '访问 API', + embedIntoSite: '嵌入网站', + addTitle: '添加标题...', + addDescription: '添加描述...', + noVar: '没有变量', + variableNamePlaceholder: '变量名', + searchVar: '搜索变量', + setVarValuePlaceholder: '设置变量值', + needConnectTip: '此节点尚未连接到其他节点', + maxTreeDepth: '每个分支最大限制 {{depth}} 个节点', + needAdd: '必须添加{{node}}节点', + needOutputNode: '必须添加输出节点', + needStartNode: '必须添加至少一个开始节点', + needAnswerNode: '必须添加直接回复节点', + workflowProcess: '工作流', + notRunning: '尚未运行', + previewPlaceholder: '在下面的框中输入内容开始调试聊天机器人', + effectVarConfirm: { + title: '移除变量', + content: '该变量在其他节点中使用。您是否仍要删除它?', + }, + insertVarTip: '按 \'/\' 键快速插入', + processData: '数据处理', + input: '输入', + output: '输出', + jinjaEditorPlaceholder: '输入“/”或“{”插入变量', + viewOnly: '只读', + showRunHistory: '显示运行历史', + enableJinja: '开启支持 Jinja 模板', + learnMore: '了解更多', + copy: '拷贝', + duplicate: '复制', + addBlock: '添加节点', + pasteHere: '粘贴到这里', + pointerMode: '指针模式', + handMode: '手模式', + exportImage: '导出图片', + exportPNG: '导出为 PNG', + exportJPEG: '导出为 JPEG', + exportSVG: 
'导出为 SVG', + currentView: '当前视图', + currentWorkflow: '整个工作流', + moreActions: '更多操作', + model: '模型', + workflowAsTool: '发布为工具', + configureRequired: '需要进行配置', + configure: '配置', + manageInTools: '访问工具页', + workflowAsToolTip: '工作流更新后需要重新配置工具参数', + workflowAsToolDisabledHint: '请先发布最新的工作流,并确保已连接的 User Input 节点后再配置为工具。', + viewDetailInTracingPanel: '查看详细信息', + syncingData: '同步数据中,只需几秒钟。', + importDSL: '导入 DSL', + importDSLTip: '当前草稿将被覆盖。在导入之前请导出工作流作为备份。', + backupCurrentDraft: '备份当前草稿', + chooseDSL: '选择 DSL(yml) 文件', + overwriteAndImport: '覆盖并导入', + importFailure: '导入失败', + importWarning: '注意', + importWarningDetails: 'DSL 版本差异可能影响部分功能表现', + importSuccess: '导入成功', + parallelTip: { + click: { + title: '点击', + desc: '添加节点', + }, + drag: { + title: '拖拽', + desc: '连接节点', + }, + limit: '并行分支限制为 {{num}} 个', + depthLimit: '并行嵌套层数限制 {{num}} 层', + }, + disconnect: '断开连接', + jumpToNode: '跳转到节点', + addParallelNode: '添加并行节点', + parallel: '并行', + branch: '分支', + onFailure: '异常时', + addFailureBranch: '添加异常分支', + openInExplore: '在“探索”中打开', + loadMore: '加载更多', + noHistory: '没有历史版本', + tagBound: '使用此标签的应用数量', + }, + publishLimit: { + startNodeTitlePrefix: '升级以', + startNodeTitleSuffix: '解锁每个工作流无限制的触发器', + startNodeDesc: '您已达到此计划上每个工作流最多 2 个触发器的限制。请升级后再发布此工作流。', + }, + env: { + envPanelTitle: '环境变量', + envDescription: '环境变量是一种存储敏感信息的方法,如 API 密钥、数据库密码等。它们被存储在工作流程中,而不是代码中,以便在不同环境中共享。', + envPanelButton: '添加环境变量', + modal: { + title: '添加环境变量', + editTitle: '编辑环境变量', + type: '类型', + name: '名称', + namePlaceholder: '变量名', + value: '值', + valuePlaceholder: '变量值', + secretTip: '用于定义敏感信息或数据,导出 DSL 时设置了防泄露机制。', + description: '描述', + descriptionPlaceholder: '变量的描述', + }, + export: { + title: '导出 Secret 类型环境变量?', + checkbox: '导出 secret 值', + ignore: '导出 DSL', + export: '导出包含 Secret 值的 DSL', + }, + }, + globalVar: { + title: '系统变量', + description: '系统变量是全局变量,在类型匹配时无需连线即可被任意节点引用,例如终端用户 ID 和工作流 ID。', + fieldsDescription: { + conversationId: '会话 ID', + dialogCount: '会话次数', + userId: '用户 ID', + 
triggerTimestamp: '应用开始运行的时间戳', + appId: '应用 ID', + workflowId: '工作流 ID', + workflowRunId: '工作流运行 ID', + }, + }, + sidebar: { + exportWarning: '导出当前已保存版本', + exportWarningDesc: '这将导出您工作流的当前已保存版本。如果您在编辑器中有未保存的更改,请先使用工作流画布中的导出选项保存它们。', + }, + chatVariable: { + panelTitle: '会话变量', + panelDescription: '会话变量用于存储 LLM 需要的上下文信息,如用户偏好、对话历史等。它是可读写的。', + docLink: '查看文档了解更多。', + button: '添加变量', + modal: { + title: '添加会话变量', + editTitle: '编辑会话变量', + name: '名称', + namePlaceholder: '变量名', + type: '类型', + value: '默认值', + valuePlaceholder: '默认值,可以为空', + description: '描述', + descriptionPlaceholder: '变量的描述', + editInJSON: '在 JSON 中编辑', + oneByOne: '逐个添加', + editInForm: '在表单中编辑', + arrayValue: '值', + addArrayValue: '添加值', + objectKey: '属性', + objectType: '类型', + objectValue: '默认值', + }, + storedContent: '存储内容', + updatedAt: '更新时间 ', + }, + changeHistory: { + title: '变更历史', + placeholder: '尚未更改任何内容', + clearHistory: '清除历史记录', + hint: '提示', + hintText: '您的编辑操作将被跟踪并存储在您的设备上,直到您离开编辑器。此历史记录将在您离开编辑器时被清除。', + stepBackward_one: '{{count}} 步后退', + stepBackward_other: '{{count}} 步后退', + stepForward_one: '{{count}} 步前进', + stepForward_other: '{{count}} 步前进', + sessionStart: '会话开始', + currentState: '当前状态', + nodeTitleChange: '块标题已更改', + nodeDescriptionChange: '块描述已更改', + nodeDragStop: '块已移动', + nodeChange: '块已更改', + nodeConnect: '块已连接', + nodePaste: '块已粘贴', + nodeDelete: '块已删除', + nodeAdd: '块已添加', + nodeResize: '块已调整大小', + noteAdd: '注释已添加', + noteChange: '注释已更改', + noteDelete: '注释已删除', + edgeDelete: '块已断开连接', + }, + errorMsg: { + fieldRequired: '{{field}} 不能为空', + rerankModelRequired: '未配置 Rerank 模型', + authRequired: '请先授权', + invalidJson: '{{field}} 是非法的 JSON', + fields: { + variable: '变量名', + variableValue: '变量值', + code: '代码', + model: '模型', + rerankModel: 'Rerank 模型', + visionVariable: '视觉变量', + }, + invalidVariable: '无效的变量', + noValidTool: '{{field}} 无可用工具', + toolParameterRequired: '{{field}}: 参数 [{{param}}] 不能为空', + startNodeRequired: '请先添加开始节点,然后再{{operation}}', + }, + error: { + 
startNodeRequired: '请先添加开始节点,然后再{{operation}}', + operations: { + connectingNodes: '连接节点', + addingNodes: '添加节点', + modifyingWorkflow: '修改工作流', + updatingWorkflow: '更新工作流', + }, + }, + singleRun: { + testRun: '测试运行', + startRun: '开始运行', + preparingDataSource: '准备数据源', + reRun: '重新运行', + running: '运行中', + testRunIteration: '测试运行迭代', + back: '返回', + iteration: '迭代', + loop: '循环', + }, + tabs: { + 'searchBlock': '搜索节点', + 'blocks': '节点', + 'searchTool': '搜索工具', + 'searchTrigger': '搜索触发器...', + 'allTriggers': '全部触发器', + 'tools': '工具', + 'allTool': '全部', + 'plugin': '插件', + 'customTool': '自定义', + 'workflowTool': '工作流', + 'question-understand': '问题理解', + 'logic': '逻辑', + 'transform': '转换', + 'utilities': '工具', + 'noResult': '未找到匹配项', + 'noPluginsFound': '未找到插件', + 'requestToCommunity': '向社区反馈', + 'agent': 'Agent 策略', + 'allAdded': '已添加全部', + 'addAll': '添加全部', + 'sources': '数据源', + 'searchDataSource': '搜索数据源', + 'start': '开始', + 'featuredTools': '精选推荐', + 'showMoreFeatured': '查看更多', + 'showLessFeatured': '收起', + 'installed': '已安装', + 'pluginByAuthor': '来自 {{author}}', + 'usePlugin': '选择工具', + 'hideActions': '收起工具', + 'noFeaturedPlugins': '前往插件市场查看更多工具', + 'noFeaturedTriggers': '前往插件市场查看更多触发器', + 'startDisabledTip': '触发节点与用户输入节点互斥。', + }, + blocks: { + 'start': '用户输入', + 'originalStartNode': '原始开始节点', + 'end': '输出', + 'answer': '直接回复', + 'llm': 'LLM', + 'knowledge-retrieval': '知识检索', + 'question-classifier': '问题分类器', + 'if-else': '条件分支', + 'code': '代码执行', + 'template-transform': '模板转换', + 'http-request': 'HTTP 请求', + 'variable-assigner': '变量赋值器', + 'variable-aggregator': '变量聚合器', + 'assigner': '变量赋值', + 'iteration-start': '迭代开始', + 'iteration': '迭代', + 'parameter-extractor': '参数提取器', + 'document-extractor': '文档提取器', + 'list-operator': '列表操作', + 'agent': 'Agent', + 'loop-start': '循环开始', + 'loop': '循环', + 'loop-end': '退出循环', + 'knowledge-index': '知识库', + 'datasource': '数据源', + 'trigger-webhook': 'Webhook 触发器', + 'trigger-schedule': '定时触发器', + 'trigger-plugin': '插件触发器', + }, 
+ customWebhook: '自定义 Webhook', + blocksAbout: { + 'start': '定义一个 workflow 流程启动的初始参数', + 'end': '定义一个 workflow 流程的输出和结果类型', + 'answer': '定义一个聊天对话的回复内容', + 'llm': '调用大语言模型回答问题或者对自然语言进行处理', + 'knowledge-retrieval': '允许你从知识库中查询与用户问题相关的文本内容', + 'question-classifier': '定义用户问题的分类条件,LLM 能够根据分类描述定义对话的进展方式', + 'if-else': '允许你根据 if/else 条件将 workflow 拆分成两个分支', + 'code': '执行一段 Python 或 NodeJS 代码实现自定义逻辑', + 'template-transform': '使用 Jinja 模板语法将数据转换为字符串', + 'http-request': '允许通过 HTTP 协议发送服务器请求', + 'variable-assigner': '将多路分支的变量聚合为一个变量,以实现下游节点统一配置。', + 'assigner': '变量赋值节点用于向可写入变量(例如会话变量)进行变量赋值。', + 'variable-aggregator': '将多路分支的变量聚合为一个变量,以实现下游节点统一配置。', + 'iteration': '对列表对象执行多次步骤直至输出所有结果。', + 'loop': '循环执行一段逻辑直到满足结束条件或者到达循环次数上限。', + 'loop-end': '相当于“break”此节点没有配置项,当循环体内运行到此节点后循环终止。', + 'parameter-extractor': '利用 LLM 从自然语言内推理提取出结构化参数,用于后置的工具调用或 HTTP 请求。', + 'document-extractor': '用于将用户上传的文档解析为 LLM 便于理解的文本内容。', + 'list-operator': '用于过滤或排序数组内容。', + 'agent': '调用大型语言模型回答问题或处理自然语言', + 'knowledge-index': '知识库节点', + 'datasource': '数据源节点', + 'trigger-webhook': 'Webhook 触发器接收来自第三方系统的 HTTP 推送以自动触发工作流。', + 'trigger-schedule': '基于时间的工作流触发器,按计划启动工作流', + 'trigger-plugin': '从外部平台事件启动工作流的第三方集成触发器', + 'group': '将多个节点组合在一起,以便更好地组织和管理', + }, + difyTeam: 'Dify 团队', + operator: { + zoomIn: '放大', + zoomOut: '缩小', + zoomTo50: '缩放到 50%', + zoomTo100: '放大到 100%', + zoomToFit: '自适应视图', + makeGroup: '创建分组', + alignNodes: '对齐节点', + alignLeft: '左对齐', + alignCenter: '居中对齐', + alignRight: '右对齐', + alignTop: '顶部对齐', + alignMiddle: '中部对齐', + alignBottom: '底部对齐', + vertical: '垂直方向', + horizontal: '水平方向', + distributeHorizontal: '水平等间距', + distributeVertical: '垂直等间距', + selectionAlignment: '选择对齐', + }, + variableReference: { + noAvailableVars: '没有可用变量', + noVarsForOperation: '当前选择的操作没有可用的变量进行赋值。', + noAssignedVars: '没有可用的赋值变量', + assignedVarsDescription: '赋值变量必须是可写入的变量,例如:', + conversationVars: '会话变量', + }, + panel: { + userInputField: '用户输入字段', + changeBlock: '更改节点', + helpLink: '查看帮助文档', + openWorkflow: '打开工作流', 
+ about: '关于', + createdBy: '作者', + nextStep: '下一步', + addNextStep: '添加此工作流程中的下一个节点', + selectNextStep: '选择下一个节点', + runThisStep: '运行此步骤', + checklist: '检查清单', + checklistTip: '发布前确保所有问题均已解决', + checklistResolved: '所有问题均已解决', + goTo: '转到', + startNode: '开始节点', + organizeBlocks: '整理节点', + change: '更改', + optional: '(选填)', + maximize: '最大化画布', + minimize: '退出最大化', + scrollToSelectedNode: '滚动至选中节点', + optional_and_hidden: '(选填 & 隐藏)', + }, + nodes: { + common: { + outputVars: '输出变量', + insertVarTip: '插入变量', + memory: { + memory: '记忆', + memoryTip: '聊天记忆设置', + windowSize: '记忆窗口', + conversationRoleName: '对话角色名', + user: '用户前缀', + assistant: '助手前缀', + }, + memories: { + title: '记忆', + tip: '聊天记忆', + builtIn: '内置', + }, + errorHandle: { + title: '异常处理', + tip: '配置异常处理策略,当节点发生异常时触发。', + none: { + title: '无', + desc: '当发生异常且未处理时,节点将停止运行', + }, + defaultValue: { + title: '默认值', + desc: '当发生异常时,指定默认输出内容。', + tip: '当发生异常时,将返回以下值。', + inLog: '节点异常,根据默认值输出。', + output: '输出默认值', + }, + failBranch: { + title: '异常分支', + desc: '当发生异常时,将执行异常分支', + customize: '在画布自定义失败分支逻辑。', + customizeTip: '当节点发生异常时,将自动执行失败分支。失败分支允许您灵活地提供错误消息、报告、修复或跳过操作。', + inLog: '节点异常,将自动执行失败分支。节点输出将返回错误类型和错误信息,并传递给下游。', + }, + partialSucceeded: { + tip: '流程中有 {{num}} 个节点运行异常,请前往追踪查看日志。', + }, + }, + retry: { + retry: '重试', + retryOnFailure: '失败时重试', + maxRetries: '最大重试次数', + retryInterval: '重试间隔', + retryTimes: '失败时重试 {{times}} 次', + retrying: '重试中...', + retrySuccessful: '重试成功', + retryFailed: '重试失败', + retryFailedTimes: '{{times}} 次重试失败', + times: '次', + ms: '毫秒', + retries: '{{num}} 重试次数', + }, + typeSwitch: { + input: '输入值', + variable: '使用变量', + }, + inputVars: '输入变量', + pluginNotInstalled: '插件未安装', + }, + start: { + required: '必填', + inputField: '输入字段', + builtInVar: '内置变量', + outputVars: { + query: '用户输入', + memories: { + des: '会话历史', + type: '消息类型', + content: '消息内容', + }, + files: '文件列表', + }, + noVarTip: '设置的输入可在工作流程中使用', + }, + end: { + outputs: '输出', + output: { + type: '输出类型', + variable: '输出变量', + 
}, + type: { + 'none': '无', + 'plain-text': '纯文本', + 'structured': '结构化', + }, + }, + answer: { + answer: '回复', + outputVars: '输出变量', + }, + llm: { + model: '模型', + variables: '变量', + context: '上下文', + contextTooltip: '您可以导入知识库作为上下文', + notSetContextInPromptTip: '要启用上下文功能,请在提示中填写上下文变量。', + prompt: '提示词', + addMessage: '添加消息', + roleDescription: { + system: '为对话提供高层指导', + user: '向模型提供指令、查询或任何基于文本的输入', + assistant: '基于用户消息的模型回复', + }, + vision: '视觉', + files: '文件', + resolution: { + name: '分辨率', + high: '高', + low: '低', + }, + outputVars: { + output: '生成内容', + reasoning_content: '推理内容', + usage: '模型用量信息', + }, + singleRun: { + variable: '变量', + }, + sysQueryInUser: 'user message 中必须包含 sys.query', + jsonSchema: { + title: '结构化输出 Schema', + instruction: '指令', + promptTooltip: '将文本描述转换为标准化的 JSON Schema 结构', + promptPlaceholder: '描述你的 JSON Schema...', + generate: '生成', + import: '从 JSON 导入', + generateJsonSchema: '生成 JSON Schema', + generationTip: '可以使用自然语言快速创建 JSON Schema。', + generating: '正在为您生成 JSON Schema...', + generatedResult: '生成结果', + resultTip: '以下是生成的结果。如果你对这个结果不满意,可以返回并修改你的提示词。', + back: '返回', + regenerate: '重新生成', + apply: '应用', + doc: '了解有关结构化输出的更多信息', + resetDefaults: '清空配置', + required: '必填', + addField: '添加字段', + addChildField: '添加子字段', + showAdvancedOptions: '显示高级选项', + stringValidations: '字符串验证', + fieldNamePlaceholder: '字段名', + descriptionPlaceholder: '添加描述', + warningTips: { + saveSchema: '请先完成当前字段的编辑', + }, + }, + reasoningFormat: { + tooltip: '从think标签中提取内容,并将其存储在reasoning_content字段中。', + title: '启用推理标签分离', + tagged: '保持思考标签', + separated: '分开思考标签', + }, + }, + knowledgeRetrieval: { + queryVariable: '查询变量', + queryText: '查询文本', + queryAttachment: '查询图片', + knowledge: '知识库', + outputVars: { + output: '召回的分段', + content: '分段内容', + title: '分段标题', + icon: '分段图标', + url: '分段链接', + metadata: '其他元数据', + files: '召回的文件', + }, + metadata: { + title: '元数据过滤', + tip: '元数据过滤是使用元数据属性(例如标签、类别或访问权限)来细化和控制系统内相关信息的检索过程。', + options: { + disabled: { + title: '禁用', + 
subTitle: '禁用元数据过滤', + }, + automatic: { + title: '自动', + subTitle: '根据用户查询自动生成元数据过滤条件', + desc: '根据 Query Variable 自动生成元数据过滤条件', + }, + manual: { + title: '手动', + subTitle: '手动添加元数据过滤条件', + }, + }, + panel: { + title: '元数据过滤条件', + conditions: '条件', + add: '添加条件', + search: '搜索元数据', + placeholder: '输入值', + datePlaceholder: '选择日期...', + select: '选择变量...', + }, + }, + }, + http: { + inputVars: '输入变量', + api: 'API', + apiPlaceholder: '输入 URL,输入变量时请键入‘/’', + extractListPlaceholder: '输入提取列表编号,输入变量时请键入‘/’', + notStartWithHttp: 'API 应该以 http:// 或 https:// 开头', + key: '键', + type: '类型', + value: '值', + bulkEdit: '批量编辑', + keyValueEdit: '键值编辑', + headers: 'Headers', + params: 'Params', + body: 'Body', + binaryFileVariable: 'Binary 文件变量', + outputVars: { + body: '响应内容', + statusCode: '响应状态码', + headers: '响应头列表 JSON', + files: '文件列表', + }, + authorization: { + 'authorization': '鉴权', + 'authorizationType': '鉴权类型', + 'no-auth': '无', + 'api-key': 'API-Key', + 'auth-type': 'API 鉴权类型', + 'basic': '基础', + 'bearer': 'Bearer', + 'custom': '自定义', + 'api-key-title': 'API Key', + 'header': 'Header', + }, + insertVarPlaceholder: '键入 \'/\' 键快速插入变量', + timeout: { + title: '超时设置', + connectLabel: '连接超时', + connectPlaceholder: '输入连接超时(以秒为单位)', + readLabel: '读取超时', + readPlaceholder: '输入读取超时(以秒为单位)', + writeLabel: '写入超时', + writePlaceholder: '输入写入超时(以秒为单位)', + }, + curl: { + title: '导入 cURL', + placeholder: '粘贴 cURL 字符串', + }, + verifySSL: { + title: '验证 SSL 证书', + warningTooltip: '不建议在生产环境中禁用 SSL 验证。这仅应在开发或测试中使用,因为它会使连接容易受到诸如中间人攻击等安全威胁。', + }, + }, + code: { + inputVars: '输入变量', + outputVars: '输出变量', + advancedDependencies: '高级依赖', + advancedDependenciesTip: '在这里添加一些预加载需要消耗较多时间或非默认内置的依赖包', + searchDependencies: '搜索依赖', + syncFunctionSignature: '同步函数签名至代码', + }, + templateTransform: { + inputVars: '输入变量', + code: '代码', + codeSupportTip: '只支持 Jinja2', + outputVars: { + output: '转换后内容', + }, + }, + ifElse: { + if: 'If', + else: 'Else', + elseDescription: '用于定义当 if 条件不满足时应执行的逻辑。', + and: 'and', 
+ or: 'or', + operator: '操作符', + notSetVariable: '请先设置变量', + comparisonOperator: { + 'contains': '包含', + 'not contains': '不包含', + 'start with': '开始是', + 'end with': '结束是', + 'is': '是', + 'is not': '不是', + 'empty': '为空', + 'not empty': '不为空', + 'null': '空', + 'not null': '不为空', + 'in': '在', + 'not in': '不在', + 'all of': '全部是', + 'exists': '存在', + 'not exists': '不存在', + 'before': '早于', + 'after': '晚于', + }, + optionName: { + image: '图片', + doc: '文档', + audio: '音频', + video: '视频', + localUpload: '本地上传', + url: 'URL', + }, + enterValue: '输入值', + addCondition: '添加条件', + conditionNotSetup: '条件未设置', + selectVariable: '选择变量', + addSubVariable: '添加子变量', + select: '选择', + }, + variableAssigner: { + title: '变量赋值', + outputType: '输出类型', + varNotSet: '未设置变量', + noVarTip: '添加需要赋值的变量', + type: { + string: 'String', + number: 'Number', + object: 'Object', + array: 'Array', + }, + aggregationGroup: '聚合分组', + aggregationGroupTip: '开启该功能后,变量聚合器内可以同时聚合多组变量', + addGroup: '添加分组', + outputVars: { + varDescribe: '{{groupName}}的输出变量', + }, + setAssignVariable: '设置赋值变量', + }, + assigner: { + 'assignedVariable': '赋值的变量', + 'varNotSet': '未设置变量', + 'noVarTip': '点击 "+" 按钮添加变量', + 'writeMode': '写入模式', + 'writeModeTip': '使用追加模式时,赋值的变量必须是数组类型。', + 'over-write': '覆盖', + 'append': '追加', + 'plus': '加', + 'clear': '清空', + 'setVariable': '设置变量', + 'selectAssignedVariable': '选择要赋值的变量...', + 'setParameter': '设置参数...', + 'operations': { + 'title': '操作', + 'over-write': '覆盖', + 'overwrite': '覆盖', + 'set': '设置', + 'clear': '清空', + 'extend': '扩展', + 'append': '追加', + 'remove-first': '移除首项', + 'remove-last': '移除末项', + '+=': '+=', + '-=': '-=', + '*=': '*=', + '/=': '/=', + }, + 'variable': '变量', + 'variables': '变量', + 'noAssignedVars': '没有可用的赋值变量', + 'assignedVarsDescription': '赋值变量必须是可写入的变量,例如会话变量。', + }, + tool: { + authorize: '授权', + inputVars: '输入变量', + settings: '设置', + insertPlaceholder1: '键入', + insertPlaceholder2: '插入变量', + outputVars: { + text: '工具生成的内容', + files: { + title: '工具生成的文件', + type: 
'支持类型。现在只支持图片', + transfer_method: '传输方式。值为 remote_url 或 local_file', + url: '图片链接', + upload_file_id: '上传文件 ID', + }, + json: '工具生成的 json', + }, + }, + questionClassifiers: { + model: '模型', + inputVars: '输入变量', + outputVars: { + className: '分类名称', + usage: '模型用量信息', + }, + class: '分类', + classNamePlaceholder: '输入你的分类名称', + advancedSetting: '高级设置', + topicName: '主题内容', + topicPlaceholder: '在这里输入你的主题内容', + addClass: '添加分类', + instruction: '指令', + instructionTip: '你可以输入额外的附加指令,帮助问题分类器更好的理解如何分类', + instructionPlaceholder: '在这里输入你的指令', + }, + parameterExtractor: { + inputVar: '输入变量', + outputVars: { + isSuccess: '是否成功。成功时值为 1,失败时值为 0。', + errorReason: '错误原因', + usage: '模型用量信息', + }, + extractParameters: '提取参数', + importFromTool: '从工具导入', + addExtractParameter: '添加提取参数', + addExtractParameterContent: { + name: '名称', + namePlaceholder: '提取参数名称', + type: '类型', + typePlaceholder: '提取参数类型', + description: '描述', + descriptionPlaceholder: '提取参数描述', + required: '必填', + requiredContent: '必填仅作为模型推理的参考,不用于参数输出的强制验证。', + }, + extractParametersNotSet: '提取参数未设置', + instruction: '指令', + instructionTip: '你可以输入额外的附加指令,帮助参数提取器理解如何提取参数', + advancedSetting: '高级设置', + reasoningMode: '推理模式', + reasoningModeTip: '你可以根据模型对于 Function calling 或 Prompt 的指令响应能力选择合适的推理模式', + }, + iteration: { + deleteTitle: '删除迭代节点?', + deleteDesc: '删除迭代节点将删除所有子节点', + input: '输入', + output: '输出变量', + iteration_one: '{{count}}个迭代', + iteration_other: '{{count}}个迭代', + currentIteration: '当前迭代', + comma: ',', + error_one: '{{count}}个失败', + error_other: '{{count}}个失败', + parallelMode: '并行模式', + parallelModeUpper: '并行模式', + parallelModeEnableTitle: '并行模式启用', + parallelModeEnableDesc: '启用并行模式时迭代内的任务支持并行执行。你可以在右侧的属性面板中进行配置。', + parallelPanelDesc: '在并行模式下,迭代中的任务支持并行执行。', + MaxParallelismTitle: '最大并行度', + MaxParallelismDesc: '最大并行度用于控制单次迭代中同时执行的任务数量。', + errorResponseMethod: '错误响应方法', + ErrorMethod: { + operationTerminated: '错误时终止', + continueOnError: '忽略错误并继续', + removeAbnormalOutput: '移除错误输出', + }, + 
answerNodeWarningDesc: '并行模式警告:在迭代中,回答节点、会话变量赋值和工具持久读/写操作可能会导致异常。', + flattenOutput: '扁平化输出', + flattenOutputDesc: '启用时,如果所有迭代输出都是数组,它们将被扁平化为单个数组。禁用时,输出将保持嵌套数组结构。', + }, + loop: { + deleteTitle: '删除循环节点?', + deleteDesc: '删除循环节点将删除所有子节点', + input: '输入', + output: '输出变量', + loop_one: '{{count}} 个循环', + loop_other: '{{count}} 个循环', + currentLoop: '当前循环', + comma: ',', + error_one: '{{count}}个失败', + error_other: '{{count}}个失败', + breakCondition: '循环终止条件', + breakConditionTip: '支持引用终止条件循环内的变量和会话变量。', + loopMaxCount: '最大循环次数', + loopMaxCountError: '请输入正确的 最大循环次数,范围为 1 到 {{maxCount}}', + errorResponseMethod: '错误响应方法', + ErrorMethod: { + operationTerminated: '错误时终止', + continueOnError: '忽略错误并继续', + removeAbnormalOutput: '移除错误输出', + }, + loopVariables: '循环变量', + initialLoopVariables: '初始循环变量', + finalLoopVariables: '最终循环变量', + setLoopVariables: '在循环范围内设置变量', + variableName: '变量名', + inputMode: '输入模式', + exitConditionTip: '循环节点至少需要一个退出条件', + loopNode: '循环节点', + currentLoopCount: '当前循环次数:{{count}}', + totalLoopCount: '总循环次数:{{count}}', + }, + note: { + addNote: '添加注释', + editor: { + placeholder: '输入注释...', + small: '小', + medium: '中', + large: '大', + bold: '加粗', + italic: '斜体', + strikethrough: '删除线', + link: '链接', + openLink: '打开', + unlink: '取消链接', + enterUrl: '输入链接...', + invalidUrl: '无效的链接', + bulletList: '列表', + showAuthor: '显示作者', + }, + }, + docExtractor: { + inputVar: '输入变量', + outputVars: { + text: '提取的文本', + }, + supportFileTypes: '支持的文件类型:{{types}}。', + learnMore: '了解更多', + }, + listFilter: { + inputVar: '输入变量', + filterCondition: '过滤条件', + filterConditionKey: '过滤条件的 Key', + filterConditionComparisonOperator: '过滤条件比较操作符', + filterConditionComparisonValue: '过滤条件比较值', + selectVariableKeyPlaceholder: '选择子变量的 Key', + extractsCondition: '取第 N 项', + limit: '取前 N 项', + orderBy: '排序', + asc: '升序', + desc: '降序', + outputVars: { + result: '过滤结果', + first_record: '第一条记录', + last_record: '最后一条记录', + }, + }, + agent: { + strategy: { + label: 'Agent 策略', + tooltip: '不同的 Agent 
策略决定了系统如何规划和执行多步工具调用', + shortLabel: '策略', + configureTip: '请配置 Agent 策略。', + configureTipDesc: '配置完成后,此节点将自动加载剩余配置。策略将影响多步工具推理的机制。', + selectTip: '选择 Agent 策略', + searchPlaceholder: '搜索 Agent 策略', + }, + learnMore: '了解更多', + pluginNotInstalled: '插件未安装', + pluginNotInstalledDesc: '此插件是从 GitHub 安装的。请转到插件重新安装', + linkToPlugin: '转到插件', + pluginInstaller: { + install: '安装', + installing: '安装中', + }, + modelNotInMarketplace: { + title: '模型未安装', + desc: '此模型安装自本地或 GitHub 仓库。请安装后使用。', + manageInPlugins: '在插件中管理', + }, + modelNotSupport: { + title: '不支持的模型', + desc: '已安装的插件版本不提供此模型。', + descForVersionSwitch: '已安装的插件版本不提供此模型。点击切换版本。', + }, + model: '模型', + toolbox: '工具箱', + strategyNotSet: '代理策略未设置', + configureModel: '配置模型', + notAuthorized: '未授权', + tools: '工具', + maxIterations: '最大迭代次数', + modelNotInstallTooltip: '此模型未安装', + modelNotSelected: '未选择模型', + toolNotInstallTooltip: '{{tool}} 未安装', + toolNotAuthorizedTooltip: '{{tool}} 未授权', + strategyNotInstallTooltip: '{{strategy}} 未安装', + unsupportedStrategy: '不支持的策略', + strategyNotFoundDesc: '安装的插件版本不提供此策略。', + pluginNotFoundDesc: '此插件安装自 GitHub。请转到插件重新安装。', + strategyNotFoundDescAndSwitchVersion: '安装的插件版本不提供此策略。点击切换版本。', + modelSelectorTooltips: { + deprecated: '此模型已弃用', + }, + outputVars: { + text: 'agent 生成的内容', + usage: '模型用量信息', + files: { + title: 'agent 生成的文件', + type: '支持类型。现在只支持图片', + transfer_method: '传输方式。值为 remote_url 或 local_file', + url: '图片链接', + upload_file_id: '上传文件 ID', + }, + json: 'agent 生成的 json', + }, + checkList: { + strategyNotSelected: '未选择策略', + }, + installPlugin: { + title: '安装插件', + desc: '即将安装以下插件', + changelog: '更新日志', + install: '安装', + cancel: '取消', + }, + clickToViewParameterSchema: '点击查看参数 schema', + parameterSchema: '参数 Schema', + }, + dataSource: { + supportedFileFormats: '支持的文件格式', + supportedFileFormatsPlaceholder: '文件格式,例如:doc', + add: '添加数据源', + }, + knowledgeBase: { + chunkStructure: '分段结构', + chooseChunkStructure: '选择分段结构', + chunkStructureTip: { + title: '请选择分段结构', + message: 
'Dify 知识库支持三种分块结构:通用、父子和问答。每个知识库只能有一种结构。前一节点的输出必须与所选的分块结构相匹配。请注意,分块结构的选择会影响可用的索引方法。', + learnMore: '了解更多', + }, + changeChunkStructure: '更改分段结构', + chunksInput: '分块', + chunksInputTip: '知识库节点的输入变量为 Chunks。该变量类型是符合特定 JSON Schema 的对象,必须与所选块结构一致。', + aboutRetrieval: '关于知识检索。', + chunkIsRequired: '分段结构是必需的', + indexMethodIsRequired: '索引方法是必需的', + chunksVariableIsRequired: 'Chunks 变量是必需的', + embeddingModelIsRequired: 'Embedding 模型是必需的', + embeddingModelIsInvalid: '无效的 Embedding 模型', + retrievalSettingIsRequired: '检索设置是必需的', + rerankingModelIsRequired: 'Reranking 模型是必需的', + rerankingModelIsInvalid: '无效的 Reranking 模型', + }, + triggerSchedule: { + frequency: { + label: '频率', + monthly: '每月', + daily: '每日', + hourly: '每小时', + weekly: '每周', + }, + title: '定时触发', + nodeTitle: '定时触发器', + useCronExpression: '使用 Cron 表达式', + selectFrequency: '选择频率', + nextExecutionTimes: '接下来 5 次执行时间', + hours: '小时', + minutes: '分钟', + onMinute: '分钟', + cronExpression: 'Cron 表达式', + weekdays: '星期', + executeNow: '立即执行', + frequencyLabel: '频率', + nextExecution: '下次执行', + time: '时间', + lastDay: '最后一天', + startTime: '开始时间', + selectDateTime: '选择日期和时间', + lastDayTooltip: '并非所有月份都有 31 天。使用"最后一天"选项来选择每个月的最后一天。', + nextExecutionTime: '下次执行时间', + useVisualPicker: '使用可视化配置', + days: '天', + notConfigured: '未配置', + mode: '模式', + timezone: '时区', + visualConfig: '可视化配置', + monthlyDay: '月份日期', + executionTime: '执行时间', + invalidTimezone: '无效的时区', + invalidCronExpression: '无效的 Cron 表达式', + noValidExecutionTime: '无法计算有效的执行时间', + executionTimeCalculationError: '执行时间计算失败', + invalidFrequency: '无效的频率', + invalidStartTime: '无效的开始时间', + startTimeMustBeFuture: '开始时间必须是将来的时间', + invalidTimeFormat: '无效的时间格式(预期格式:HH:MM AM/PM)', + invalidWeekday: '无效的工作日:{{weekday}}', + invalidMonthlyDay: '月份日期必须在 1-31 之间或为"last"', + invalidOnMinute: '分钟必须在 0-59 之间', + invalidExecutionTime: '无效的执行时间', + executionTimeMustBeFuture: '执行时间必须是将来的时间', + }, + triggerWebhook: { + configPlaceholder: 'Webhook 触发器配置将在此处实现', + title: 'Webhook 触发器', + 
nodeTitle: '🔗 Webhook 触发器', + webhookUrl: 'Webhook URL', + webhookUrlPlaceholder: '点击生成以创建 webhook URL', + generate: '生成', + copy: '复制', + test: '测试', + urlGenerated: 'Webhook URL 生成成功', + urlGenerationFailed: '生成 Webhook URL 失败', + urlCopied: 'URL 已复制到剪贴板', + method: '方法', + contentType: '内容类型', + queryParameters: '查询参数', + headerParameters: 'Header 参数', + requestBodyParameters: '请求体参数', + parameterName: '变量名', + varName: '变量名', + varType: '类型', + varNamePlaceholder: '输入变量名...', + required: '必填', + addParameter: '添加', + addHeader: '添加', + noParameters: '未配置任何参数', + noQueryParameters: '未配置查询参数', + noHeaders: '未配置 Header', + noBodyParameters: '未配置请求体参数', + debugUrlTitle: '测试运行时,请始终使用此URL', + debugUrlCopy: '点击复制', + debugUrlCopied: '已复制!', + errorHandling: '错误处理', + errorStrategy: '错误处理', + responseConfiguration: '响应', + asyncMode: '异步模式', + statusCode: '状态码', + responseBody: '响应体', + responseBodyPlaceholder: '在此输入您的响应体', + headers: 'Headers', + validation: { + webhookUrlRequired: '需要提供Webhook URL', + invalidParameterType: '参数"{{name}}"的参数类型"{{type}}"无效', + }, + debugUrlPrivateAddressWarning: '此 URL 似乎是内部地址,可能导致 webhook 请求失败。您可以将 TRIGGER_URL 更改为公共地址。', + }, + triggerPlugin: { + authorized: '已授权', + notConfigured: '未配置', + error: '错误', + configuration: '配置', + remove: '移除', + or: '或', + useOAuth: '使用 OAuth', + useApiKey: '使用 API Key', + authenticationFailed: '身份验证失败', + authenticationSuccess: '身份验证成功', + oauthConfigFailed: 'OAuth 配置失败', + configureOAuthClient: '配置 OAuth 客户端', + oauthClientDescription: '配置 OAuth 客户端凭据以启用身份验证', + oauthClientSaved: 'OAuth 客户端配置保存成功', + configureApiKey: '配置 API Key', + apiKeyDescription: '配置 API key 凭据进行身份验证', + apiKeyConfigured: 'API key 配置成功', + configurationFailed: '配置失败', + failedToStart: '启动身份验证流程失败', + credentialsVerified: '凭据验证成功', + credentialVerificationFailed: '凭据验证失败', + verifyAndContinue: '验证并继续', + configureParameters: '配置参数', + parametersDescription: '配置触发器参数和属性', + configurationComplete: '配置完成', + 
configurationCompleteDescription: '您的触发器已成功配置', + configurationCompleteMessage: '您的触发器配置已完成,现在可以使用了。', + parameters: '参数', + properties: '属性', + propertiesDescription: '此触发器的额外配置属性', + noConfigurationRequired: '此触发器不需要额外配置。', + subscriptionName: '订阅名称', + subscriptionNameDescription: '为此触发器订阅输入一个唯一名称', + subscriptionNamePlaceholder: '输入订阅名称...', + subscriptionNameRequired: '订阅名称是必需的', + subscriptionRequired: '需要配置订阅', + notAuthorized: '未授权', + selectSubscription: '选择订阅', + availableSubscriptions: '可用订阅', + addSubscription: '添加新订阅', + removeSubscription: '取消订阅', + subscriptionRemoved: '订阅已成功取消', + }, + }, + tracing: { + stopBy: '由{{user}}终止', + }, + versionHistory: { + title: '版本', + currentDraft: '当前草稿', + latest: '最新', + filter: { + all: '全部', + onlyYours: '仅你的', + onlyShowNamedVersions: '只显示已命名版本', + reset: '重置', + empty: '没有匹配的版本', + }, + defaultName: '未命名', + nameThisVersion: '命名', + editVersionInfo: '编辑信息', + copyId: '复制 ID', + editField: { + title: '标题', + releaseNotes: '发布说明', + titleLengthLimit: '标题不能超过{{limit}}个字符', + releaseNotesLengthLimit: '发布说明不能超过{{limit}}个字符', + }, + releaseNotesPlaceholder: '请描述变更', + restorationTip: '版本回滚后,当前草稿将被覆盖。', + deletionTip: '删除不可逆,请确认。', + action: { + restoreSuccess: '回滚成功', + restoreFailure: '回滚失败', + deleteSuccess: '版本已删除', + deleteFailure: '删除失败', + updateSuccess: '版本信息已更新', + updateFailure: '更新失败', + copyIdSuccess: 'ID 已复制到剪贴板', + }, + }, + debug: { + settingsTab: '设置', + lastRunTab: '上次运行', + relationsTab: '关系', + copyLastRun: '复制上次运行值', + noLastRunFound: '未找到上次运行记录', + noMatchingInputsFound: '上次运行中未找到匹配的输入', + lastRunInputsCopied: '已复制{{count}}个输入值', + copyLastRunError: '复制上次运行输入失败', + noData: { + description: '上次运行的结果将显示在这里', + runThisNode: '运行此节点', + }, + variableInspect: { + title: '变量检查', + emptyTip: '在画布上逐步浏览节点或逐步运行节点后,您可以在变量检查中查看节点变量的当前值', + emptyLink: '了解更多', + clearAll: '重置所有', + clearNode: '清除缓存', + resetConversationVar: '重置会话变量为默认值', + view: '查看记录', + edited: '已编辑', + reset: '还原至上一次运行', + listening: { + 
title: '正在监听触发器事件…', + tip: '您现在可以向 HTTP {{nodeName}} 端点发送测试请求以模拟事件触发,或将其用作实时事件调试的回调 URL。所有输出都可以在变量检查器中直接查看。', + tipPlugin: '现在您可以在 {{- pluginName}} 中创建事件,并在变量检查器中查看这些事件的输出。', + tipSchedule: '正在监听计划触发器事件。\n下一次计划运行时间:{{nextTriggerTime}}', + tipFallback: '正在等待触发器事件,输出结果将在此显示。', + defaultNodeName: '此触发器', + defaultPluginName: '此插件触发器', + defaultScheduleTime: '未设置', + selectedTriggers: '所选触发器', + stopButton: '停止', + }, + trigger: { + normal: '变量检查', + running: '缓存中', + stop: '停止运行', + cached: '查看缓存', + clear: '清除', + }, + envNode: '环境变量', + chatNode: '会话变量', + systemNode: '系统变量', + exportToolTip: '导出变量为文件', + largeData: '大数据 - 仅部分只读预览。请导出查看完整数据。', + largeDataNoExport: '大数据 - 仅部分预览', + export: '导出', + }, + lastOutput: '上次输出', + relations: { + dependencies: '依赖', + dependents: '被依赖', + dependenciesDescription: '本节点依赖的其他节点', + dependentsDescription: '依赖于本节点的其他节点', + noDependencies: '无依赖', + noDependents: '无被依赖', + }, + }, + triggerStatus: { + enabled: '触发器', + disabled: '触发器 • 已禁用', + }, + entryNodeStatus: { + enabled: '开始', + disabled: '开始 • 已禁用', + }, + onboarding: { + title: '选择开始节点来开始', + description: '不同的开始节点具有不同的功能。不用担心,您随时可以更改它们。', + userInputFull: '用户输入(原始开始节点)', + userInputDescription: '允许设置用户输入变量的开始节点,具有Web应用程序、服务API、MCP服务器和工作流即工具功能。', + trigger: '触发器', + triggerDescription: '触发器可以作为工作流的开始节点,例如定时任务、自定义webhook或与其他应用程序的集成。', + back: '返回', + learnMore: '了解更多', + aboutStartNode: '关于开始节点。', + escTip: { + press: '按', + key: 'esc', + toDismiss: '键关闭', + }, + }, +} + +export default translation diff --git a/web/i18n/zh-Hant/workflow.json b/web/i18n/zh-Hant/workflow.json index f4d2199db2..64848d881e 100644 --- a/web/i18n/zh-Hant/workflow.json +++ b/web/i18n/zh-Hant/workflow.json @@ -171,6 +171,7 @@ "common.needConnectTip": "此節點尚未連接到其他節點", "common.needOutputNode": "必須新增輸出節點", "common.needStartNode": "至少必須新增一個起始節點", + "common.noAgentNodes": "沒有可用的 Agent 節點", "common.noHistory": "無歷史記錄", "common.noVar": "沒有變數", "common.notRunning": "尚未運行", @@ -202,6 +203,7 @@ 
"common.runApp": "運行", "common.runHistory": "運行歷史", "common.running": "運行中", + "common.searchAgent": "搜尋 Agent...", "common.searchVar": "搜索變數", "common.setVarValuePlaceholder": "設置變數值", "common.showRunHistory": "顯示運行歷史", @@ -213,6 +215,7 @@ "common.variableNamePlaceholder": "變數名", "common.versionHistory": "版本歷史", "common.viewDetailInTracingPanel": "查看詳細信息", + "common.viewInternals": "檢視內部結構", "common.viewOnly": "只讀", "common.viewRunHistory": "查看運行歷史", "common.workflowAsTool": "發佈為工具", @@ -620,8 +623,10 @@ "nodes.listFilter.outputVars.last_record": "最後一條記錄", "nodes.listFilter.outputVars.result": "篩選結果", "nodes.listFilter.selectVariableKeyPlaceholder": "Select sub variable key(選擇子變數鍵)", + "nodes.llm.addContext": "新增上下文", "nodes.llm.addMessage": "新增消息", "nodes.llm.context": "上下文", + "nodes.llm.contextBlock": "上下文區塊", "nodes.llm.contextTooltip": "您可以導入知識庫作為上下文", "nodes.llm.files": "文件", "nodes.llm.jsonSchema.addChildField": "新增子欄位", @@ -658,6 +663,7 @@ "nodes.llm.reasoningFormat.tagged": "保持思考標籤", "nodes.llm.reasoningFormat.title": "啟用推理標籤分離", "nodes.llm.reasoningFormat.tooltip": "從 think 標籤中提取內容並將其存儲在 reasoning_content 欄位中。", + "nodes.llm.removeContext": "刪除上下文", "nodes.llm.resolution.high": "高", "nodes.llm.resolution.low": "低", "nodes.llm.resolution.name": "分辨率", @@ -758,10 +764,13 @@ "nodes.templateTransform.codeSupportTip": "只支持 Jinja2", "nodes.templateTransform.inputVars": "輸入變數", "nodes.templateTransform.outputVars.output": "轉換後內容", + "nodes.tool.agentPlaceholder": "告訴我 {{paramKey}}...", + "nodes.tool.assembleVariables": "組裝變數", "nodes.tool.authorize": "授權", "nodes.tool.inputVars": "輸入變數", "nodes.tool.insertPlaceholder1": "輸入或按壓", "nodes.tool.insertPlaceholder2": "插入變數", + "nodes.tool.insertPlaceholder3": "添加代理", "nodes.tool.outputVars.files.title": "工具生成的文件", "nodes.tool.outputVars.files.transfer_method": "傳輸方式。值為 remote_url 或 local_file", "nodes.tool.outputVars.files.type": "支持類型。現在只支持圖片", @@ -964,6 +973,7 @@ "panel.scrollToSelectedNode": "捲動至選取的節點", 
"panel.selectNextStep": "選擇下一個節點", "panel.startNode": "起始節點", + "panel.ungroup": "取消群組", "panel.userInputField": "用戶輸入字段", "publishLimit.startNodeDesc": "目前方案最多允許 2 個開始節點,升級後才能發布此工作流程。", "publishLimit.startNodeTitlePrefix": "升級以", @@ -981,6 +991,22 @@ "singleRun.testRunIteration": "測試運行迭代", "singleRun.testRunLoop": "測試運行循環", "skillEditor.referenceFiles": "參考檔案", + "subGraphModal.canvasPlaceholder": "點擊配置內部結構", + "subGraphModal.defaultValueHint": "返回以下值", + "subGraphModal.internalStructure": "內部結構", + "subGraphModal.internalStructureDesc": "@{{name}} 的內部結構", + "subGraphModal.lastRun": "上次執行", + "subGraphModal.noRunHistory": "暫無執行記錄", + "subGraphModal.outputVariables": "輸出變數", + "subGraphModal.settings": "設定", + "subGraphModal.sourceNode": "來源", + "subGraphModal.title": "內部結構", + "subGraphModal.whenOutputIsNone": "當輸出為空時", + "subGraphModal.whenOutputNone.default": "預設值", + "subGraphModal.whenOutputNone.defaultDesc": "返回以下值", + "subGraphModal.whenOutputNone.error": "拋出錯誤", + "subGraphModal.whenOutputNone.errorDesc": "將錯誤傳遞給外部工作流程", + "subGraphModal.whenOutputNone.skip": "跳過此步驟", "tabs.-": "預設", "tabs.addAll": "全部新增", "tabs.agent": "代理策略", diff --git a/web/i18n/zh-Hant/workflow.ts b/web/i18n/zh-Hant/workflow.ts new file mode 100644 index 0000000000..bd4bc720f6 --- /dev/null +++ b/web/i18n/zh-Hant/workflow.ts @@ -0,0 +1,1298 @@ +const translation = { + common: { + undo: '復原', + redo: '重做', + editing: '編輯中', + autoSaved: '自動保存', + unpublished: '未發佈', + published: '已發佈', + publish: '發佈', + update: '更新', + run: '測試運行', + running: '運行中', + inRunMode: '在運行模式中', + inPreview: '預覽中', + inPreviewMode: '預覽中', + preview: '預覽', + viewRunHistory: '查看運行歷史', + runHistory: '運行歷史', + goBackToEdit: '返回編輯模式', + conversationLog: '對話記錄', + debugAndPreview: '預覽', + restart: '重新開始', + currentDraft: '當前草稿', + currentDraftUnpublished: '當前草稿未發佈', + latestPublished: '最新發佈', + publishedAt: '發佈於', + restore: '恢復', + runApp: '運行', + batchRunApp: '批量運行', + accessAPIReference: '訪問 API', + 
embedIntoSite: '嵌入網站', + addTitle: '新增標題...', + addDescription: '新增描述...', + noVar: '沒有變數', + variableNamePlaceholder: '變數名', + searchVar: '搜索變數', + setVarValuePlaceholder: '設置變數值', + needConnectTip: '此節點尚未連接到其他節點', + maxTreeDepth: '每個分支最大限制 {{depth}} 個節點', + needAdd: '必須新增{{node}}節點', + needOutputNode: '必須新增輸出節點', + needAnswerNode: '必須新增直接回覆節點', + workflowProcess: '工作流', + notRunning: '尚未運行', + previewPlaceholder: '在下面的框中輸入內容開始測試聊天機器人', + effectVarConfirm: { + title: '移除變數', + content: '該變數在其他節點中使用。您是否仍要刪除它?', + }, + insertVarTip: '按 \'/\' 鍵快速插入', + processData: '資料處理', + input: '輸入', + output: '輸出', + jinjaEditorPlaceholder: '輸入“/”或“{”插入變數', + viewOnly: '只讀', + showRunHistory: '顯示運行歷史', + enableJinja: '開啟支持 Jinja 模板', + learnMore: '了解更多', + copy: '拷貝', + duplicate: '複製', + addBlock: '新增節點', + pasteHere: '粘貼到這裡', + pointerMode: '指針模式', + handMode: '手模式', + model: '模型', + workflowAsTool: '發佈為工具', + configureRequired: '需要進行配置', + configure: '配置', + manageInTools: '訪問工具頁', + workflowAsToolTip: '工作流更新後需要重新配置工具參數', + viewDetailInTracingPanel: '查看詳細信息', + importDSL: '導入 DSL', + backupCurrentDraft: 'Backup Current Draft', + overwriteAndImport: '覆蓋和導入', + importSuccess: '導入成功', + chooseDSL: '選擇 DSL(yml)檔', + syncingData: '同步資料,只需幾秒鐘。', + importDSLTip: '當前草稿將被覆蓋。在導入之前將工作流匯出為備份。', + importFailure: '匯入失敗', + parallelTip: { + click: { + title: '點擊', + desc: '新增', + }, + drag: { + title: '拖動', + desc: '連接', + }, + limit: '並行度僅限於 {{num}} 個分支。', + depthLimit: '並行嵌套層限制為 {{num}} 個層', + }, + disconnect: '斷開', + jumpToNode: '跳轉到此節點', + addParallelNode: '新增並行節點', + parallel: '並行', + branch: '分支', + fileUploadTip: '圖片上傳功能已升級為檔上傳。', + ImageUploadLegacyTip: '現在,您可以在起始表單中創建檔案類型變數。我們將來不再支持圖片上傳功能。', + importWarning: '謹慎', + importWarningDetails: 'DSL 版本差異可能會影響某些功能', + openInExplore: '在“探索”中打開', + onFailure: '失敗時', + addFailureBranch: '新增 Fail Branch', + loadMore: '載入更多工作流', + noHistory: '無歷史記錄', + publishUpdate: '發布更新', + exportSVG: '匯出為 SVG', + exportPNG: '匯出為 PNG', + versionHistory: 
'版本歷史', + exitVersions: '退出版本', + exportImage: '匯出圖像', + exportJPEG: '匯出為 JPEG', + tagBound: '使用此標籤的應用程式數量', + currentView: '當前檢視', + currentWorkflow: '當前工作流程', + moreActions: '更多動作', + listening: '聆聽', + chooseStartNodeToRun: '選擇要執行的起始節點', + runAllTriggers: '執行所有觸發器', + features: '功能', + featuresDescription: '提升網頁應用程式的使用者體驗', + featuresDocLink: '了解更多', + needStartNode: '至少必須新增一個起始節點', + workflowAsToolDisabledHint: '發布最新的工作流程,並確保在將其配置為工具之前有一個已連接的使用者輸入節點。', + }, + publishLimit: { + startNodeTitlePrefix: '升級以', + startNodeTitleSuffix: '解鎖無限開始節點', + startNodeDesc: '目前方案最多允許 2 個開始節點,升級後才能發布此工作流程。', + }, + env: { + envPanelTitle: '環境變數', + envDescription: '環境變數可用於存儲私人信息和憑證。它們是唯讀的,並且可以在導出時與 DSL 文件分開。', + envPanelButton: '新增變數', + modal: { + title: '新增環境變數', + editTitle: '編輯環境變數', + type: '類型', + name: '名稱', + namePlaceholder: '環境名稱', + value: '值', + valuePlaceholder: '環境值', + secretTip: '用於定義敏感信息或資料,DSL 設置配置為防止洩露。', + description: '描述', + descriptionPlaceholder: '描述此變數', + }, + export: { + title: '導出機密環境變數?', + checkbox: '導出機密值', + ignore: '導出 DSL', + export: '導出帶有機密值的 DSL', + }, + }, + globalVar: { + title: '系統變數', + description: '系統變數是全域變數,在類型符合時可由任意節點在無需連線的情況下引用,例如終端使用者 ID 與工作流程 ID。', + fieldsDescription: { + conversationId: '對話 ID', + dialogCount: '對話次數', + userId: '使用者 ID', + triggerTimestamp: '應用程式開始運行的時間戳', + appId: '應用程式 ID', + workflowId: '工作流程 ID', + workflowRunId: '工作流程執行 ID', + }, + }, + chatVariable: { + panelTitle: '對話變數', + panelDescription: '對話變數用於儲存 LLM 需要記住的互動資訊,包括對話歷史、上傳的檔案、使用者偏好等。這些變數可讀寫。', + docLink: '查看我們的文件以了解更多。', + button: '新增變數', + modal: { + title: '新增對話變數', + editTitle: '編輯對話變數', + name: '名稱', + namePlaceholder: '變數名稱', + type: '類型', + value: '預設值', + valuePlaceholder: '預設值,留空則不設定', + description: '描述', + descriptionPlaceholder: '描述此變數', + editInJSON: '以 JSON 編輯', + oneByOne: '逐一新增', + editInForm: '在表單中編輯', + arrayValue: '值', + addArrayValue: '新增值', + objectKey: '鍵', + objectType: '類型', + objectValue: '預設值', + }, + storedContent: '已儲存內容', + 
updatedAt: '更新於 ', + }, + changeHistory: { + title: '變更履歷', + placeholder: '尚未更改任何內容', + clearHistory: '清除歷史記錄', + hint: '提示', + hintText: '您的編輯操作將被跟踪並存儲在您的設備上,直到您離開編輯器。此歷史記錄將在您離開編輯器時被清除。', + stepBackward_one: '{{count}} 步後退', + stepBackward_other: '{{count}} 步後退', + stepForward_one: '{{count}} 步前進', + stepForward_other: '{{count}} 步前進', + sessionStart: '會話開始', + currentState: '當前狀態', + nodeTitleChange: '區塊標題已更改', + nodeDescriptionChange: '區塊描述已更改', + nodeDragStop: '區塊已移動', + nodeChange: '區塊已更改', + nodeConnect: '區塊已連接', + nodePaste: '區塊已粘貼', + nodeDelete: '區塊已刪除', + nodeAdd: '區塊已新增', + nodeResize: '區塊已調整大小', + noteAdd: '註釋已新增', + noteChange: '註釋已更改', + edgeDelete: '區塊已斷開連接', + noteDelete: '註釋已刪除', + }, + errorMsg: { + fieldRequired: '{{field}} 不能為空', + authRequired: '請先授權', + invalidJson: '{{field}} 是非法的 JSON', + fields: { + variable: '變數名', + variableValue: '變數值', + code: '程式碼', + model: '模型', + rerankModel: 'Rerank 模型', + visionVariable: 'Vision Variable', + }, + invalidVariable: '無效的變數', + rerankModelRequired: '在開啟 Rerank 模型之前,請在設置中確認模型配置成功。', + toolParameterRequired: '{{field}}:参數 [{{param}}] 為必填項', + noValidTool: '{{field}} 未選擇有效工具', + startNodeRequired: '請先新增一個起始節點,再執行 {{operation}}', + }, + singleRun: { + testRun: '測試運行', + startRun: '開始運行', + running: '運行中', + testRunIteration: '測試運行迭代', + back: '返回', + iteration: '迭代', + loop: '循環', + preparingDataSource: '準備資料來源', + reRun: '重新運行', + }, + tabs: { + 'searchBlock': '搜索節點', + 'blocks': '節點', + 'tools': '工具', + 'searchTrigger': '搜尋觸發器...', + 'allTriggers': '所有觸發器', + 'allTool': '全部', + 'customTool': '自定義', + 'workflowTool': '工作流', + 'question-understand': '問題理解', + 'logic': '邏輯', + 'transform': '轉換', + 'utilities': '工具', + 'noResult': '未找到匹配項', + 'searchTool': '搜索工具', + 'agent': '代理策略', + 'plugin': '插件', + 'allAdded': '所有已新增的', + 'addAll': '全部新增', + 'sources': '來源', + 'searchDataSource': '搜尋資料來源', + 'noFeaturedPlugins': '前往 Marketplace 查看更多工具', + 'noFeaturedTriggers': '前往 Marketplace 查看更多觸發器', + 'start': '開始', 
+ 'noPluginsFound': '未找到任何插件', + 'requestToCommunity': '對社群的請求', + 'featuredTools': '精選', + 'showMoreFeatured': '顯示更多', + 'showLessFeatured': '顯示較少', + 'installed': '已安裝', + 'pluginByAuthor': '由 {{author}}', + 'usePlugin': '選取工具', + 'hideActions': '隱藏工具', + 'startDisabledTip': '觸發節點與使用者輸入節點是互斥的。', + }, + blocks: { + 'start': '開始', + 'end': '輸出', + 'answer': '直接回覆', + 'llm': 'LLM', + 'knowledge-retrieval': '知識檢索', + 'question-classifier': '問題分類器', + 'if-else': '條件分支', + 'code': '程式碼執行', + 'template-transform': '模板轉換', + 'http-request': 'HTTP 請求', + 'variable-assigner': '變數聚合器', + 'variable-aggregator': '變數聚合器', + 'assigner': '變數分配器', + 'iteration-start': '迭代開始', + 'iteration': '迭代', + 'parameter-extractor': '參數提取器', + 'list-operator': '清單運算子', + 'document-extractor': '文件提取器', + 'agent': '代理', + 'loop-start': '循環開始', + 'loop': '循環', + 'loop-end': '退出循環', + 'knowledge-index': '知識庫', + 'datasource': '資料來源', + 'originalStartNode': '原始起始節點', + 'trigger-schedule': '排程觸發', + 'trigger-webhook': 'Webhook 觸發', + 'trigger-plugin': '插件觸發器', + }, + blocksAbout: { + 'start': '定義一個 workflow 流程啟動的參數', + 'end': '定義一個 workflow 流程的輸出和結果類型', + 'answer': '定義一個聊天對話的回覆內容', + 'llm': '調用大語言模型回答問題或者對自然語言進行處理', + 'knowledge-retrieval': '允許你從知識庫中查詢與用戶問題相關的文本內容', + 'question-classifier': '定義用戶問題的分類條件,LLM 能夠根據分類描述定義對話的進展方式', + 'if-else': '允許你根據 if/else 條件將 workflow 拆分成兩個分支', + 'code': '執行一段 Python 或 NodeJS 程式碼實現自定義邏輯', + 'template-transform': '使用 Jinja 模板語法將資料轉換為字符串', + 'http-request': '允許通過 HTTP 協議發送服務器請求', + 'variable-assigner': '將多路分支的變數聚合為一個變數,以實現下游節點統一配置。', + 'assigner': '變數分配節點用於為可寫入的變數(如對話變數)分配值。', + 'variable-aggregator': '將多路分支的變數聚合為一個變數,以實現下游節點統一配置。', + 'iteration': '對列表對象執行多次步驟直至輸出所有結果。', + 'parameter-extractor': '利用 LLM 從自然語言內推理提取出結構化參數,用於後置的工具調用或 HTTP 請求。', + 'document-extractor': '用於將上傳的文件解析為 LLM 易於理解的文字內容。', + 'list-operator': '用於篩選或排序陣列內容。', + 'agent': '調用大型語言模型來回答問題或處理自然語言', + 'loop-end': '等同於「中斷」。這個節點沒有配置項目。當循環體達到這個節點時,循環終止。', + 'loop': '執行邏輯迴圈,直到滿足終止條件或達到最大迴圈次數。', + 
'datasource': '資料來源 關於', + 'knowledge-index': '知識庫 關於', + 'trigger-schedule': '基於時間的工作流程觸發器,可按計劃啟動工作流程', + 'trigger-webhook': 'Webhook 觸發器接收來自第三方系統的 HTTP 推送,以自動觸發工作流程。', + 'trigger-plugin': '第三方整合觸發器,從外部平台事件啟動工作流程', + 'group': '將多個節點組合在一起,以便更好地組織和管理', + }, + operator: { + zoomIn: '放大', + zoomOut: '縮小', + zoomTo50: '縮放到 50%', + zoomTo100: '放大到 100%', + zoomToFit: '自適應視圖', + makeGroup: '建立群組', + alignNodes: '對齊節點', + distributeVertical: '垂直等間距', + alignLeft: '左對齊', + distributeHorizontal: '水平等間距', + vertical: '垂直', + alignTop: '頂部對齊', + alignCenter: '居中對齊', + horizontal: '水平', + selectionAlignment: '選擇對齊', + alignRight: '右對齊', + alignBottom: '底部對齊', + alignMiddle: '中部對齊', + }, + panel: { + userInputField: '用戶輸入字段', + changeBlock: '更改節點', + helpLink: '查看幫助文件', + about: '關於', + createdBy: '作者', + nextStep: '下一步', + addNextStep: '新增此工作流程中的下一個節點', + selectNextStep: '選擇下一個節點', + runThisStep: '運行此步驟', + checklist: '檢查清單', + checklistTip: '發佈前確保所有問題均已解決', + checklistResolved: '所有問題均已解決', + organizeBlocks: '整理節點', + change: '更改', + optional: '(選擇性)', + minimize: '退出全螢幕', + maximize: '最大化畫布', + scrollToSelectedNode: '捲動至選取的節點', + optional_and_hidden: '(可選且隱藏)', + goTo: '前往', + startNode: '起始節點', + openWorkflow: '打開工作流程', + }, + nodes: { + common: { + outputVars: '輸出變數', + insertVarTip: '插入變數', + memory: { + memory: '記憶', + memoryTip: '聊天記憶設置', + windowSize: '記憶窗口', + conversationRoleName: '對話角色名', + user: '用戶前綴', + assistant: '助手前綴', + }, + memories: { + title: '記憶', + tip: '聊天記憶', + builtIn: '內置', + }, + errorHandle: { + none: { + title: '沒有', + desc: '如果發生異常且未得到處理,節點將停止運行', + }, + defaultValue: { + title: '預設值', + desc: '發生錯誤時,請指定靜態輸出內容。', + tip: '出錯時,將返回以下值。', + inLog: 'Node 異常,按照預設值輸出。', + output: '輸出預設值', + }, + failBranch: { + title: '失敗分支', + desc: '當發生錯誤時,它會執行 exception 分支', + customize: '轉到畫布以自定義 fail 分支邏輯。', + inLog: 'Node 異常,將自動執行 fail 分支。節點輸出將返回錯誤類型和錯誤消息,並將其傳遞給下游。', + customizeTip: '啟動 fail 分支後,節點引發的異常不會終止進程。相反,它將自動執行預定義的 fail 分支,允許您靈活地提供錯誤消息、報告、修復或跳過操作。', + }, + 
partialSucceeded: { + tip: '進程中有 {{num}} 個節點運行異常,請前往 tracing 查看日誌。', + }, + title: '錯誤處理', + tip: '異常處理策略,當節點遇到異常時觸發。', + }, + retry: { + retry: '重試', + retryOnFailure: '失敗時重試', + maxRetries: '最大重試次數', + retryInterval: '重試間隔', + retryTimes: '失敗時重試 {{times}} 次', + retrying: '重試。。。', + retrySuccessful: '重試成功', + retryFailed: '重試失敗', + retryFailedTimes: '{{times}} 次重試失敗', + times: '次', + ms: '毫秒', + retries: '{{num}}重試', + }, + typeSwitch: { + input: '輸入值', + variable: '使用變數', + }, + inputVars: '輸入變數', + pluginNotInstalled: '插件未安裝', + }, + start: { + required: '必填', + inputField: '輸入字段', + builtInVar: '內置變數', + outputVars: { + query: '用戶輸入', + memories: { + des: '會話歷史', + type: '消息類型', + content: '消息內容', + }, + files: '文件列表', + }, + noVarTip: '設置的輸入可在工作流程中使用', + }, + end: { + outputs: '輸出', + output: { + type: '輸出類型', + variable: '輸出變數', + }, + type: { + 'none': '無', + 'plain-text': '純文本', + 'structured': '結構化', + }, + }, + answer: { + answer: '回覆', + outputVars: '輸出變數', + }, + llm: { + model: '模型', + variables: '變數', + context: '上下文', + contextTooltip: '您可以導入知識庫作為上下文', + notSetContextInPromptTip: '要啟用上下文功能,請在提示中填寫上下文變數。', + prompt: '提示詞', + addMessage: '新增消息', + roleDescription: { + system: '為對話提供高層指導', + user: '向模型提供指令、查詢或任何基於文本的輸入', + assistant: '基於用戶消息的模型回覆', + }, + vision: '視覺', + files: '文件', + resolution: { + name: '分辨率', + high: '高', + low: '低', + }, + outputVars: { + output: '生成內容', + reasoning_content: '推理內容', + usage: '模型用量信息', + }, + singleRun: { + variable: '變數', + }, + sysQueryInUser: 'user message 中必須包含 sys.query', + jsonSchema: { + warningTips: { + saveSchema: '請在保存結構之前完成當前欄位的編輯', + }, + resetDefaults: '重置', + instruction: '指示', + apply: '申請', + promptPlaceholder: '描述你的 JSON 架構...', + addField: '新增字段', + generate: '生成', + descriptionPlaceholder: '新增描述', + fieldNamePlaceholder: '欄位名稱', + showAdvancedOptions: '顯示進階選項', + import: '從 JSON 匯入', + generatedResult: '生成的結果', + generateJsonSchema: '生成 JSON 架構', + promptTooltip: '將文本描述轉換成標準化的 JSON Schema 結構。', + 
doc: '了解更多有關結構化輸出的資訊', + addChildField: '新增子欄位', + title: '結構化輸出模式', + regenerate: '重新生成', + stringValidations: '字串驗證', + generationTip: '您可以使用自然語言快速創建 JSON Schema。', + generating: '生成 JSON 架構...', + back: '返回', + required: '必需的', + resultTip: '這是生成的結果。如果您不滿意,可以回去修改您的提示。', + }, + reasoningFormat: { + title: '啟用推理標籤分離', + tooltip: '從 think 標籤中提取內容並將其存儲在 reasoning_content 欄位中。', + tagged: '保持思考標籤', + separated: '分開思考標籤', + }, + }, + knowledgeRetrieval: { + queryVariable: '查詢變數', + knowledge: '知識庫', + outputVars: { + output: '檢索的分段', + content: '分段內容', + title: '分段標題', + icon: '分段圖標', + url: '分段鏈接', + metadata: '其他元資料', + files: '已檢索的檔案', + }, + metadata: { + options: { + disabled: { + subTitle: '不啟用元資料過濾', + title: '禁用', + }, + automatic: { + title: '自動的', + subTitle: '根據用戶查詢自動生成元資料過濾條件', + desc: '根據查詢變數自動生成元資料過濾條件', + }, + manual: { + title: '手動', + subTitle: '手動新增元資料過濾條件', + }, + }, + panel: { + add: '新增條件', + datePlaceholder: '選擇一個時間...', + search: '搜尋元資料', + conditions: '條件', + title: '元資料過濾條件', + select: '選擇變數...', + placeholder: '輸入數值', + }, + title: '元資料過濾', + tip: '元資料過濾是使用元資料屬性(如標籤、類別或訪問權限)來精煉和控制在系統內檢索相關信息的過程。', + }, + queryText: '查詢文字', + queryAttachment: '查詢圖片', + }, + http: { + inputVars: '輸入變數', + api: 'API', + apiPlaceholder: '輸入 URL,輸入變數時請鍵入‘/’', + notStartWithHttp: 'API 應該以 http:// 或 https:// 開頭', + key: '鍵', + value: '值', + bulkEdit: '批量編輯', + keyValueEdit: '鍵值編輯', + headers: 'Headers', + params: 'Params', + body: 'Body', + outputVars: { + body: '響應內容', + statusCode: '響應狀態碼', + headers: '響應頭列表 JSON', + files: '文件列表', + }, + authorization: { + 'authorization': '鑑權', + 'authorizationType': '鑑權類型', + 'no-auth': '無', + 'api-key': 'API-Key', + 'auth-type': 'API 鑑權類型', + 'basic': '基礎', + 'bearer': 'Bearer', + 'custom': '自定義', + 'api-key-title': 'API Key', + 'header': 'Header', + }, + insertVarPlaceholder: '鍵入 \'/\' 鍵快速插入變數', + timeout: { + title: '超時設置', + connectLabel: '連接超時', + connectPlaceholder: '輸入連接超時(以秒為單位)', + readLabel: '讀取超時', + readPlaceholder: 
'輸入讀取超時(以秒為單位)', + writeLabel: '寫入超時', + writePlaceholder: '輸入寫入超時(以秒為單位)', + }, + type: '類型', + binaryFileVariable: '二進位檔變數', + extractListPlaceholder: '輸入清單項索引,鍵入 『/』 插入變數', + curl: { + placeholder: '在此處粘貼 cURL 字串', + title: '從 cURL 導入', + }, + verifySSL: { + title: '驗證 SSL 證書', + warningTooltip: '不建議在生產環境中禁用SSL驗證。這僅應用於開發或測試,因為這樣會使連接容易受到中間人攻擊等安全威脅的威脅。', + }, + }, + code: { + inputVars: '輸入變數', + outputVars: '輸出變數', + advancedDependencies: '高級依賴', + advancedDependenciesTip: '在這裡新增一些預加載需要消耗較多時間或非默認內置的依賴包', + searchDependencies: '搜索依賴', + syncFunctionSignature: '同步函數簽名至代碼', + }, + templateTransform: { + inputVars: '輸入變數', + code: '模板程式碼', + codeSupportTip: '只支持 Jinja2', + outputVars: { + output: '轉換後內容', + }, + }, + ifElse: { + if: 'If', + else: 'Else', + elseDescription: '用於定義當 if 條件不滿足時應執行的邏輯。', + and: 'and', + or: 'or', + operator: '操作符', + notSetVariable: '請先設置變數', + comparisonOperator: { + 'contains': '包含', + 'not contains': '不包含', + 'start with': '開始是', + 'end with': '結束是', + 'is': '是', + 'is not': '不是', + 'empty': '為空', + 'not empty': '不為空', + 'null': '空', + 'not null': '不為空', + 'all of': '全部', + 'exists': '存在', + 'in': '在', + 'not in': '不在', + 'not exists': '不存在', + 'after': '之後', + 'before': '之前', + }, + enterValue: '輸入值', + addCondition: '新增條件', + conditionNotSetup: '條件未設置', + selectVariable: '選擇變數...', + optionName: { + image: '圖像', + url: '網址', + doc: '文檔', + localUpload: '本地上傳', + video: '視頻', + audio: '音訊', + }, + select: '選擇', + addSubVariable: '子變數', + }, + variableAssigner: { + title: '變數賦值', + outputType: '輸出類型', + varNotSet: '未設置變數', + noVarTip: '新增需要賦值的變數', + type: { + string: 'String', + number: 'Number', + object: 'Object', + array: 'Array', + }, + aggregationGroup: '聚合分組', + aggregationGroupTip: '開啟該功能後,變數聚合器內可以同時聚合多組變數', + addGroup: '新增分組', + outputVars: { + varDescribe: '{{groupName}}的輸出變數', + }, + setAssignVariable: '設置賦值變數', + }, + assigner: { + 'assignedVariable': '已分配變數', + 'writeMode': '寫入模式', + 'writeModeTip': '當已分配變數是陣列時,附加模式會新增到末尾。', 
+ 'over-write': '覆寫', + 'append': '附加', + 'plus': '加', + 'clear': '清除', + 'setVariable': '設定變數', + 'variable': '變數', + 'operations': { + 'overwrite': '覆寫', + '/=': '/=', + 'title': '操作', + '*=': '*=', + 'extend': '擴展', + '+=': '+=', + 'set': '設置', + 'over-write': '覆寫', + '-=': '-=', + 'append': '附加', + 'clear': '清除', + 'remove-first': '移除首項', + 'remove-last': '移除末項', + }, + 'noAssignedVars': '沒有可用的已分配變數', + 'variables': '變數', + 'selectAssignedVariable': '選擇配置的變數...', + 'setParameter': '設定參數...', + 'noVarTip': '點擊「+」按鈕新增變數', + 'assignedVarsDescription': '分配的變數必須是可寫變數,例如對話變數。', + 'varNotSet': '未設置變數', + }, + tool: { + authorize: '授權', + inputVars: '輸入變數', + outputVars: { + text: '工具生成的內容', + files: { + title: '工具生成的文件', + type: '支持類型。現在只支持圖片', + transfer_method: '傳輸方式。值為 remote_url 或 local_file', + url: '圖片鏈接', + upload_file_id: '上傳文件 ID', + }, + json: '工具生成的 JSON', + }, + insertPlaceholder2: '插入變數', + insertPlaceholder1: '輸入或按壓', + settings: '設定', + }, + questionClassifiers: { + model: '模型', + inputVars: '輸入變數', + outputVars: { + className: '分類名稱', + usage: '模型用量信息', + }, + class: '分類', + classNamePlaceholder: '輸入你的分類名稱', + advancedSetting: '高級設置', + topicName: '主題內容', + topicPlaceholder: '在這裡輸入你的主題內容', + addClass: '新增分類', + instruction: '指令', + instructionTip: '你可以輸入額外的附加指令,幫助問題分類器更好的理解如何分類', + instructionPlaceholder: '在這裡輸入你的指令', + }, + parameterExtractor: { + inputVar: '輸入變數', + outputVars: { + isSuccess: '是否成功。成功時值為 1,失敗時值為 0。', + errorReason: '錯誤原因', + usage: '模型用量信息', + }, + extractParameters: '提取參數', + importFromTool: '從工具導入', + addExtractParameter: '新增提取參數', + addExtractParameterContent: { + name: '名稱', + namePlaceholder: '提取參數名稱', + type: '類型', + typePlaceholder: '提取參數類型', + description: '描述', + descriptionPlaceholder: '提取參數描述', + required: '必填', + requiredContent: '必填僅作為模型推理的參考,不用於參數輸出的強制驗證。', + }, + extractParametersNotSet: '提取參數未設置', + instruction: '指令', + instructionTip: '你可以輸入額外的附加指令,幫助參數提取器理解如何提取參數', + advancedSetting: '高級設置', + reasoningMode: '推理模式', 
+ reasoningModeTip: '你可以根據模型對於 Function calling 或 Prompt 的指令響應能力選擇合適的推理模式', + }, + iteration: { + deleteTitle: '刪除迭代節點?', + deleteDesc: '刪除迭代節點將刪除所有子節點', + input: '輸入', + output: '輸出變數', + iteration_one: '{{count}}個迭代', + iteration_other: '{{count}}個迭代', + currentIteration: '當前迭代', + ErrorMethod: { + operationTerminated: '終止', + removeAbnormalOutput: 'remove-abnormal-output', + continueOnError: '出錯時繼續', + }, + comma: ',', + parallelMode: '並行模式', + parallelModeEnableTitle: 'Parallel Mode 已啟用', + MaxParallelismTitle: '最大並行度', + parallelModeUpper: '並行模式', + parallelPanelDesc: '在並行模式下,反覆運算中的任務支援並行執行。', + error_one: '{{count}}錯誤', + errorResponseMethod: '錯誤回應方法', + parallelModeEnableDesc: '在並行模式下,反覆運算中的任務支援並行執行。您可以在右側的 properties 面板中進行配置。', + answerNodeWarningDesc: '並行模式警告:反覆運算中的應答節點、對話變數賦值和持久讀/寫操作可能會導致異常。', + error_other: '{{count}}錯誤', + MaxParallelismDesc: '最大並行度用於控制在單個反覆運算中同時執行的任務數。', + flattenOutput: '展平成輸出', + flattenOutputDesc: '啟用時,如果所有的迭代輸出都是陣列,它們將被展平成單一陣列。禁用時,輸出將保持巢狀陣列結構。', + }, + note: { + editor: { + link: '連結', + openLink: '打開', + medium: '中等', + small: '小', + invalidUrl: 'URL 無效', + italic: '斜體的', + bulletList: '項目符號清單', + large: '大', + unlink: '取消連結', + enterUrl: '輸入網址...', + bold: '大膽', + showAuthor: '顯示作者', + strikethrough: '刪除線', + placeholder: '寫下您的筆記...', + }, + addNote: '新增註解', + }, + docExtractor: { + outputVars: { + text: '提取的文字', + }, + learnMore: '瞭解更多資訊', + inputVar: '輸入變數', + supportFileTypes: '支援文件類型:{{types}}。', + }, + listFilter: { + outputVars: { + last_record: '最後一條記錄', + first_record: '第一條記錄', + result: '篩選結果', + }, + desc: '描述', + asc: 'ASC 的', + orderBy: '排序依據', + inputVar: '輸入變數', + filterConditionComparisonValue: 'Filter Condition 值', + filterCondition: '篩選條件', + limit: '前 N 名', + selectVariableKeyPlaceholder: 'Select sub variable key(選擇子變數鍵)', + filterConditionComparisonOperator: 'Filter Condition Comparison 運算符', + filterConditionKey: '篩選條件鍵', + extractsCondition: '提取第 N 項', + }, + agent: { + strategy: { + label: '代理策略', + 
shortLabel: '策略', + tooltip: '不同的 Agentic 策略決定了系統如何規劃和執行多步驟工具調用', + configureTip: '請配置 agentic 策略。', + searchPlaceholder: '搜索代理策略', + selectTip: '選擇代理策略', + configureTipDesc: '配置代理策略后,該節點將自動載入剩餘的配置。該策略將影響多步驟工具推理的機制。', + }, + pluginInstaller: { + installing: '安裝', + install: '安裝', + }, + modelNotInMarketplace: { + title: '未安裝模型', + manageInPlugins: '在插件中管理', + desc: '此模型是從 Local 或 GitHub 儲存庫安裝的。請在安裝後使用。', + }, + modelNotSupport: { + title: '不支援的模型', + desc: '已安裝的插件版本不提供此模型。', + descForVersionSwitch: '已安裝的插件版本不提供此模型。點擊以切換版本。', + }, + modelSelectorTooltips: { + deprecated: '此模型已棄用', + }, + outputVars: { + files: { + type: '支援類型。現在僅支援圖片', + transfer_method: '傳輸方式。值為 remote_url 或 local_file', + title: '代理生成的檔案', + url: '圖片網址', + upload_file_id: '上傳檔案 ID', + }, + text: '代理生成的內容', + usage: '模型用量信息', + json: '代理生成的 JSON', + }, + checkList: { + strategyNotSelected: '未選擇策略', + }, + installPlugin: { + title: '安裝插件', + changelog: '更新日誌', + cancel: '取消', + desc: '即將安裝以下插件', + install: '安裝', + }, + pluginNotFoundDesc: '此插件是從 GitHub 安裝的。請前往插件 重新安裝', + modelNotSelected: '未選擇模型', + tools: '工具', + strategyNotFoundDesc: '已安裝的插件版本不提供此策略。', + pluginNotInstalledDesc: '此插件是從 GitHub 安裝的。請前往插件 重新安裝', + strategyNotFoundDescAndSwitchVersion: '已安裝的插件版本不提供此策略。點擊以切換版本。', + strategyNotInstallTooltip: '{{strategy}} 未安裝', + toolNotAuthorizedTooltip: '{{tool}} 未授權', + unsupportedStrategy: '不支援的策略', + model: '模型', + modelNotInstallTooltip: '此模型未安裝', + strategyNotSet: '代理策略未設置', + toolNotInstallTooltip: '{{tool}} 未安裝', + maxIterations: '最大迭代次數', + toolbox: '工具箱', + configureModel: '配置模型', + learnMore: '瞭解更多資訊', + linkToPlugin: '連結到插件', + pluginNotInstalled: '此插件未安裝', + notAuthorized: '未授權', + clickToViewParameterSchema: '點擊查看參數架構', + parameterSchema: '參數架構', + }, + loop: { + ErrorMethod: { + operationTerminated: '終止', + continueOnError: '出錯時繼續', + removeAbnormalOutput: '移除異常輸出', + }, + loop_other: '{{count}} 循環', + variableName: '變數名稱', + error_one: '{{count}} 錯誤', + loopMaxCount: '最大迴圈次數', + input:
'輸入', + loopVariables: '循環變數', + output: '輸出變數', + comma: ',', + errorResponseMethod: '錯誤回應方法', + breakCondition: '迴圈終止條件', + loopMaxCountError: '請輸入一個有效的最大迴圈次數,範圍為 1 到 {{maxCount}}', + loop_one: '{{count}} 次循環', + exitConditionTip: '循環節點至少需要一個退出條件', + breakConditionTip: '只有在具有終止條件的循環內和對話變數中,才能引用變數。', + totalLoopCount: '總迴圈次數:{{count}}', + error_other: '{{count}} 錯誤', + currentLoop: '當前循環', + finalLoopVariables: '最後迴圈變數', + currentLoopCount: '當前循環次數:{{count}}', + inputMode: '輸入模式', + loopNode: '循環節點', + initialLoopVariables: '初始迴圈變數', + deleteDesc: '刪除循環節點將移除所有子節點', + setLoopVariables: '在迴圈範圍內設置變數', + deleteTitle: '刪除循環節點嗎?', + }, + dataSource: { + add: '新增資料來源', + supportedFileFormats: '支援的檔案格式', + supportedFileFormatsPlaceholder: '檔案副檔名, e.g. doc', + }, + knowledgeBase: { + chunkStructureTip: { + learnMore: '瞭解詳情', + title: '請選擇區塊結構', + message: 'Dify 知識庫支援三種區塊結構:一般、親子和 Q&A。每個知識庫只能有一個結構。前一個節點的輸出必須與選取的區塊結構一致。請注意,區塊結構的選擇會影響可用的索引方法。', + }, + chunkIsRequired: '需要區塊結構', + aboutRetrieval: '關於檢索方法。', + chooseChunkStructure: '選擇區塊結構', + indexMethodIsRequired: '索引方法是必填的', + chunkStructure: '區塊結構', + changeChunkStructure: '變更區塊結構', + retrievalSettingIsRequired: '需要檢索設定', + chunksInput: '區塊', + chunksInputTip: '知識庫節點的輸入變數是 Chunks。該變數類型是一個物件,具有特定的 JSON Schema,必須與所選的塊結構一致。', + rerankingModelIsRequired: '需要重新排序模型', + chunksVariableIsRequired: 'Chunks 變數是必需的', + embeddingModelIsRequired: '需要嵌入模型', + rerankingModelIsInvalid: '重排序模型無效', + embeddingModelIsInvalid: '嵌入模型無效', + }, + triggerPlugin: { + authorized: '已授權', + notConfigured: '未設定', + notAuthorized: '未被授權', + selectSubscription: '選擇訂閱', + availableSubscriptions: '可用訂閱', + addSubscription: '新增訂閱', + removeSubscription: '取消訂閱', + subscriptionRemoved: '訂閱已成功移除', + error: '錯誤', + configuration: '配置', + remove: '移除', + or: '或', + useOAuth: '使用 OAuth', + useApiKey: '使用 API 金鑰', + authenticationFailed: '驗證失敗', + authenticationSuccess: '驗證成功', + oauthConfigFailed: 'OAuth 配置失敗', + configureOAuthClient: '配置 OAuth 客戶端', +
oauthClientDescription: '配置 OAuth 客戶端憑證以啟用身份驗證', + oauthClientSaved: 'OAuth 用戶端設定已成功儲存', + configureApiKey: '配置 API 金鑰', + apiKeyDescription: '配置 API 金鑰憑證以進行身份驗證', + apiKeyConfigured: 'API 金鑰設定成功', + configurationFailed: '配置失敗', + failedToStart: '啟動驗證流程失敗', + credentialsVerified: '憑證驗證成功', + credentialVerificationFailed: '憑證驗證失敗', + verifyAndContinue: '驗證並繼續', + configureParameters: '配置參數', + parametersDescription: '配置觸發器參數和屬性', + configurationComplete: '配置完成', + configurationCompleteDescription: '您的觸發器已成功配置', + configurationCompleteMessage: '您的觸發器配置現已完成並可使用。', + parameters: '參數', + properties: '屬性', + propertiesDescription: '此觸發器的額外配置屬性', + noConfigurationRequired: '此觸發器無需額外配置。', + subscriptionName: '訂閱名稱', + subscriptionNameDescription: '為此觸發器訂閱輸入一個唯一名稱', + subscriptionNamePlaceholder: '輸入訂閱名稱...', + subscriptionNameRequired: '需要訂閱名稱', + subscriptionRequired: '需要訂閱', + }, + triggerSchedule: { + title: '時間表', + nodeTitle: '排程觸發', + notConfigured: '未配置', + useCronExpression: '使用 cron 表達式', + useVisualPicker: '使用視覺選擇器', + frequency: { + label: '頻率', + hourly: '每小時', + daily: '每日', + weekly: '每週', + monthly: '每月', + }, + selectFrequency: '選擇頻率', + frequencyLabel: '頻率', + nextExecution: '下一次執行', + weekdays: '平日', + time: '時間', + cronExpression: 'Cron 表達式', + nextExecutionTime: '下一次執行時間', + nextExecutionTimes: '接下來的 5 次執行時間', + startTime: '開始時間', + executeNow: '立即執行', + selectDateTime: '選擇日期和時間', + hours: '小時', + minutes: '分鐘', + onMinute: '在第幾分鐘', + days: '天', + lastDay: '最後一天', + lastDayTooltip: '並非所有月份都有31天。使用「最後一天」選項來選擇每個月的最後一天。', + mode: '模式', + timezone: '時區', + visualConfig: '視覺配置', + monthlyDay: '每月日期', + executionTime: '執行時間', + invalidTimezone: '無效的時區', + invalidCronExpression: '無效的 cron 表達式', + noValidExecutionTime: '無法計算有效的執行時間', + executionTimeCalculationError: '無法計算執行時間', + invalidFrequency: '無效頻率', + invalidStartTime: '開始時間無效', + startTimeMustBeFuture: '開始時間必須是未來的時間', + invalidTimeFormat: '時間格式無效(預期為 HH:MM AM/PM)', + invalidWeekday: '無效的星期:{{weekday}}',
+ invalidMonthlyDay: '每月日期必須在 1 到 31 之間或為「最後一天」', + invalidOnMinute: '分鐘必須介於 0 到 59 之間', + invalidExecutionTime: '無效的執行時間', + executionTimeMustBeFuture: '執行時間必須在未來', + }, + triggerWebhook: { + title: 'Webhook 觸發', + nodeTitle: '🔗 網路鉤子觸發', + configPlaceholder: 'Webhook 觸發配置將在此實施', + webhookUrl: 'Webhook 網址', + webhookUrlPlaceholder: '點擊生成以創建 webhook URL', + generate: '生成', + copy: '複製', + test: '測試', + urlGenerated: 'Webhook URL 已成功生成', + urlGenerationFailed: '無法生成 Webhook URL', + urlCopied: 'URL 已複製到剪貼簿', + method: '方法', + contentType: '內容類型', + queryParameters: '查詢參數', + headerParameters: '標頭參數', + requestBodyParameters: '請求主體參數', + parameterName: '變數名稱', + varName: '變數名稱', + varType: '類型', + varNamePlaceholder: '輸入變數名稱...', + required: '必填', + addParameter: '添加', + addHeader: '添加', + noParameters: '未設定任何參數', + noQueryParameters: '未配置查詢參數', + noHeaders: '未配置標頭', + noBodyParameters: '未配置任何正文參數', + debugUrlTitle: '在測試運行中,請始終使用此 URL', + debugUrlCopy: '點擊複製', + debugUrlCopied: '已複製!', + debugUrlPrivateAddressWarning: '此 URL 似乎是內部位址,可能會導致 webhook 請求失敗。您可以將 TRIGGER_URL 更改為公開位址。', + errorHandling: '錯誤處理', + errorStrategy: '錯誤處理', + responseConfiguration: '回應', + asyncMode: '非同步模式', + statusCode: '狀態碼', + responseBody: '回應正文', + responseBodyPlaceholder: '在這裡撰寫您的回覆內容', + headers: '標頭', + validation: { + webhookUrlRequired: '需要 Webhook URL', + invalidParameterType: '參數 "{{name}}" 的類型 "{{type}}" 無效', + }, + }, + }, + tracing: { + stopBy: '由{{user}}終止', + }, + variableReference: { + noAvailableVars: '無可用變數', + noAssignedVars: '沒有可用的已分配變數', + noVarsForOperation: '所選操作沒有可用於賦值的變數。', + assignedVarsDescription: '分配的變數必須是可寫變數,例如', + conversationVars: '對話變數', + }, + versionHistory: { + filter: { + onlyShowNamedVersions: '僅顯示命名版本', + onlyYours: '只有你的', + empty: '未找到匹配的版本歷史', + all: '所有', + reset: '重置過濾器', + }, + editField: { + releaseNotes: '發佈說明', + titleLengthLimit: '標題不能超過 {{limit}} 個字符', + releaseNotesLengthLimit: '發佈說明不能超過 {{limit}} 個字符', + title: '標題', + }, + action: {
updateFailure: '更新版本失敗', + restoreFailure: '無法恢復版本', + restoreSuccess: '恢復版本', + updateSuccess: '版本已更新', + deleteSuccess: '版本已刪除', + deleteFailure: '無法刪除版本', + copyIdSuccess: 'ID 已複製到剪貼板', + }, + nameThisVersion: '給這個版本命名', + latest: '最新', + currentDraft: '當前草稿', + title: '版本', + editVersionInfo: '編輯版本信息', + restorationTip: '版本恢復後,當前草稿將被覆蓋。', + deletionTip: '刪除是不可逆的,請確認。', + releaseNotesPlaceholder: '描述發生了什麼變化', + defaultName: '未命名版本', + copyId: '複製ID', + }, + debug: { + settingsTab: '設定', + lastRunTab: '最後一次運行', + relationsTab: '關係', + noData: { + runThisNode: '運行此節點', + description: '上次運行的結果將顯示在這裡', + }, + variableInspect: { + listening: { + title: '正在監聽觸發器事件…', + tip: '您現在可以向 HTTP {{nodeName}} 端點發送測試請求來模擬事件觸發,或將其作為即時事件除錯的回呼 URL。所有輸出都可在變數檢視器中直接查看。', + tipPlugin: '您現在可以在 {{- pluginName}} 中建立事件,並在變數檢視器中檢視這些事件的輸出。', + tipSchedule: '正在監聽排程觸發器事件。\n下一次排程執行時間:{{nextTriggerTime}}', + tipFallback: '正在等待觸發器事件,輸出會顯示在此處。', + defaultNodeName: '此觸發器', + defaultPluginName: '此插件觸發器', + defaultScheduleTime: '未設定', + selectedTriggers: '已選觸發器', + stopButton: '停止', + }, + trigger: { + cached: '查看快取的變數', + stop: '停止運行', + clear: '清除', + running: '快取運行狀態', + normal: '變數檢查', + }, + emptyLink: '了解更多', + view: '查看日誌', + clearAll: '重置所有', + envNode: '環境', + title: '變數檢查', + clearNode: '清除快取變數', + systemNode: '系統', + reset: '重置為上次運行值', + chatNode: '對話', + edited: '已編輯', + emptyTip: '在畫布上逐步執行節點或逐步運行節點後,您可以在變數檢視中查看節點變數的當前值。', + resetConversationVar: '將對話變數重置為默認值', + export: '匯出', + largeData: '大資料,唯讀預覽。匯出以檢視全部。', + exportToolTip: '將變數匯出為檔案', + largeDataNoExport: '大型資料 - 僅部分預覽', + }, + relations: { + dependencies: '依賴', + dependents: '被依賴', + dependenciesDescription: '此節點所依賴的其他節點', + dependentsDescription: '依賴此節點的其他節點', + noDependencies: '無依賴', + noDependents: '無被依賴', + }, + copyLastRun: '複製上一次運行', + copyLastRunError: '未能複製上一次運行的輸入', + noMatchingInputsFound: '在上次運行中未找到匹配的輸入', + noLastRunFound: '沒有找到之前的運行', + lastOutput: '最後的輸出', + lastRunInputsCopied: '從上次運行複製的 {{count}} 個輸入', + }, + sidebar:
{ + exportWarning: '導出當前保存的版本', + exportWarningDesc: '這將導出當前保存的工作流程版本。如果您在編輯器中有未保存的更改,請先通過使用工作流程畫布中的導出選項來保存它們。', + }, + error: { + startNodeRequired: '請先新增一個起始節點,再執行 {{operation}}', + operations: { + connectingNodes: '連接節點', + addingNodes: '新增節點', + modifyingWorkflow: '修改工作流程', + updatingWorkflow: '更新工作流程', + }, + }, + customWebhook: '自訂 Webhook', + difyTeam: 'Dify 團隊', + triggerStatus: { + enabled: '觸發器', + disabled: '觸發器 • 已停用', + }, + entryNodeStatus: { + enabled: '開始', + disabled: '開始 • 已停用', + }, + onboarding: { + title: '選擇一個起始節點開始', + description: '不同的起始節點有不同的能力。別擔心,你之後總是可以更改它們。', + userInputFull: '使用者輸入(原始起始節點)', + userInputDescription: '啟動節點,允許設定使用者輸入變數,工具功能包括網頁應用程式、服務 API、MCP 伺服器和工作流程。', + trigger: '觸發器', + triggerDescription: '觸發器可以作為工作流程的起始節點,例如排程任務、自訂網絡掛鉤或與其他應用程式的整合。', + back: '返回', + learnMore: '了解更多', + aboutStartNode: '關於起始節點。', + escTip: { + press: '按下', + key: '取消', + toDismiss: '以關閉', + }, + }, +} + +export default translation diff --git a/web/package.json b/web/package.json index 3ebfe0bdf4..568ac5745f 100644 --- a/web/package.json +++ b/web/package.json @@ -3,7 +3,7 @@ "type": "module", "version": "1.11.4", "private": true, - "packageManager": "pnpm@10.27.0+sha512.72d699da16b1179c14ba9e64dc71c9a40988cbdc65c264cb0e489db7de917f20dcf4d64d8723625f2969ba52d4b7e2a1170682d9ac2a5dcaeaab732b7e16f04a", + "packageManager": "pnpm@10.28.0+sha512.05df71d1421f21399e053fde567cea34d446fa02c76571441bfc1c7956e98e363088982d940465fd34480d4d90a0668bc12362f8aa88000a64e83d0b0e47be48", "imports": { "#i18n": { "react-server": "./i18n-config/lib.server.ts",