From 2b23c43434b535dc1aba2f50e1da57d661c26991 Mon Sep 17 00:00:00 2001 From: Novice Date: Tue, 9 Dec 2025 11:26:02 +0800 Subject: [PATCH 01/18] feat: add agent package --- api/controllers/console/app/message.py | 1 + api/controllers/console/app/workflow_run.py | 1 - api/controllers/service_api/app/message.py | 1 + api/controllers/web/message.py | 1 + api/core/agent/agent_app_runner.py | 358 ++++++++++++++ api/core/agent/base_agent_runner.py | 13 +- api/core/agent/cot_agent_runner.py | 431 ---------------- api/core/agent/cot_chat_agent_runner.py | 118 ----- api/core/agent/cot_completion_agent_runner.py | 87 ---- api/core/agent/entities.py | 93 ++++ api/core/agent/fc_agent_runner.py | 465 ------------------ api/core/agent/patterns/README.md | 67 +++ api/core/agent/patterns/__init__.py | 19 + api/core/agent/patterns/base.py | 444 +++++++++++++++++ api/core/agent/patterns/function_call.py | 273 ++++++++++ api/core/agent/patterns/react.py | 402 +++++++++++++++ api/core/agent/patterns/strategy_factory.py | 107 ++++ .../advanced_chat/generate_task_pipeline.py | 177 ++++++- api/core/app/apps/agent_chat/app_runner.py | 24 +- .../common/workflow_response_converter.py | 2 +- .../apps/workflow/generate_task_pipeline.py | 38 +- api/core/app/apps/workflow_app_runner.py | 9 + .../app/entities/llm_generation_entities.py | 69 +++ api/core/app/entities/queue_entities.py | 31 ++ api/core/app/entities/task_entities.py | 55 ++- .../easy_ui_based_generate_task_pipeline.py | 83 +++- .../task_pipeline/message_cycle_manager.py | 27 +- ...hemy_workflow_node_execution_repository.py | 89 ++++ api/core/tools/__base/tool.py | 55 +++ api/core/workflow/enums.py | 1 + .../response_coordinator/coordinator.py | 167 +++++-- api/core/workflow/graph_events/__init__.py | 2 + api/core/workflow/graph_events/node.py | 27 +- api/core/workflow/node_events/__init__.py | 8 + api/core/workflow/node_events/node.py | 40 +- api/core/workflow/nodes/base/node.py | 56 +++ api/core/workflow/nodes/llm/__init__.py | 2 + api/core/workflow/nodes/llm/entities.py | 28 ++ api/core/workflow/nodes/llm/llm_utils.py | 92 ++++ api/core/workflow/nodes/llm/node.py | 465 +++++++++++++++++- api/fields/conversation_fields.py | 1 + api/fields/message_fields.py | 1 + api/fields/workflow_run_fields.py | 1 + api/models/__init__.py | 2 + api/models/model.py | 97 ++++ api/services/llm_generation_service.py | 131 +++++ api/services/workflow_run_service.py | 22 +- .../core/agent/patterns/__init__.py | 0 .../core/agent/patterns/test_base.py | 324 ++++++++++++ .../core/agent/patterns/test_function_call.py | 332 +++++++++++++ .../core/agent/patterns/test_react.py | 224 +++++++++ .../agent/patterns/test_strategy_factory.py | 203 ++++++++ .../core/agent/test_agent_app_runner.py | 388 +++++++++++++++ .../unit_tests/core/agent/test_entities.py | 191 +++++++ .../graph_engine/test_response_coordinator.py | 169 +++++++ .../node_events/test_stream_chunk_events.py | 336 +++++++++++++ web/app/components/workflow/constants.ts | 4 + .../nodes/agent/components/tool-icon.tsx | 6 +- .../nodes/llm/components/tools-config.tsx | 58 +++ .../workflow/nodes/llm/constants.ts | 41 ++ .../components/workflow/nodes/llm/default.ts | 1 + .../components/workflow/nodes/llm/node.tsx | 41 +- .../components/workflow/nodes/llm/panel.tsx | 28 ++ .../components/workflow/nodes/llm/types.ts | 2 + .../workflow/nodes/llm/use-config.ts | 100 +++- .../run/agent-log/agent-log-trigger.tsx | 11 +- web/app/components/workflow/run/node.tsx | 3 +- .../components/workflow/run/result-panel.tsx | 3 +- 
.../run/utils/format-log/agent/index.ts | 2 +- web/i18n/en-US/workflow.ts | 4 + web/i18n/zh-Hans/workflow.ts | 4 + 71 files changed, 5945 insertions(+), 1213 deletions(-) create mode 100644 api/core/agent/agent_app_runner.py delete mode 100644 api/core/agent/cot_agent_runner.py delete mode 100644 api/core/agent/cot_chat_agent_runner.py delete mode 100644 api/core/agent/cot_completion_agent_runner.py delete mode 100644 api/core/agent/fc_agent_runner.py create mode 100644 api/core/agent/patterns/README.md create mode 100644 api/core/agent/patterns/__init__.py create mode 100644 api/core/agent/patterns/base.py create mode 100644 api/core/agent/patterns/function_call.py create mode 100644 api/core/agent/patterns/react.py create mode 100644 api/core/agent/patterns/strategy_factory.py create mode 100644 api/core/app/entities/llm_generation_entities.py create mode 100644 api/services/llm_generation_service.py create mode 100644 api/tests/unit_tests/core/agent/patterns/__init__.py create mode 100644 api/tests/unit_tests/core/agent/patterns/test_base.py create mode 100644 api/tests/unit_tests/core/agent/patterns/test_function_call.py create mode 100644 api/tests/unit_tests/core/agent/patterns/test_react.py create mode 100644 api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py create mode 100644 api/tests/unit_tests/core/agent/test_agent_app_runner.py create mode 100644 api/tests/unit_tests/core/agent/test_entities.py create mode 100644 api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py create mode 100644 api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py create mode 100644 web/app/components/workflow/nodes/llm/components/tools-config.tsx create mode 100644 web/app/components/workflow/nodes/llm/constants.ts diff --git a/api/controllers/console/app/message.py b/api/controllers/console/app/message.py index 377297c84c..6b5b0d9eb3 100644 --- a/api/controllers/console/app/message.py +++ b/api/controllers/console/app/message.py @@ -201,6 +201,7 @@ message_detail_model = console_ns.model( "status": fields.String, "error": fields.String, "parent_message_id": fields.String, + "generation_detail": fields.Raw, }, ) diff --git a/api/controllers/console/app/workflow_run.py b/api/controllers/console/app/workflow_run.py index 8f1871f1e9..8360785d19 100644 --- a/api/controllers/console/app/workflow_run.py +++ b/api/controllers/console/app/workflow_run.py @@ -359,7 +359,6 @@ class WorkflowRunNodeExecutionListApi(Resource): @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) - @marshal_with(workflow_run_node_execution_list_model) def get(self, app_model: App, run_id): """ Get workflow run node execution list diff --git a/api/controllers/service_api/app/message.py b/api/controllers/service_api/app/message.py index b8e5ed28e4..e134253547 100644 --- a/api/controllers/service_api/app/message.py +++ b/api/controllers/service_api/app/message.py @@ -86,6 +86,7 @@ def build_message_model(api_or_ns: Api | Namespace): "agent_thoughts": fields.List(fields.Nested(agent_thought_model)), "status": fields.String, "error": fields.String, + "generation_detail": fields.Raw, } return api_or_ns.model("Message", message_fields) diff --git a/api/controllers/web/message.py b/api/controllers/web/message.py index 9f9aa4838c..afa935afa6 100644 --- a/api/controllers/web/message.py +++ b/api/controllers/web/message.py @@ -55,6 +55,7 @@ class MessageListApi(WebApiResource): "metadata": 
fields.Raw(attribute="message_metadata_dict"), "status": fields.String, "error": fields.String, + "generation_detail": fields.Raw, } message_infinite_scroll_pagination_fields = { diff --git a/api/core/agent/agent_app_runner.py b/api/core/agent/agent_app_runner.py new file mode 100644 index 0000000000..9be5be5c7c --- /dev/null +++ b/api/core/agent/agent_app_runner.py @@ -0,0 +1,358 @@ +import logging +from collections.abc import Generator +from copy import deepcopy +from typing import Any + +from core.agent.base_agent_runner import BaseAgentRunner +from core.agent.entities import AgentEntity, AgentLog, AgentResult +from core.agent.patterns.strategy_factory import StrategyFactory +from core.app.apps.base_app_queue_manager import PublishFrom +from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent +from core.file import file_manager +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMUsage, + PromptMessage, + PromptMessageContentType, + SystemPromptMessage, + TextPromptMessageContent, + UserPromptMessage, +) +from core.model_runtime.entities.message_entities import ImagePromptMessageContent, PromptMessageContentUnionTypes +from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform +from core.tools.__base.tool import Tool +from core.tools.entities.tool_entities import ToolInvokeMeta +from core.tools.tool_engine import ToolEngine +from models.model import Message + +logger = logging.getLogger(__name__) + + +class AgentAppRunner(BaseAgentRunner): + def _create_tool_invoke_hook(self, message: Message): + """ + Create a tool invoke hook that uses ToolEngine.agent_invoke. + This hook handles file creation and returns proper meta information. 
+ """ + # Get trace manager from app generate entity + trace_manager = self.application_generate_entity.trace_manager + + def tool_invoke_hook( + tool: Tool, tool_args: dict[str, Any], tool_name: str + ) -> tuple[str, list[str], ToolInvokeMeta]: + """Hook that uses agent_invoke for proper file and meta handling.""" + tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke( + tool=tool, + tool_parameters=tool_args, + user_id=self.user_id, + tenant_id=self.tenant_id, + message=message, + invoke_from=self.application_generate_entity.invoke_from, + agent_tool_callback=self.agent_callback, + trace_manager=trace_manager, + app_id=self.application_generate_entity.app_config.app_id, + message_id=message.id, + conversation_id=self.conversation.id, + ) + + # Publish files and track IDs + for message_file_id in message_files: + self.queue_manager.publish( + QueueMessageFileEvent(message_file_id=message_file_id), + PublishFrom.APPLICATION_MANAGER, + ) + self._current_message_file_ids.append(message_file_id) + + return tool_invoke_response, message_files, tool_invoke_meta + + return tool_invoke_hook + + def run(self, message: Message, query: str, **kwargs: Any) -> Generator[LLMResultChunk, None, None]: + """ + Run Agent application + """ + self.query = query + app_generate_entity = self.application_generate_entity + + app_config = self.app_config + assert app_config is not None, "app_config is required" + assert app_config.agent is not None, "app_config.agent is required" + + # convert tools into ModelRuntime Tool format + tool_instances, _ = self._init_prompt_tools() + + assert app_config.agent + + # Create tool invoke hook for agent_invoke + tool_invoke_hook = self._create_tool_invoke_hook(message) + + # Get instruction for ReAct strategy + instruction = self.app_config.prompt_template.simple_prompt_template or "" + + # Use factory to create appropriate strategy + strategy = StrategyFactory.create_strategy( + model_features=self.model_features, + model_instance=self.model_instance, + tools=list(tool_instances.values()), + files=list(self.files), + max_iterations=app_config.agent.max_iteration, + context=self.build_execution_context(), + agent_strategy=self.config.strategy, + tool_invoke_hook=tool_invoke_hook, + instruction=instruction, + ) + + # Initialize state variables + current_agent_thought_id = None + has_published_thought = False + current_tool_name: str | None = None + self._current_message_file_ids = [] + + # organize prompt messages + prompt_messages = self._organize_prompt_messages() + + # Run strategy + generator = strategy.run( + prompt_messages=prompt_messages, + model_parameters=app_generate_entity.model_conf.parameters, + stop=app_generate_entity.model_conf.stop, + stream=True, + ) + + # Consume generator and collect result + result: AgentResult | None = None + try: + while True: + try: + output = next(generator) + except StopIteration as e: + # Generator finished, get the return value + result = e.value + break + + if isinstance(output, LLMResultChunk): + # Handle LLM chunk + if current_agent_thought_id and not has_published_thought: + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + has_published_thought = True + + yield output + + elif isinstance(output, AgentLog): + # Handle Agent Log using log_type for type-safe dispatch + if output.status == AgentLog.LogStatus.START: + if output.log_type == AgentLog.LogType.ROUND: + # Start of a new round + message_file_ids: list[str] 
= [] + current_agent_thought_id = self.create_agent_thought( + message_id=message.id, + message="", + tool_name="", + tool_input="", + messages_ids=message_file_ids, + ) + has_published_thought = False + + elif output.log_type == AgentLog.LogType.TOOL_CALL: + if current_agent_thought_id is None: + continue + + # Tool call start - extract data from structured fields + current_tool_name = output.data.get("tool_name", "") + tool_input = output.data.get("tool_args", {}) + + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=current_tool_name, + tool_input=tool_input, + thought=None, + observation=None, + tool_invoke_meta=None, + answer=None, + messages_ids=[], + ) + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + elif output.status == AgentLog.LogStatus.SUCCESS: + if output.log_type == AgentLog.LogType.THOUGHT: + pass + + elif output.log_type == AgentLog.LogType.TOOL_CALL: + if current_agent_thought_id is None: + continue + + # Tool call finished + tool_output = output.data.get("output") + # Get meta from strategy output (now properly populated) + tool_meta = output.data.get("meta") + + # Wrap tool_meta with tool_name as key (required by agent_service) + if tool_meta and current_tool_name: + tool_meta = {current_tool_name: tool_meta} + + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=None, + tool_input=None, + thought=None, + observation=tool_output, + tool_invoke_meta=tool_meta, + answer=None, + messages_ids=self._current_message_file_ids, + ) + # Clear message file ids after saving + self._current_message_file_ids = [] + current_tool_name = None + + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + elif output.log_type == AgentLog.LogType.ROUND: + if current_agent_thought_id is None: + continue + + # Round finished - save LLM usage and answer + llm_usage = output.metadata.get(AgentLog.LogMetadata.LLM_USAGE) + llm_result = output.data.get("llm_result") + final_answer = output.data.get("final_answer") + + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=None, + tool_input=None, + thought=llm_result, + observation=None, + tool_invoke_meta=None, + answer=final_answer, + messages_ids=[], + llm_usage=llm_usage, + ) + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) + + except Exception: + # Re-raise any other exceptions + raise + + # Process final result + if isinstance(result, AgentResult): + final_answer = result.text + usage = result.usage or LLMUsage.empty_usage() + + # Publish end event + self.queue_manager.publish( + QueueMessageEndEvent( + llm_result=LLMResult( + model=self.model_instance.model, + prompt_messages=prompt_messages, + message=AssistantPromptMessage(content=final_answer), + usage=usage, + system_fingerprint="", + ) + ), + PublishFrom.APPLICATION_MANAGER, + ) + + def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: + """ + Initialize system message + """ + if not prompt_messages and prompt_template: + return [ + SystemPromptMessage(content=prompt_template), + ] + + if prompt_messages and not isinstance(prompt_messages[0], SystemPromptMessage) and prompt_template: + prompt_messages.insert(0, SystemPromptMessage(content=prompt_template)) + + return 
prompt_messages or [] + + def _organize_user_query(self, query: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: + """ + Organize user query + """ + if self.files: + # get image detail config + image_detail_config = ( + self.application_generate_entity.file_upload_config.image_config.detail + if ( + self.application_generate_entity.file_upload_config + and self.application_generate_entity.file_upload_config.image_config + ) + else None + ) + image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW + + prompt_message_contents: list[PromptMessageContentUnionTypes] = [] + for file in self.files: + prompt_message_contents.append( + file_manager.to_prompt_message_content( + file, + image_detail_config=image_detail_config, + ) + ) + prompt_message_contents.append(TextPromptMessageContent(data=query)) + + prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) + else: + prompt_messages.append(UserPromptMessage(content=query)) + + return prompt_messages + + def _clear_user_prompt_image_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: + """ + As for now, gpt supports both fc and vision at the first iteration. + We need to remove the image messages from the prompt messages at the first iteration. + """ + prompt_messages = deepcopy(prompt_messages) + + for prompt_message in prompt_messages: + if isinstance(prompt_message, UserPromptMessage): + if isinstance(prompt_message.content, list): + prompt_message.content = "\n".join( + [ + content.data + if content.type == PromptMessageContentType.TEXT + else "[image]" + if content.type == PromptMessageContentType.IMAGE + else "[file]" + for content in prompt_message.content + ] + ) + + return prompt_messages + + def _organize_prompt_messages(self): + # For ReAct strategy, use the agent prompt template + if self.config.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT and self.config.prompt: + prompt_template = self.config.prompt.first_prompt + else: + prompt_template = self.app_config.prompt_template.simple_prompt_template or "" + + self.history_prompt_messages = self._init_system_message(prompt_template, self.history_prompt_messages) + query_prompt_messages = self._organize_user_query(self.query or "", []) + + self.history_prompt_messages = AgentHistoryPromptTransform( + model_config=self.model_config, + prompt_messages=[*query_prompt_messages, *self._current_thoughts], + history_messages=self.history_prompt_messages, + memory=self.memory, + ).get_prompt() + + prompt_messages = [*self.history_prompt_messages, *query_prompt_messages, *self._current_thoughts] + if len(self._current_thoughts) != 0: + # clear messages after the first iteration + prompt_messages = self._clear_user_prompt_image_messages(prompt_messages) + return prompt_messages diff --git a/api/core/agent/base_agent_runner.py b/api/core/agent/base_agent_runner.py index c196dbbdf1..b59a9a3859 100644 --- a/api/core/agent/base_agent_runner.py +++ b/api/core/agent/base_agent_runner.py @@ -5,7 +5,7 @@ from typing import Union, cast from sqlalchemy import select -from core.agent.entities import AgentEntity, AgentToolEntity +from core.agent.entities import AgentEntity, AgentToolEntity, ExecutionContext from core.app.app_config.features.file_upload.manager import FileUploadConfigManager from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig from core.app.apps.base_app_queue_manager import AppQueueManager @@ -114,9 +114,20 @@ class BaseAgentRunner(AppRunner): features = model_schema.features if 
model_schema and model_schema.features else [] self.stream_tool_call = ModelFeature.STREAM_TOOL_CALL in features self.files = application_generate_entity.files if ModelFeature.VISION in features else [] + self.model_features = features self.query: str | None = "" self._current_thoughts: list[PromptMessage] = [] + def build_execution_context(self) -> ExecutionContext: + """Build execution context.""" + return ExecutionContext( + user_id=self.user_id, + app_id=self.app_config.app_id, + conversation_id=self.conversation.id, + message_id=self.message.id, + tenant_id=self.tenant_id, + ) + def _repack_app_generate_entity( self, app_generate_entity: AgentChatAppGenerateEntity ) -> AgentChatAppGenerateEntity: diff --git a/api/core/agent/cot_agent_runner.py b/api/core/agent/cot_agent_runner.py deleted file mode 100644 index b32e35d0ca..0000000000 --- a/api/core/agent/cot_agent_runner.py +++ /dev/null @@ -1,431 +0,0 @@ -import json -import logging -from abc import ABC, abstractmethod -from collections.abc import Generator, Mapping, Sequence -from typing import Any - -from core.agent.base_agent_runner import BaseAgentRunner -from core.agent.entities import AgentScratchpadUnit -from core.agent.output_parser.cot_output_parser import CotAgentOutputParser -from core.app.apps.base_app_queue_manager import PublishFrom -from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent -from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta, LLMUsage -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - PromptMessageTool, - ToolPromptMessage, - UserPromptMessage, -) -from core.ops.ops_trace_manager import TraceQueueManager -from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform -from core.tools.__base.tool import Tool -from core.tools.entities.tool_entities import ToolInvokeMeta -from core.tools.tool_engine import ToolEngine -from models.model import Message - -logger = logging.getLogger(__name__) - - -class CotAgentRunner(BaseAgentRunner, ABC): - _is_first_iteration = True - _ignore_observation_providers = ["wenxin"] - _historic_prompt_messages: list[PromptMessage] - _agent_scratchpad: list[AgentScratchpadUnit] - _instruction: str - _query: str - _prompt_messages_tools: Sequence[PromptMessageTool] - - def run( - self, - message: Message, - query: str, - inputs: Mapping[str, str], - ) -> Generator: - """ - Run Cot agent application - """ - - app_generate_entity = self.application_generate_entity - self._repack_app_generate_entity(app_generate_entity) - self._init_react_state(query) - - trace_manager = app_generate_entity.trace_manager - - # check model mode - if "Observation" not in app_generate_entity.model_conf.stop: - if app_generate_entity.model_conf.provider not in self._ignore_observation_providers: - app_generate_entity.model_conf.stop.append("Observation") - - app_config = self.app_config - assert app_config.agent - - # init instruction - inputs = inputs or {} - instruction = app_config.prompt_template.simple_prompt_template or "" - self._instruction = self._fill_in_inputs_from_external_data_tools(instruction, inputs) - - iteration_step = 1 - max_iteration_steps = min(app_config.agent.max_iteration, 99) + 1 - - # convert tools into ModelRuntime Tool format - tool_instances, prompt_messages_tools = self._init_prompt_tools() - self._prompt_messages_tools = prompt_messages_tools - - function_call_state = True - llm_usage: dict[str, 
LLMUsage | None] = {"usage": None} - final_answer = "" - prompt_messages: list = [] # Initialize prompt_messages - agent_thought_id = "" # Initialize agent_thought_id - - def increase_usage(final_llm_usage_dict: dict[str, LLMUsage | None], usage: LLMUsage): - if not final_llm_usage_dict["usage"]: - final_llm_usage_dict["usage"] = usage - else: - llm_usage = final_llm_usage_dict["usage"] - llm_usage.prompt_tokens += usage.prompt_tokens - llm_usage.completion_tokens += usage.completion_tokens - llm_usage.total_tokens += usage.total_tokens - llm_usage.prompt_price += usage.prompt_price - llm_usage.completion_price += usage.completion_price - llm_usage.total_price += usage.total_price - - model_instance = self.model_instance - - while function_call_state and iteration_step <= max_iteration_steps: - # continue to run until there is not any tool call - function_call_state = False - - if iteration_step == max_iteration_steps: - # the last iteration, remove all tools - self._prompt_messages_tools = [] - - message_file_ids: list[str] = [] - - agent_thought_id = self.create_agent_thought( - message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids - ) - - if iteration_step > 1: - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - # recalc llm max tokens - prompt_messages = self._organize_prompt_messages() - self.recalc_llm_max_tokens(self.model_config, prompt_messages) - # invoke model - chunks = model_instance.invoke_llm( - prompt_messages=prompt_messages, - model_parameters=app_generate_entity.model_conf.parameters, - tools=[], - stop=app_generate_entity.model_conf.stop, - stream=True, - user=self.user_id, - callbacks=[], - ) - - usage_dict: dict[str, LLMUsage | None] = {} - react_chunks = CotAgentOutputParser.handle_react_stream_output(chunks, usage_dict) - scratchpad = AgentScratchpadUnit( - agent_response="", - thought="", - action_str="", - observation="", - action=None, - ) - - # publish agent thought if it's first iteration - if iteration_step == 1: - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - for chunk in react_chunks: - if isinstance(chunk, AgentScratchpadUnit.Action): - action = chunk - # detect action - assert scratchpad.agent_response is not None - scratchpad.agent_response += json.dumps(chunk.model_dump()) - scratchpad.action_str = json.dumps(chunk.model_dump()) - scratchpad.action = action - else: - assert scratchpad.agent_response is not None - scratchpad.agent_response += chunk - assert scratchpad.thought is not None - scratchpad.thought += chunk - yield LLMResultChunk( - model=self.model_config.model, - prompt_messages=prompt_messages, - system_fingerprint="", - delta=LLMResultChunkDelta(index=0, message=AssistantPromptMessage(content=chunk), usage=None), - ) - - assert scratchpad.thought is not None - scratchpad.thought = scratchpad.thought.strip() or "I am thinking about how to help you" - self._agent_scratchpad.append(scratchpad) - - # get llm usage - if "usage" in usage_dict: - if usage_dict["usage"] is not None: - increase_usage(llm_usage, usage_dict["usage"]) - else: - usage_dict["usage"] = LLMUsage.empty_usage() - - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name=(scratchpad.action.action_name if scratchpad.action and not scratchpad.is_final() else ""), - tool_input={scratchpad.action.action_name: scratchpad.action.action_input} if scratchpad.action 
else {}, - tool_invoke_meta={}, - thought=scratchpad.thought or "", - observation="", - answer=scratchpad.agent_response or "", - messages_ids=[], - llm_usage=usage_dict["usage"], - ) - - if not scratchpad.is_final(): - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - if not scratchpad.action: - # failed to extract action, return final answer directly - final_answer = "" - else: - if scratchpad.action.action_name.lower() == "final answer": - # action is final answer, return final answer directly - try: - if isinstance(scratchpad.action.action_input, dict): - final_answer = json.dumps(scratchpad.action.action_input, ensure_ascii=False) - elif isinstance(scratchpad.action.action_input, str): - final_answer = scratchpad.action.action_input - else: - final_answer = f"{scratchpad.action.action_input}" - except TypeError: - final_answer = f"{scratchpad.action.action_input}" - else: - function_call_state = True - # action is tool call, invoke tool - tool_invoke_response, tool_invoke_meta = self._handle_invoke_action( - action=scratchpad.action, - tool_instances=tool_instances, - message_file_ids=message_file_ids, - trace_manager=trace_manager, - ) - scratchpad.observation = tool_invoke_response - scratchpad.agent_response = tool_invoke_response - - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name=scratchpad.action.action_name, - tool_input={scratchpad.action.action_name: scratchpad.action.action_input}, - thought=scratchpad.thought or "", - observation={scratchpad.action.action_name: tool_invoke_response}, - tool_invoke_meta={scratchpad.action.action_name: tool_invoke_meta.to_dict()}, - answer=scratchpad.agent_response, - messages_ids=message_file_ids, - llm_usage=usage_dict["usage"], - ) - - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - # update prompt tool message - for prompt_tool in self._prompt_messages_tools: - self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool) - - iteration_step += 1 - - yield LLMResultChunk( - model=model_instance.model, - prompt_messages=prompt_messages, - delta=LLMResultChunkDelta( - index=0, message=AssistantPromptMessage(content=final_answer), usage=llm_usage["usage"] - ), - system_fingerprint="", - ) - - # save agent thought - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name="", - tool_input={}, - tool_invoke_meta={}, - thought=final_answer, - observation={}, - answer=final_answer, - messages_ids=[], - ) - # publish end event - self.queue_manager.publish( - QueueMessageEndEvent( - llm_result=LLMResult( - model=model_instance.model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage(content=final_answer), - usage=llm_usage["usage"] or LLMUsage.empty_usage(), - system_fingerprint="", - ) - ), - PublishFrom.APPLICATION_MANAGER, - ) - - def _handle_invoke_action( - self, - action: AgentScratchpadUnit.Action, - tool_instances: Mapping[str, Tool], - message_file_ids: list[str], - trace_manager: TraceQueueManager | None = None, - ) -> tuple[str, ToolInvokeMeta]: - """ - handle invoke action - :param action: action - :param tool_instances: tool instances - :param message_file_ids: message file ids - :param trace_manager: trace manager - :return: observation, meta - """ - # action is tool call, invoke tool - tool_call_name = action.action_name - tool_call_args = action.action_input - tool_instance = 
tool_instances.get(tool_call_name) - - if not tool_instance: - answer = f"there is not a tool named {tool_call_name}" - return answer, ToolInvokeMeta.error_instance(answer) - - if isinstance(tool_call_args, str): - try: - tool_call_args = json.loads(tool_call_args) - except json.JSONDecodeError: - pass - - # invoke tool - tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke( - tool=tool_instance, - tool_parameters=tool_call_args, - user_id=self.user_id, - tenant_id=self.tenant_id, - message=self.message, - invoke_from=self.application_generate_entity.invoke_from, - agent_tool_callback=self.agent_callback, - trace_manager=trace_manager, - ) - - # publish files - for message_file_id in message_files: - # publish message file - self.queue_manager.publish( - QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER - ) - # add message file ids - message_file_ids.append(message_file_id) - - return tool_invoke_response, tool_invoke_meta - - def _convert_dict_to_action(self, action: dict) -> AgentScratchpadUnit.Action: - """ - convert dict to action - """ - return AgentScratchpadUnit.Action(action_name=action["action"], action_input=action["action_input"]) - - def _fill_in_inputs_from_external_data_tools(self, instruction: str, inputs: Mapping[str, Any]) -> str: - """ - fill in inputs from external data tools - """ - for key, value in inputs.items(): - try: - instruction = instruction.replace(f"{{{{{key}}}}}", str(value)) - except Exception: - continue - - return instruction - - def _init_react_state(self, query): - """ - init agent scratchpad - """ - self._query = query - self._agent_scratchpad = [] - self._historic_prompt_messages = self._organize_historic_prompt_messages() - - @abstractmethod - def _organize_prompt_messages(self) -> list[PromptMessage]: - """ - organize prompt messages - """ - - def _format_assistant_message(self, agent_scratchpad: list[AgentScratchpadUnit]) -> str: - """ - format assistant message - """ - message = "" - for scratchpad in agent_scratchpad: - if scratchpad.is_final(): - message += f"Final Answer: {scratchpad.agent_response}" - else: - message += f"Thought: {scratchpad.thought}\n\n" - if scratchpad.action_str: - message += f"Action: {scratchpad.action_str}\n\n" - if scratchpad.observation: - message += f"Observation: {scratchpad.observation}\n\n" - - return message - - def _organize_historic_prompt_messages( - self, current_session_messages: list[PromptMessage] | None = None - ) -> list[PromptMessage]: - """ - organize historic prompt messages - """ - result: list[PromptMessage] = [] - scratchpads: list[AgentScratchpadUnit] = [] - current_scratchpad: AgentScratchpadUnit | None = None - - for message in self.history_prompt_messages: - if isinstance(message, AssistantPromptMessage): - if not current_scratchpad: - assert isinstance(message.content, str) - current_scratchpad = AgentScratchpadUnit( - agent_response=message.content, - thought=message.content or "I am thinking about how to help you", - action_str="", - action=None, - observation=None, - ) - scratchpads.append(current_scratchpad) - if message.tool_calls: - try: - current_scratchpad.action = AgentScratchpadUnit.Action( - action_name=message.tool_calls[0].function.name, - action_input=json.loads(message.tool_calls[0].function.arguments), - ) - current_scratchpad.action_str = json.dumps(current_scratchpad.action.to_dict()) - except Exception: - logger.exception("Failed to parse tool call from assistant message") - elif isinstance(message, 
ToolPromptMessage): - if current_scratchpad: - assert isinstance(message.content, str) - current_scratchpad.observation = message.content - else: - raise NotImplementedError("expected str type") - elif isinstance(message, UserPromptMessage): - if scratchpads: - result.append(AssistantPromptMessage(content=self._format_assistant_message(scratchpads))) - scratchpads = [] - current_scratchpad = None - - result.append(message) - - if scratchpads: - result.append(AssistantPromptMessage(content=self._format_assistant_message(scratchpads))) - - historic_prompts = AgentHistoryPromptTransform( - model_config=self.model_config, - prompt_messages=current_session_messages or [], - history_messages=result, - memory=self.memory, - ).get_prompt() - return historic_prompts diff --git a/api/core/agent/cot_chat_agent_runner.py b/api/core/agent/cot_chat_agent_runner.py deleted file mode 100644 index 4d1d94eadc..0000000000 --- a/api/core/agent/cot_chat_agent_runner.py +++ /dev/null @@ -1,118 +0,0 @@ -import json - -from core.agent.cot_agent_runner import CotAgentRunner -from core.file import file_manager -from core.model_runtime.entities import ( - AssistantPromptMessage, - PromptMessage, - SystemPromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.entities.message_entities import ImagePromptMessageContent, PromptMessageContentUnionTypes -from core.model_runtime.utils.encoders import jsonable_encoder - - -class CotChatAgentRunner(CotAgentRunner): - def _organize_system_prompt(self) -> SystemPromptMessage: - """ - Organize system prompt - """ - assert self.app_config.agent - assert self.app_config.agent.prompt - - prompt_entity = self.app_config.agent.prompt - if not prompt_entity: - raise ValueError("Agent prompt configuration is not set") - first_prompt = prompt_entity.first_prompt - - system_prompt = ( - first_prompt.replace("{{instruction}}", self._instruction) - .replace("{{tools}}", json.dumps(jsonable_encoder(self._prompt_messages_tools))) - .replace("{{tool_names}}", ", ".join([tool.name for tool in self._prompt_messages_tools])) - ) - - return SystemPromptMessage(content=system_prompt) - - def _organize_user_query(self, query, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - Organize user query - """ - if self.files: - # get image detail config - image_detail_config = ( - self.application_generate_entity.file_upload_config.image_config.detail - if ( - self.application_generate_entity.file_upload_config - and self.application_generate_entity.file_upload_config.image_config - ) - else None - ) - image_detail_config = image_detail_config or ImagePromptMessageContent.DETAIL.LOW - - prompt_message_contents: list[PromptMessageContentUnionTypes] = [] - for file in self.files: - prompt_message_contents.append( - file_manager.to_prompt_message_content( - file, - image_detail_config=image_detail_config, - ) - ) - prompt_message_contents.append(TextPromptMessageContent(data=query)) - - prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) - else: - prompt_messages.append(UserPromptMessage(content=query)) - - return prompt_messages - - def _organize_prompt_messages(self) -> list[PromptMessage]: - """ - Organize - """ - # organize system prompt - system_message = self._organize_system_prompt() - - # organize current assistant messages - agent_scratchpad = self._agent_scratchpad - if not agent_scratchpad: - assistant_messages = [] - else: - assistant_message = AssistantPromptMessage(content="") - assistant_message.content = "" # FIXME: 
type check tell mypy that assistant_message.content is str - for unit in agent_scratchpad: - if unit.is_final(): - assert isinstance(assistant_message.content, str) - assistant_message.content += f"Final Answer: {unit.agent_response}" - else: - assert isinstance(assistant_message.content, str) - assistant_message.content += f"Thought: {unit.thought}\n\n" - if unit.action_str: - assistant_message.content += f"Action: {unit.action_str}\n\n" - if unit.observation: - assistant_message.content += f"Observation: {unit.observation}\n\n" - - assistant_messages = [assistant_message] - - # query messages - query_messages = self._organize_user_query(self._query, []) - - if assistant_messages: - # organize historic prompt messages - historic_messages = self._organize_historic_prompt_messages( - [system_message, *query_messages, *assistant_messages, UserPromptMessage(content="continue")] - ) - messages = [ - system_message, - *historic_messages, - *query_messages, - *assistant_messages, - UserPromptMessage(content="continue"), - ] - else: - # organize historic prompt messages - historic_messages = self._organize_historic_prompt_messages([system_message, *query_messages]) - messages = [system_message, *historic_messages, *query_messages] - - # join all messages - return messages diff --git a/api/core/agent/cot_completion_agent_runner.py b/api/core/agent/cot_completion_agent_runner.py deleted file mode 100644 index da9a001d84..0000000000 --- a/api/core/agent/cot_completion_agent_runner.py +++ /dev/null @@ -1,87 +0,0 @@ -import json - -from core.agent.cot_agent_runner import CotAgentRunner -from core.model_runtime.entities.message_entities import ( - AssistantPromptMessage, - PromptMessage, - TextPromptMessageContent, - UserPromptMessage, -) -from core.model_runtime.utils.encoders import jsonable_encoder - - -class CotCompletionAgentRunner(CotAgentRunner): - def _organize_instruction_prompt(self) -> str: - """ - Organize instruction prompt - """ - if self.app_config.agent is None: - raise ValueError("Agent configuration is not set") - prompt_entity = self.app_config.agent.prompt - if prompt_entity is None: - raise ValueError("prompt entity is not set") - first_prompt = prompt_entity.first_prompt - - system_prompt = ( - first_prompt.replace("{{instruction}}", self._instruction) - .replace("{{tools}}", json.dumps(jsonable_encoder(self._prompt_messages_tools))) - .replace("{{tool_names}}", ", ".join([tool.name for tool in self._prompt_messages_tools])) - ) - - return system_prompt - - def _organize_historic_prompt(self, current_session_messages: list[PromptMessage] | None = None) -> str: - """ - Organize historic prompt - """ - historic_prompt_messages = self._organize_historic_prompt_messages(current_session_messages) - historic_prompt = "" - - for message in historic_prompt_messages: - if isinstance(message, UserPromptMessage): - historic_prompt += f"Question: {message.content}\n\n" - elif isinstance(message, AssistantPromptMessage): - if isinstance(message.content, str): - historic_prompt += message.content + "\n\n" - elif isinstance(message.content, list): - for content in message.content: - if not isinstance(content, TextPromptMessageContent): - continue - historic_prompt += content.data - - return historic_prompt - - def _organize_prompt_messages(self) -> list[PromptMessage]: - """ - Organize prompt messages - """ - # organize system prompt - system_prompt = self._organize_instruction_prompt() - - # organize historic prompt messages - historic_prompt = self._organize_historic_prompt() - - # organize 
current assistant messages - agent_scratchpad = self._agent_scratchpad - assistant_prompt = "" - for unit in agent_scratchpad or []: - if unit.is_final(): - assistant_prompt += f"Final Answer: {unit.agent_response}" - else: - assistant_prompt += f"Thought: {unit.thought}\n\n" - if unit.action_str: - assistant_prompt += f"Action: {unit.action_str}\n\n" - if unit.observation: - assistant_prompt += f"Observation: {unit.observation}\n\n" - - # query messages - query_prompt = f"Question: {self._query}" - - # join all messages - prompt = ( - system_prompt.replace("{{historic_messages}}", historic_prompt) - .replace("{{agent_scratchpad}}", assistant_prompt) - .replace("{{query}}", query_prompt) - ) - - return [UserPromptMessage(content=prompt)] diff --git a/api/core/agent/entities.py b/api/core/agent/entities.py index 220feced1d..56319a14a3 100644 --- a/api/core/agent/entities.py +++ b/api/core/agent/entities.py @@ -1,3 +1,5 @@ +import uuid +from collections.abc import Mapping from enum import StrEnum from typing import Any, Union @@ -92,3 +94,94 @@ class AgentInvokeMessage(ToolInvokeMessage): """ pass + + +class ExecutionContext(BaseModel): + """Execution context containing trace and audit information. + + This context carries all the IDs and metadata that are not part of + the core business logic but needed for tracing, auditing, and + correlation purposes. + """ + + user_id: str | None = None + app_id: str | None = None + conversation_id: str | None = None + message_id: str | None = None + tenant_id: str | None = None + + @classmethod + def create_minimal(cls, user_id: str | None = None) -> "ExecutionContext": + """Create a minimal context with only essential fields.""" + return cls(user_id=user_id) + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for passing to legacy code.""" + return { + "user_id": self.user_id, + "app_id": self.app_id, + "conversation_id": self.conversation_id, + "message_id": self.message_id, + "tenant_id": self.tenant_id, + } + + def with_updates(self, **kwargs) -> "ExecutionContext": + """Create a new context with updated fields.""" + data = self.to_dict() + data.update(kwargs) + + return ExecutionContext( + user_id=data.get("user_id"), + app_id=data.get("app_id"), + conversation_id=data.get("conversation_id"), + message_id=data.get("message_id"), + tenant_id=data.get("tenant_id"), + ) + + +class AgentLog(BaseModel): + """ + Agent Log. 
+ """ + + class LogType(StrEnum): + """Type of agent log entry.""" + + ROUND = "round" # A complete iteration round + THOUGHT = "thought" # LLM thinking/reasoning + TOOL_CALL = "tool_call" # Tool invocation + + class LogMetadata(StrEnum): + STARTED_AT = "started_at" + FINISHED_AT = "finished_at" + ELAPSED_TIME = "elapsed_time" + TOTAL_PRICE = "total_price" + TOTAL_TOKENS = "total_tokens" + PROVIDER = "provider" + CURRENCY = "currency" + LLM_USAGE = "llm_usage" + + class LogStatus(StrEnum): + START = "start" + ERROR = "error" + SUCCESS = "success" + + id: str = Field(default_factory=lambda: str(uuid.uuid4()), description="The id of the log") + label: str = Field(..., description="The label of the log") + log_type: LogType = Field(..., description="The type of the log") + parent_id: str | None = Field(default=None, description="Leave empty for root log") + error: str | None = Field(default=None, description="The error message") + status: LogStatus = Field(..., description="The status of the log") + data: Mapping[str, Any] = Field(..., description="Detailed log data") + metadata: Mapping[LogMetadata, Any] = Field(default={}, description="The metadata of the log") + + +class AgentResult(BaseModel): + """ + Agent execution result. + """ + + text: str = Field(default="", description="The generated text") + files: list[Any] = Field(default_factory=list, description="Files produced during execution") + usage: Any | None = Field(default=None, description="LLM usage statistics") + finish_reason: str | None = Field(default=None, description="Reason for completion") diff --git a/api/core/agent/fc_agent_runner.py b/api/core/agent/fc_agent_runner.py deleted file mode 100644 index dcc1326b33..0000000000 --- a/api/core/agent/fc_agent_runner.py +++ /dev/null @@ -1,465 +0,0 @@ -import json -import logging -from collections.abc import Generator -from copy import deepcopy -from typing import Any, Union - -from core.agent.base_agent_runner import BaseAgentRunner -from core.app.apps.base_app_queue_manager import PublishFrom -from core.app.entities.queue_entities import QueueAgentThoughtEvent, QueueMessageEndEvent, QueueMessageFileEvent -from core.file import file_manager -from core.model_runtime.entities import ( - AssistantPromptMessage, - LLMResult, - LLMResultChunk, - LLMResultChunkDelta, - LLMUsage, - PromptMessage, - PromptMessageContentType, - SystemPromptMessage, - TextPromptMessageContent, - ToolPromptMessage, - UserPromptMessage, -) -from core.model_runtime.entities.message_entities import ImagePromptMessageContent, PromptMessageContentUnionTypes -from core.prompt.agent_history_prompt_transform import AgentHistoryPromptTransform -from core.tools.entities.tool_entities import ToolInvokeMeta -from core.tools.tool_engine import ToolEngine -from models.model import Message - -logger = logging.getLogger(__name__) - - -class FunctionCallAgentRunner(BaseAgentRunner): - def run(self, message: Message, query: str, **kwargs: Any) -> Generator[LLMResultChunk, None, None]: - """ - Run FunctionCall agent application - """ - self.query = query - app_generate_entity = self.application_generate_entity - - app_config = self.app_config - assert app_config is not None, "app_config is required" - assert app_config.agent is not None, "app_config.agent is required" - - # convert tools into ModelRuntime Tool format - tool_instances, prompt_messages_tools = self._init_prompt_tools() - - assert app_config.agent - - iteration_step = 1 - max_iteration_steps = min(app_config.agent.max_iteration, 99) + 1 - - # continue to run 
until there is not any tool call - function_call_state = True - llm_usage: dict[str, LLMUsage | None] = {"usage": None} - final_answer = "" - prompt_messages: list = [] # Initialize prompt_messages - - # get tracing instance - trace_manager = app_generate_entity.trace_manager - - def increase_usage(final_llm_usage_dict: dict[str, LLMUsage | None], usage: LLMUsage): - if not final_llm_usage_dict["usage"]: - final_llm_usage_dict["usage"] = usage - else: - llm_usage = final_llm_usage_dict["usage"] - llm_usage.prompt_tokens += usage.prompt_tokens - llm_usage.completion_tokens += usage.completion_tokens - llm_usage.total_tokens += usage.total_tokens - llm_usage.prompt_price += usage.prompt_price - llm_usage.completion_price += usage.completion_price - llm_usage.total_price += usage.total_price - - model_instance = self.model_instance - - while function_call_state and iteration_step <= max_iteration_steps: - function_call_state = False - - if iteration_step == max_iteration_steps: - # the last iteration, remove all tools - prompt_messages_tools = [] - - message_file_ids: list[str] = [] - agent_thought_id = self.create_agent_thought( - message_id=message.id, message="", tool_name="", tool_input="", messages_ids=message_file_ids - ) - - # recalc llm max tokens - prompt_messages = self._organize_prompt_messages() - self.recalc_llm_max_tokens(self.model_config, prompt_messages) - # invoke model - chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = model_instance.invoke_llm( - prompt_messages=prompt_messages, - model_parameters=app_generate_entity.model_conf.parameters, - tools=prompt_messages_tools, - stop=app_generate_entity.model_conf.stop, - stream=self.stream_tool_call, - user=self.user_id, - callbacks=[], - ) - - tool_calls: list[tuple[str, str, dict[str, Any]]] = [] - - # save full response - response = "" - - # save tool call names and inputs - tool_call_names = "" - tool_call_inputs = "" - - current_llm_usage = None - - if isinstance(chunks, Generator): - is_first_chunk = True - for chunk in chunks: - if is_first_chunk: - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - is_first_chunk = False - # check if there is any tool call - if self.check_tool_calls(chunk): - function_call_state = True - tool_calls.extend(self.extract_tool_calls(chunk) or []) - tool_call_names = ";".join([tool_call[1] for tool_call in tool_calls]) - try: - tool_call_inputs = json.dumps( - {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False - ) - except TypeError: - # fallback: force ASCII to handle non-serializable objects - tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls}) - - if chunk.delta.message and chunk.delta.message.content: - if isinstance(chunk.delta.message.content, list): - for content in chunk.delta.message.content: - response += content.data - else: - response += str(chunk.delta.message.content) - - if chunk.delta.usage: - increase_usage(llm_usage, chunk.delta.usage) - current_llm_usage = chunk.delta.usage - - yield chunk - else: - result = chunks - # check if there is any tool call - if self.check_blocking_tool_calls(result): - function_call_state = True - tool_calls.extend(self.extract_blocking_tool_calls(result) or []) - tool_call_names = ";".join([tool_call[1] for tool_call in tool_calls]) - try: - tool_call_inputs = json.dumps( - {tool_call[1]: tool_call[2] for tool_call in tool_calls}, ensure_ascii=False - ) - except TypeError: - # fallback: force 
ASCII to handle non-serializable objects - tool_call_inputs = json.dumps({tool_call[1]: tool_call[2] for tool_call in tool_calls}) - - if result.usage: - increase_usage(llm_usage, result.usage) - current_llm_usage = result.usage - - if result.message and result.message.content: - if isinstance(result.message.content, list): - for content in result.message.content: - response += content.data - else: - response += str(result.message.content) - - if not result.message.content: - result.message.content = "" - - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - yield LLMResultChunk( - model=model_instance.model, - prompt_messages=result.prompt_messages, - system_fingerprint=result.system_fingerprint, - delta=LLMResultChunkDelta( - index=0, - message=result.message, - usage=result.usage, - ), - ) - - assistant_message = AssistantPromptMessage(content="", tool_calls=[]) - if tool_calls: - assistant_message.tool_calls = [ - AssistantPromptMessage.ToolCall( - id=tool_call[0], - type="function", - function=AssistantPromptMessage.ToolCall.ToolCallFunction( - name=tool_call[1], arguments=json.dumps(tool_call[2], ensure_ascii=False) - ), - ) - for tool_call in tool_calls - ] - else: - assistant_message.content = response - - self._current_thoughts.append(assistant_message) - - # save thought - self.save_agent_thought( - agent_thought_id=agent_thought_id, - tool_name=tool_call_names, - tool_input=tool_call_inputs, - thought=response, - tool_invoke_meta=None, - observation=None, - answer=response, - messages_ids=[], - llm_usage=current_llm_usage, - ) - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - final_answer += response + "\n" - - # call tools - tool_responses = [] - for tool_call_id, tool_call_name, tool_call_args in tool_calls: - tool_instance = tool_instances.get(tool_call_name) - if not tool_instance: - tool_response = { - "tool_call_id": tool_call_id, - "tool_call_name": tool_call_name, - "tool_response": f"there is not a tool named {tool_call_name}", - "meta": ToolInvokeMeta.error_instance(f"there is not a tool named {tool_call_name}").to_dict(), - } - else: - # invoke tool - tool_invoke_response, message_files, tool_invoke_meta = ToolEngine.agent_invoke( - tool=tool_instance, - tool_parameters=tool_call_args, - user_id=self.user_id, - tenant_id=self.tenant_id, - message=self.message, - invoke_from=self.application_generate_entity.invoke_from, - agent_tool_callback=self.agent_callback, - trace_manager=trace_manager, - app_id=self.application_generate_entity.app_config.app_id, - message_id=self.message.id, - conversation_id=self.conversation.id, - ) - # publish files - for message_file_id in message_files: - # publish message file - self.queue_manager.publish( - QueueMessageFileEvent(message_file_id=message_file_id), PublishFrom.APPLICATION_MANAGER - ) - # add message file ids - message_file_ids.append(message_file_id) - - tool_response = { - "tool_call_id": tool_call_id, - "tool_call_name": tool_call_name, - "tool_response": tool_invoke_response, - "meta": tool_invoke_meta.to_dict(), - } - - tool_responses.append(tool_response) - if tool_response["tool_response"] is not None: - self._current_thoughts.append( - ToolPromptMessage( - content=str(tool_response["tool_response"]), - tool_call_id=tool_call_id, - name=tool_call_name, - ) - ) - - if len(tool_responses) > 0: - # save agent thought - self.save_agent_thought( - 
agent_thought_id=agent_thought_id, - tool_name="", - tool_input="", - thought="", - tool_invoke_meta={ - tool_response["tool_call_name"]: tool_response["meta"] for tool_response in tool_responses - }, - observation={ - tool_response["tool_call_name"]: tool_response["tool_response"] - for tool_response in tool_responses - }, - answer="", - messages_ids=message_file_ids, - ) - self.queue_manager.publish( - QueueAgentThoughtEvent(agent_thought_id=agent_thought_id), PublishFrom.APPLICATION_MANAGER - ) - - # update prompt tool - for prompt_tool in prompt_messages_tools: - self.update_prompt_message_tool(tool_instances[prompt_tool.name], prompt_tool) - - iteration_step += 1 - - # publish end event - self.queue_manager.publish( - QueueMessageEndEvent( - llm_result=LLMResult( - model=model_instance.model, - prompt_messages=prompt_messages, - message=AssistantPromptMessage(content=final_answer), - usage=llm_usage["usage"] or LLMUsage.empty_usage(), - system_fingerprint="", - ) - ), - PublishFrom.APPLICATION_MANAGER, - ) - - def check_tool_calls(self, llm_result_chunk: LLMResultChunk) -> bool: - """ - Check if there is any tool call in llm result chunk - """ - if llm_result_chunk.delta.message.tool_calls: - return True - return False - - def check_blocking_tool_calls(self, llm_result: LLMResult) -> bool: - """ - Check if there is any blocking tool call in llm result - """ - if llm_result.message.tool_calls: - return True - return False - - def extract_tool_calls(self, llm_result_chunk: LLMResultChunk) -> list[tuple[str, str, dict[str, Any]]]: - """ - Extract tool calls from llm result chunk - - Returns: - List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)] - """ - tool_calls = [] - for prompt_message in llm_result_chunk.delta.message.tool_calls: - args = {} - if prompt_message.function.arguments != "": - args = json.loads(prompt_message.function.arguments) - - tool_calls.append( - ( - prompt_message.id, - prompt_message.function.name, - args, - ) - ) - - return tool_calls - - def extract_blocking_tool_calls(self, llm_result: LLMResult) -> list[tuple[str, str, dict[str, Any]]]: - """ - Extract blocking tool calls from llm result - - Returns: - List[Tuple[str, str, Dict[str, Any]]]: [(tool_call_id, tool_call_name, tool_call_args)] - """ - tool_calls = [] - for prompt_message in llm_result.message.tool_calls: - args = {} - if prompt_message.function.arguments != "": - args = json.loads(prompt_message.function.arguments) - - tool_calls.append( - ( - prompt_message.id, - prompt_message.function.name, - args, - ) - ) - - return tool_calls - - def _init_system_message(self, prompt_template: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - Initialize system message - """ - if not prompt_messages and prompt_template: - return [ - SystemPromptMessage(content=prompt_template), - ] - - if prompt_messages and not isinstance(prompt_messages[0], SystemPromptMessage) and prompt_template: - prompt_messages.insert(0, SystemPromptMessage(content=prompt_template)) - - return prompt_messages or [] - - def _organize_user_query(self, query: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - Organize user query - """ - if self.files: - # get image detail config - image_detail_config = ( - self.application_generate_entity.file_upload_config.image_config.detail - if ( - self.application_generate_entity.file_upload_config - and self.application_generate_entity.file_upload_config.image_config - ) - else None - ) - image_detail_config = 
image_detail_config or ImagePromptMessageContent.DETAIL.LOW - - prompt_message_contents: list[PromptMessageContentUnionTypes] = [] - for file in self.files: - prompt_message_contents.append( - file_manager.to_prompt_message_content( - file, - image_detail_config=image_detail_config, - ) - ) - prompt_message_contents.append(TextPromptMessageContent(data=query)) - - prompt_messages.append(UserPromptMessage(content=prompt_message_contents)) - else: - prompt_messages.append(UserPromptMessage(content=query)) - - return prompt_messages - - def _clear_user_prompt_image_messages(self, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: - """ - As for now, gpt supports both fc and vision at the first iteration. - We need to remove the image messages from the prompt messages at the first iteration. - """ - prompt_messages = deepcopy(prompt_messages) - - for prompt_message in prompt_messages: - if isinstance(prompt_message, UserPromptMessage): - if isinstance(prompt_message.content, list): - prompt_message.content = "\n".join( - [ - content.data - if content.type == PromptMessageContentType.TEXT - else "[image]" - if content.type == PromptMessageContentType.IMAGE - else "[file]" - for content in prompt_message.content - ] - ) - - return prompt_messages - - def _organize_prompt_messages(self): - prompt_template = self.app_config.prompt_template.simple_prompt_template or "" - self.history_prompt_messages = self._init_system_message(prompt_template, self.history_prompt_messages) - query_prompt_messages = self._organize_user_query(self.query or "", []) - - self.history_prompt_messages = AgentHistoryPromptTransform( - model_config=self.model_config, - prompt_messages=[*query_prompt_messages, *self._current_thoughts], - history_messages=self.history_prompt_messages, - memory=self.memory, - ).get_prompt() - - prompt_messages = [*self.history_prompt_messages, *query_prompt_messages, *self._current_thoughts] - if len(self._current_thoughts) != 0: - # clear messages after the first iteration - prompt_messages = self._clear_user_prompt_image_messages(prompt_messages) - return prompt_messages diff --git a/api/core/agent/patterns/README.md b/api/core/agent/patterns/README.md new file mode 100644 index 0000000000..f6437ba05a --- /dev/null +++ b/api/core/agent/patterns/README.md @@ -0,0 +1,67 @@ +# Agent Patterns + +A unified agent pattern module that provides common agent execution strategies for both Agent V2 nodes and Agent Applications in Dify. + +## Overview + +This module implements a strategy pattern for agent execution, automatically selecting the appropriate strategy based on model capabilities. It serves as the core engine for agent-based interactions across different components of the Dify platform. + +## Key Features + +### 1. Multiple Agent Strategies + +- **Function Call Strategy**: Leverages native function/tool calling capabilities of advanced LLMs (e.g., GPT-4, Claude) +- **ReAct Strategy**: Implements the ReAct (Reasoning + Acting) approach for models without native function calling support + +### 2. Automatic Strategy Selection + +The `StrategyFactory` intelligently selects the optimal strategy based on model features: + +- Models with `TOOL_CALL`, `MULTI_TOOL_CALL`, or `STREAM_TOOL_CALL` capabilities → Function Call Strategy +- Other models → ReAct Strategy + +### 3. 
Unified Interface + +- Common base class (`AgentPattern`) ensures consistent behavior across strategies +- Seamless integration with both workflow nodes and standalone agent applications +- Standardized input/output formats for easy consumption + +### 4. Advanced Capabilities + +- **Streaming Support**: Real-time response streaming for better user experience +- **File Handling**: Built-in support for processing and managing files during agent execution +- **Iteration Control**: Configurable maximum iterations with safety limits (capped at 99) +- **Tool Management**: Flexible tool integration supporting various tool types +- **Context Propagation**: Execution context for tracing, auditing, and debugging + +## Architecture + +``` +agent/patterns/ +├── base.py # Abstract base class defining the agent pattern interface +├── function_call.py # Implementation using native LLM function calling +├── react.py # Implementation using ReAct prompting approach +└── strategy_factory.py # Factory for automatic strategy selection +``` + +## Usage + +The module is designed to be used by: + +1. **Agent V2 Nodes**: In workflow orchestration for complex agent tasks +1. **Agent Applications**: For standalone conversational agents +1. **Custom Implementations**: As a foundation for building specialized agent behaviors + +## Integration Points + +- **Model Runtime**: Interfaces with Dify's model runtime for LLM interactions +- **Tool System**: Integrates with the tool framework for external capabilities +- **Memory Management**: Compatible with conversation memory systems +- **File Management**: Handles file inputs/outputs during agent execution + +## Benefits + +1. **Consistency**: Unified implementation reduces code duplication and maintenance overhead +1. **Flexibility**: Easy to extend with new strategies or customize existing ones +1. **Performance**: Optimized for each model's capabilities to ensure best performance +1. **Reliability**: Built-in safety mechanisms and error handling diff --git a/api/core/agent/patterns/__init__.py b/api/core/agent/patterns/__init__.py new file mode 100644 index 0000000000..8a3b125533 --- /dev/null +++ b/api/core/agent/patterns/__init__.py @@ -0,0 +1,19 @@ +"""Agent patterns module. 
+ +This module provides different strategies for agent execution: +- FunctionCallStrategy: Uses native function/tool calling +- ReActStrategy: Uses ReAct (Reasoning + Acting) approach +- StrategyFactory: Factory for creating strategies based on model features +""" + +from .base import AgentPattern +from .function_call import FunctionCallStrategy +from .react import ReActStrategy +from .strategy_factory import StrategyFactory + +__all__ = [ + "AgentPattern", + "FunctionCallStrategy", + "ReActStrategy", + "StrategyFactory", +] diff --git a/api/core/agent/patterns/base.py b/api/core/agent/patterns/base.py new file mode 100644 index 0000000000..9f010bed6a --- /dev/null +++ b/api/core/agent/patterns/base.py @@ -0,0 +1,444 @@ +"""Base class for agent strategies.""" + +from __future__ import annotations + +import json +import re +import time +from abc import ABC, abstractmethod +from collections.abc import Callable, Generator +from typing import TYPE_CHECKING, Any + +from core.agent.entities import AgentLog, AgentResult, ExecutionContext +from core.file import File +from core.model_manager import ModelInstance +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMResultChunkDelta, + PromptMessage, + PromptMessageTool, +) +from core.model_runtime.entities.llm_entities import LLMUsage +from core.model_runtime.entities.message_entities import TextPromptMessageContent +from core.tools.entities.tool_entities import ToolInvokeMessage, ToolInvokeMeta + +if TYPE_CHECKING: + from core.tools.__base.tool import Tool + +# Type alias for tool invoke hook +# Returns: (response_content, message_file_ids, tool_invoke_meta) +ToolInvokeHook = Callable[["Tool", dict[str, Any], str], tuple[str, list[str], ToolInvokeMeta]] + + +class AgentPattern(ABC): + """Base class for agent execution strategies.""" + + def __init__( + self, + model_instance: ModelInstance, + tools: list[Tool], + context: ExecutionContext, + max_iterations: int = 10, + workflow_call_depth: int = 0, + files: list[File] = [], + tool_invoke_hook: ToolInvokeHook | None = None, + ): + """Initialize the agent strategy.""" + self.model_instance = model_instance + self.tools = tools + self.context = context + self.max_iterations = min(max_iterations, 99) # Cap at 99 iterations + self.workflow_call_depth = workflow_call_depth + self.files: list[File] = files + self.tool_invoke_hook = tool_invoke_hook + + @abstractmethod + def run( + self, + prompt_messages: list[PromptMessage], + model_parameters: dict[str, Any], + stop: list[str] = [], + stream: bool = True, + ) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]: + """Execute the agent strategy.""" + pass + + def _accumulate_usage(self, total_usage: dict[str, Any], delta_usage: LLMUsage) -> None: + """Accumulate LLM usage statistics.""" + if not total_usage.get("usage"): + # Create a copy to avoid modifying the original + total_usage["usage"] = LLMUsage( + prompt_tokens=delta_usage.prompt_tokens, + prompt_unit_price=delta_usage.prompt_unit_price, + prompt_price_unit=delta_usage.prompt_price_unit, + prompt_price=delta_usage.prompt_price, + completion_tokens=delta_usage.completion_tokens, + completion_unit_price=delta_usage.completion_unit_price, + completion_price_unit=delta_usage.completion_price_unit, + completion_price=delta_usage.completion_price, + total_tokens=delta_usage.total_tokens, + total_price=delta_usage.total_price, + currency=delta_usage.currency, + latency=delta_usage.latency, + ) + else: + current: LLMUsage = total_usage["usage"] + 
current.prompt_tokens += delta_usage.prompt_tokens + current.completion_tokens += delta_usage.completion_tokens + current.total_tokens += delta_usage.total_tokens + current.prompt_price += delta_usage.prompt_price + current.completion_price += delta_usage.completion_price + current.total_price += delta_usage.total_price + + def _extract_content(self, content: Any) -> str: + """Extract text content from message content.""" + if isinstance(content, list): + # Content items are PromptMessageContentUnionTypes + text_parts = [] + for c in content: + # Check if it's a TextPromptMessageContent (which has data attribute) + if isinstance(c, TextPromptMessageContent): + text_parts.append(c.data) + return "".join(text_parts) + return str(content) + + def _has_tool_calls(self, chunk: LLMResultChunk) -> bool: + """Check if chunk contains tool calls.""" + # LLMResultChunk always has delta attribute + return bool(chunk.delta.message and chunk.delta.message.tool_calls) + + def _has_tool_calls_result(self, result: LLMResult) -> bool: + """Check if result contains tool calls (non-streaming).""" + # LLMResult always has message attribute + return bool(result.message and result.message.tool_calls) + + def _extract_tool_calls(self, chunk: LLMResultChunk) -> list[tuple[str, str, dict[str, Any]]]: + """Extract tool calls from streaming chunk.""" + tool_calls: list[tuple[str, str, dict[str, Any]]] = [] + if chunk.delta.message and chunk.delta.message.tool_calls: + for tool_call in chunk.delta.message.tool_calls: + if tool_call.function: + try: + args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {} + except json.JSONDecodeError: + args = {} + tool_calls.append((tool_call.id or "", tool_call.function.name, args)) + return tool_calls + + def _extract_tool_calls_result(self, result: LLMResult) -> list[tuple[str, str, dict[str, Any]]]: + """Extract tool calls from non-streaming result.""" + tool_calls = [] + if result.message and result.message.tool_calls: + for tool_call in result.message.tool_calls: + if tool_call.function: + try: + args = json.loads(tool_call.function.arguments) if tool_call.function.arguments else {} + except json.JSONDecodeError: + args = {} + tool_calls.append((tool_call.id or "", tool_call.function.name, args)) + return tool_calls + + def _extract_text_from_message(self, message: PromptMessage) -> str: + """Extract text content from a prompt message.""" + # PromptMessage always has content attribute + content = message.content + if isinstance(content, str): + return content + elif isinstance(content, list): + # Extract text from content list + text_parts = [] + for item in content: + if isinstance(item, TextPromptMessageContent): + text_parts.append(item.data) + return " ".join(text_parts) + return "" + + def _create_log( + self, + label: str, + log_type: AgentLog.LogType, + status: AgentLog.LogStatus, + data: dict[str, Any] | None = None, + parent_id: str | None = None, + extra_metadata: dict[AgentLog.LogMetadata, Any] | None = None, + ) -> AgentLog: + """Create a new AgentLog with standard metadata.""" + metadata = { + AgentLog.LogMetadata.STARTED_AT: time.perf_counter(), + } + if extra_metadata: + metadata.update(extra_metadata) + + return AgentLog( + label=label, + log_type=log_type, + status=status, + data=data or {}, + parent_id=parent_id, + metadata=metadata, + ) + + def _finish_log( + self, + log: AgentLog, + data: dict[str, Any] | None = None, + usage: LLMUsage | None = None, + ) -> AgentLog: + """Finish an AgentLog by updating its status and 
metadata.""" + log.status = AgentLog.LogStatus.SUCCESS + + if data is not None: + log.data = data + + # Calculate elapsed time + started_at = log.metadata.get(AgentLog.LogMetadata.STARTED_AT, time.perf_counter()) + finished_at = time.perf_counter() + + # Update metadata + log.metadata = { + **log.metadata, + AgentLog.LogMetadata.FINISHED_AT: finished_at, + AgentLog.LogMetadata.ELAPSED_TIME: finished_at - started_at, + } + + # Add usage information if provided + if usage: + log.metadata.update( + { + AgentLog.LogMetadata.TOTAL_PRICE: usage.total_price, + AgentLog.LogMetadata.CURRENCY: usage.currency, + AgentLog.LogMetadata.TOTAL_TOKENS: usage.total_tokens, + AgentLog.LogMetadata.LLM_USAGE: usage, + } + ) + + return log + + def _replace_file_references(self, tool_args: dict[str, Any]) -> dict[str, Any]: + """ + Replace file references in tool arguments with actual File objects. + + Args: + tool_args: Dictionary of tool arguments + + Returns: + Updated tool arguments with file references replaced + """ + # Process each argument in the dictionary + processed_args: dict[str, Any] = {} + for key, value in tool_args.items(): + processed_args[key] = self._process_file_reference(value) + return processed_args + + def _process_file_reference(self, data: Any) -> Any: + """ + Recursively process data to replace file references. + Supports both single file [File: file_id] and multiple files [Files: file_id1, file_id2, ...]. + + Args: + data: The data to process (can be dict, list, str, or other types) + + Returns: + Processed data with file references replaced + """ + single_file_pattern = re.compile(r"^\[File:\s*([^\]]+)\]$") + multiple_files_pattern = re.compile(r"^\[Files:\s*([^\]]+)\]$") + + if isinstance(data, dict): + # Process dictionary recursively + return {key: self._process_file_reference(value) for key, value in data.items()} + elif isinstance(data, list): + # Process list recursively + return [self._process_file_reference(item) for item in data] + elif isinstance(data, str): + # Check for single file pattern [File: file_id] + single_match = single_file_pattern.match(data.strip()) + if single_match: + file_id = single_match.group(1).strip() + # Find the file in self.files + for file in self.files: + if file.id and str(file.id) == file_id: + return file + # If file not found, return original value + return data + + # Check for multiple files pattern [Files: file_id1, file_id2, ...] 
+ multiple_match = multiple_files_pattern.match(data.strip()) + if multiple_match: + file_ids_str = multiple_match.group(1).strip() + # Split by comma and strip whitespace + file_ids = [fid.strip() for fid in file_ids_str.split(",")] + + # Find all matching files + matched_files: list[File] = [] + for file_id in file_ids: + for file in self.files: + if file.id and str(file.id) == file_id: + matched_files.append(file) + break + + # Return list of files if any were found, otherwise return original + return matched_files or data + + return data + else: + # Return other types as-is + return data + + def _create_text_chunk(self, text: str, prompt_messages: list[PromptMessage]) -> LLMResultChunk: + """Create a text chunk for streaming.""" + return LLMResultChunk( + model=self.model_instance.model, + prompt_messages=prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=AssistantPromptMessage(content=text), + usage=None, + ), + system_fingerprint="", + ) + + def _invoke_tool( + self, + tool_instance: Tool, + tool_args: dict[str, Any], + tool_name: str, + ) -> tuple[str, list[File], ToolInvokeMeta | None]: + """ + Invoke a tool and collect its response. + + Args: + tool_instance: The tool instance to invoke + tool_args: Tool arguments + tool_name: Name of the tool + + Returns: + Tuple of (response_content, tool_files, tool_invoke_meta) + """ + # Process tool_args to replace file references with actual File objects + tool_args = self._replace_file_references(tool_args) + + # If a tool invoke hook is set, use it instead of generic_invoke + if self.tool_invoke_hook: + response_content, _, tool_invoke_meta = self.tool_invoke_hook(tool_instance, tool_args, tool_name) + # Note: message_file_ids are stored in DB, we don't convert them to File objects here + # The caller (AgentAppRunner) handles file publishing + return response_content, [], tool_invoke_meta + + # Default: use generic_invoke for workflow scenarios + # Import here to avoid circular import + from core.tools.tool_engine import DifyWorkflowCallbackHandler, ToolEngine + + tool_response = ToolEngine().generic_invoke( + tool=tool_instance, + tool_parameters=tool_args, + user_id=self.context.user_id or "", + workflow_tool_callback=DifyWorkflowCallbackHandler(), + workflow_call_depth=self.workflow_call_depth, + app_id=self.context.app_id, + conversation_id=self.context.conversation_id, + message_id=self.context.message_id, + ) + + # Collect response and files + response_content = "" + tool_files: list[File] = [] + + for response in tool_response: + if response.type == ToolInvokeMessage.MessageType.TEXT: + assert isinstance(response.message, ToolInvokeMessage.TextMessage) + response_content += response.message.text + + elif response.type == ToolInvokeMessage.MessageType.LINK: + # Handle link messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + response_content += f"[Link: {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.IMAGE: + # Handle image URL messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + response_content += f"[Image: {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.IMAGE_LINK: + # Handle image link messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + response_content += f"[Image: {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.BINARY_LINK: + # Handle binary file link messages + if isinstance(response.message, ToolInvokeMessage.TextMessage): + filename = 
response.meta.get("filename", "file") if response.meta else "file" + response_content += f"[File: {filename} - {response.message.text}]" + + elif response.type == ToolInvokeMessage.MessageType.JSON: + # Handle JSON messages + if isinstance(response.message, ToolInvokeMessage.JsonMessage): + response_content += json.dumps(response.message.json_object, ensure_ascii=False, indent=2) + + elif response.type == ToolInvokeMessage.MessageType.BLOB: + # Handle blob messages - convert to text representation + if isinstance(response.message, ToolInvokeMessage.BlobMessage): + mime_type = ( + response.meta.get("mime_type", "application/octet-stream") + if response.meta + else "application/octet-stream" + ) + size = len(response.message.blob) + response_content += f"[Binary data: {mime_type}, size: {size} bytes]" + + elif response.type == ToolInvokeMessage.MessageType.VARIABLE: + # Handle variable messages + if isinstance(response.message, ToolInvokeMessage.VariableMessage): + var_name = response.message.variable_name + var_value = response.message.variable_value + if isinstance(var_value, str): + response_content += var_value + else: + response_content += f"[Variable {var_name}: {json.dumps(var_value, ensure_ascii=False)}]" + + elif response.type == ToolInvokeMessage.MessageType.BLOB_CHUNK: + # Handle blob chunk messages - these are parts of a larger blob + if isinstance(response.message, ToolInvokeMessage.BlobChunkMessage): + response_content += f"[Blob chunk {response.message.sequence}: {len(response.message.blob)} bytes]" + + elif response.type == ToolInvokeMessage.MessageType.RETRIEVER_RESOURCES: + # Handle retriever resources messages + if isinstance(response.message, ToolInvokeMessage.RetrieverResourceMessage): + response_content += response.message.context + + elif response.type == ToolInvokeMessage.MessageType.FILE: + # Extract file from meta + if response.meta and "file" in response.meta: + file = response.meta["file"] + if isinstance(file, File): + # Check if file is for model or tool output + if response.meta.get("target") == "self": + # File is for model - add to files for next prompt + self.files.append(file) + response_content += f"File '{file.filename}' has been loaded into your context." 
+ else: + # File is tool output + tool_files.append(file) + + return response_content, tool_files, None + + def _find_tool_by_name(self, tool_name: str) -> Tool | None: + """Find a tool instance by its name.""" + for tool in self.tools: + if tool.entity.identity.name == tool_name: + return tool + return None + + def _convert_tools_to_prompt_format(self) -> list[PromptMessageTool]: + """Convert tools to prompt message format.""" + prompt_tools: list[PromptMessageTool] = [] + for tool in self.tools: + prompt_tools.append(tool.to_prompt_message_tool()) + return prompt_tools + + def _update_usage_with_empty(self, llm_usage: dict[str, Any]) -> None: + """Initialize usage tracking with empty usage if not set.""" + if "usage" not in llm_usage or llm_usage["usage"] is None: + llm_usage["usage"] = LLMUsage.empty_usage() diff --git a/api/core/agent/patterns/function_call.py b/api/core/agent/patterns/function_call.py new file mode 100644 index 0000000000..2c8664c419 --- /dev/null +++ b/api/core/agent/patterns/function_call.py @@ -0,0 +1,273 @@ +"""Function Call strategy implementation.""" + +import json +from collections.abc import Generator +from typing import Any, Union + +from core.agent.entities import AgentLog, AgentResult +from core.file import File +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMResultChunkDelta, + LLMUsage, + PromptMessage, + PromptMessageTool, + ToolPromptMessage, +) +from core.tools.entities.tool_entities import ToolInvokeMeta + +from .base import AgentPattern + + +class FunctionCallStrategy(AgentPattern): + """Function Call strategy using model's native tool calling capability.""" + + def run( + self, + prompt_messages: list[PromptMessage], + model_parameters: dict[str, Any], + stop: list[str] = [], + stream: bool = True, + ) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]: + """Execute the function call agent strategy.""" + # Convert tools to prompt format + prompt_tools: list[PromptMessageTool] = self._convert_tools_to_prompt_format() + + # Initialize tracking + iteration_step: int = 1 + max_iterations: int = self.max_iterations + 1 + function_call_state: bool = True + total_usage: dict[str, LLMUsage | None] = {"usage": None} + messages: list[PromptMessage] = list(prompt_messages) # Create mutable copy + final_text: str = "" + finish_reason: str | None = None + output_files: list[File] = [] # Track files produced by tools + + while function_call_state and iteration_step <= max_iterations: + function_call_state = False + round_log = self._create_log( + label=f"ROUND {iteration_step}", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"round_index": iteration_step}, + ) + yield round_log + # On last iteration, remove tools to force final answer + current_tools: list[PromptMessageTool] = [] if iteration_step == max_iterations else prompt_tools + model_log = self._create_log( + label=f"{self.model_instance.model} Thought", + log_type=AgentLog.LogType.THOUGHT, + status=AgentLog.LogStatus.START, + data={}, + parent_id=round_log.id, + extra_metadata={ + AgentLog.LogMetadata.PROVIDER: self.model_instance.provider, + }, + ) + yield model_log + + # Track usage for this round only + round_usage: dict[str, LLMUsage | None] = {"usage": None} + + # Invoke model + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = self.model_instance.invoke_llm( + prompt_messages=messages, + model_parameters=model_parameters, + tools=current_tools, + stop=stop, + stream=stream, + 
user=self.context.user_id, + callbacks=[], + ) + + # Process response + tool_calls, response_content, chunk_finish_reason = yield from self._handle_chunks( + chunks, round_usage, model_log + ) + messages.append(self._create_assistant_message(response_content, tool_calls)) + + # Accumulate to total usage + round_usage_value = round_usage.get("usage") + if round_usage_value: + self._accumulate_usage(total_usage, round_usage_value) + + # Update final text if no tool calls (this is likely the final answer) + if not tool_calls: + final_text = response_content + + # Update finish reason + if chunk_finish_reason: + finish_reason = chunk_finish_reason + + # Process tool calls + tool_outputs: dict[str, str] = {} + if tool_calls: + function_call_state = True + # Execute tools + for tool_call_id, tool_name, tool_args in tool_calls: + tool_response, tool_files, _ = yield from self._handle_tool_call( + tool_name, tool_args, tool_call_id, messages, round_log + ) + tool_outputs[tool_name] = tool_response + # Track files produced by tools + output_files.extend(tool_files) + yield self._finish_log( + round_log, + data={ + "llm_result": response_content, + "tool_calls": [ + {"name": tc[1], "args": tc[2], "output": tool_outputs.get(tc[1], "")} for tc in tool_calls + ] + if tool_calls + else [], + "final_answer": final_text if not function_call_state else None, + }, + usage=round_usage.get("usage"), + ) + iteration_step += 1 + + # Return final result + from core.agent.entities import AgentResult + + return AgentResult( + text=final_text, + files=output_files, + usage=total_usage.get("usage") or LLMUsage.empty_usage(), + finish_reason=finish_reason, + ) + + def _handle_chunks( + self, + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult], + llm_usage: dict[str, LLMUsage | None], + start_log: AgentLog, + ) -> Generator[ + LLMResultChunk | AgentLog, + None, + tuple[list[tuple[str, str, dict[str, Any]]], str, str | None], + ]: + """Handle LLM response chunks and extract tool calls and content. + + Returns a tuple of (tool_calls, response_content, finish_reason). 
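+ Streaming chunks are re-yielded to the caller as they arrive; a blocking LLMResult is converted into a single chunk, and the model log is finished before returning.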
+ """ + tool_calls: list[tuple[str, str, dict[str, Any]]] = [] + response_content: str = "" + finish_reason: str | None = None + if isinstance(chunks, Generator): + # Streaming response + for chunk in chunks: + # Extract tool calls + if self._has_tool_calls(chunk): + tool_calls.extend(self._extract_tool_calls(chunk)) + + # Extract content + if chunk.delta.message and chunk.delta.message.content: + response_content += self._extract_content(chunk.delta.message.content) + + # Track usage + if chunk.delta.usage: + self._accumulate_usage(llm_usage, chunk.delta.usage) + + # Capture finish reason + if chunk.delta.finish_reason: + finish_reason = chunk.delta.finish_reason + + yield chunk + else: + # Non-streaming response + result: LLMResult = chunks + + if self._has_tool_calls_result(result): + tool_calls.extend(self._extract_tool_calls_result(result)) + + if result.message and result.message.content: + response_content += self._extract_content(result.message.content) + + if result.usage: + self._accumulate_usage(llm_usage, result.usage) + + # Convert to streaming format + yield LLMResultChunk( + model=result.model, + prompt_messages=result.prompt_messages, + delta=LLMResultChunkDelta(index=0, message=result.message, usage=result.usage), + ) + yield self._finish_log( + start_log, + data={ + "result": response_content, + }, + usage=llm_usage.get("usage"), + ) + return tool_calls, response_content, finish_reason + + def _create_assistant_message( + self, content: str, tool_calls: list[tuple[str, str, dict[str, Any]]] | None = None + ) -> AssistantPromptMessage: + """Create assistant message with tool calls.""" + if tool_calls is None: + return AssistantPromptMessage(content=content) + return AssistantPromptMessage( + content=content or "", + tool_calls=[ + AssistantPromptMessage.ToolCall( + id=tc[0], + type="function", + function=AssistantPromptMessage.ToolCall.ToolCallFunction(name=tc[1], arguments=json.dumps(tc[2])), + ) + for tc in tool_calls + ], + ) + + def _handle_tool_call( + self, + tool_name: str, + tool_args: dict[str, Any], + tool_call_id: str, + messages: list[PromptMessage], + round_log: AgentLog, + ) -> Generator[AgentLog, None, tuple[str, list[File], ToolInvokeMeta | None]]: + """Handle a single tool call and return response with files and meta.""" + # Find tool + tool_instance = self._find_tool_by_name(tool_name) + if not tool_instance: + raise ValueError(f"Tool {tool_name} not found") + + # Create tool call log + tool_call_log = self._create_log( + label=f"CALL {tool_name}", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={ + "tool_call_id": tool_call_id, + "tool_name": tool_name, + "tool_args": tool_args, + }, + parent_id=round_log.id, + ) + yield tool_call_log + + # Invoke tool using base class method + response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args, tool_name) + + yield self._finish_log( + tool_call_log, + data={ + **tool_call_log.data, + "output": response_content, + "files": len(tool_files), + "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None, + }, + ) + final_content = response_content or "Tool executed successfully" + # Add tool response to messages + messages.append( + ToolPromptMessage( + content=final_content, + tool_call_id=tool_call_id, + name=tool_name, + ) + ) + return response_content, tool_files, tool_invoke_meta diff --git a/api/core/agent/patterns/react.py b/api/core/agent/patterns/react.py new file mode 100644 index 0000000000..46a0dbd61e --- /dev/null +++ 
b/api/core/agent/patterns/react.py @@ -0,0 +1,402 @@ +"""ReAct strategy implementation.""" + +from __future__ import annotations + +import json +from collections.abc import Generator +from typing import TYPE_CHECKING, Any, Union + +from core.agent.entities import AgentLog, AgentResult, AgentScratchpadUnit, ExecutionContext +from core.agent.output_parser.cot_output_parser import CotAgentOutputParser +from core.file import File +from core.model_manager import ModelInstance +from core.model_runtime.entities import ( + AssistantPromptMessage, + LLMResult, + LLMResultChunk, + LLMResultChunkDelta, + PromptMessage, + SystemPromptMessage, +) + +from .base import AgentPattern, ToolInvokeHook + +if TYPE_CHECKING: + from core.tools.__base.tool import Tool + + +class ReActStrategy(AgentPattern): + """ReAct strategy using reasoning and acting approach.""" + + def __init__( + self, + model_instance: ModelInstance, + tools: list[Tool], + context: ExecutionContext, + max_iterations: int = 10, + workflow_call_depth: int = 0, + files: list[File] = [], + tool_invoke_hook: ToolInvokeHook | None = None, + instruction: str = "", + ): + """Initialize the ReAct strategy with instruction support.""" + super().__init__( + model_instance=model_instance, + tools=tools, + context=context, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + files=files, + tool_invoke_hook=tool_invoke_hook, + ) + self.instruction = instruction + + def run( + self, + prompt_messages: list[PromptMessage], + model_parameters: dict[str, Any], + stop: list[str] = [], + stream: bool = True, + ) -> Generator[LLMResultChunk | AgentLog, None, AgentResult]: + """Execute the ReAct agent strategy.""" + # Initialize tracking + agent_scratchpad: list[AgentScratchpadUnit] = [] + iteration_step: int = 1 + max_iterations: int = self.max_iterations + 1 + react_state: bool = True + total_usage: dict[str, Any] = {"usage": None} + output_files: list[File] = [] # Track files produced by tools + final_text: str = "" + finish_reason: str | None = None + + # Add "Observation" to stop sequences + if "Observation" not in stop: + stop = stop.copy() + stop.append("Observation") + + while react_state and iteration_step <= max_iterations: + react_state = False + round_log = self._create_log( + label=f"ROUND {iteration_step}", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"round_index": iteration_step}, + ) + yield round_log + + # Build prompt with/without tools based on iteration + include_tools = iteration_step < max_iterations + current_messages = self._build_prompt_with_react_format( + prompt_messages, agent_scratchpad, include_tools, self.instruction + ) + + model_log = self._create_log( + label=f"{self.model_instance.model} Thought", + log_type=AgentLog.LogType.THOUGHT, + status=AgentLog.LogStatus.START, + data={}, + parent_id=round_log.id, + extra_metadata={ + AgentLog.LogMetadata.PROVIDER: self.model_instance.provider, + }, + ) + yield model_log + + # Track usage for this round only + round_usage: dict[str, Any] = {"usage": None} + + # Use current messages directly (files are handled by base class if needed) + messages_to_use = current_messages + + # Invoke model + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult] = self.model_instance.invoke_llm( + prompt_messages=messages_to_use, + model_parameters=model_parameters, + stop=stop, + stream=stream, + user=self.context.user_id or "", + callbacks=[], + ) + + # Process response + scratchpad, chunk_finish_reason = yield from 
self._handle_chunks( + chunks, round_usage, model_log, current_messages + ) + agent_scratchpad.append(scratchpad) + + # Accumulate to total usage + round_usage_value = round_usage.get("usage") + if round_usage_value: + self._accumulate_usage(total_usage, round_usage_value) + + # Update finish reason + if chunk_finish_reason: + finish_reason = chunk_finish_reason + + # Check if we have an action to execute + if scratchpad.action and scratchpad.action.action_name.lower() != "final answer": + react_state = True + # Execute tool + observation, tool_files = yield from self._handle_tool_call( + scratchpad.action, current_messages, round_log + ) + scratchpad.observation = observation + # Track files produced by tools + output_files.extend(tool_files) + + # Add observation to scratchpad for display + yield self._create_text_chunk(f"\nObservation: {observation}\n", current_messages) + else: + # Extract final answer + if scratchpad.action and scratchpad.action.action_input: + final_answer = scratchpad.action.action_input + if isinstance(final_answer, dict): + final_answer = json.dumps(final_answer, ensure_ascii=False) + final_text = str(final_answer) + elif scratchpad.thought: + # If no action but we have thought, use thought as final answer + final_text = scratchpad.thought + + yield self._finish_log( + round_log, + data={ + "thought": scratchpad.thought, + "action": scratchpad.action_str if scratchpad.action else None, + "observation": scratchpad.observation or None, + "final_answer": final_text if not react_state else None, + }, + usage=round_usage.get("usage"), + ) + iteration_step += 1 + + # Return final result + + from core.agent.entities import AgentResult + + return AgentResult( + text=final_text, files=output_files, usage=total_usage.get("usage"), finish_reason=finish_reason + ) + + def _build_prompt_with_react_format( + self, + original_messages: list[PromptMessage], + agent_scratchpad: list[AgentScratchpadUnit], + include_tools: bool = True, + instruction: str = "", + ) -> list[PromptMessage]: + """Build prompt messages with ReAct format.""" + # Copy messages to avoid modifying original + messages = list(original_messages) + + # Find and update the system prompt that should already exist + system_prompt_found = False + for i, msg in enumerate(messages): + if isinstance(msg, SystemPromptMessage): + system_prompt_found = True + # The system prompt from frontend already has the template, just replace placeholders + + # Format tools + tools_str = "" + tool_names = [] + if include_tools and self.tools: + # Convert tools to prompt message tools format + prompt_tools = [tool.to_prompt_message_tool() for tool in self.tools] + tool_names = [tool.name for tool in prompt_tools] + + # Format tools as JSON for comprehensive information + from core.model_runtime.utils.encoders import jsonable_encoder + + tools_str = json.dumps(jsonable_encoder(prompt_tools), indent=2) + tool_names_str = ", ".join(f'"{name}"' for name in tool_names) + else: + tools_str = "No tools available" + tool_names_str = "" + + # Replace placeholders in the existing system prompt + updated_content = msg.content + assert isinstance(updated_content, str) + updated_content = updated_content.replace("{{instruction}}", instruction) + updated_content = updated_content.replace("{{tools}}", tools_str) + updated_content = updated_content.replace("{{tool_names}}", tool_names_str) + + # Create new SystemPromptMessage with updated content + messages[i] = SystemPromptMessage(content=updated_content) + break + + # If no system prompt found, 
that's unexpected but add scratchpad anyway + if not system_prompt_found: + # This shouldn't happen if frontend is working correctly + pass + + # Format agent scratchpad + scratchpad_str = "" + if agent_scratchpad: + scratchpad_parts: list[str] = [] + for unit in agent_scratchpad: + if unit.thought: + scratchpad_parts.append(f"Thought: {unit.thought}") + if unit.action_str: + scratchpad_parts.append(f"Action:\n```\n{unit.action_str}\n```") + if unit.observation: + scratchpad_parts.append(f"Observation: {unit.observation}") + scratchpad_str = "\n".join(scratchpad_parts) + + # If there's a scratchpad, append it to the last message + if scratchpad_str: + messages.append(AssistantPromptMessage(content=scratchpad_str)) + + return messages + + def _handle_chunks( + self, + chunks: Union[Generator[LLMResultChunk, None, None], LLMResult], + llm_usage: dict[str, Any], + model_log: AgentLog, + current_messages: list[PromptMessage], + ) -> Generator[ + LLMResultChunk | AgentLog, + None, + tuple[AgentScratchpadUnit, str | None], + ]: + """Handle LLM response chunks and extract action/thought. + + Returns a tuple of (scratchpad_unit, finish_reason). + """ + usage_dict: dict[str, Any] = {} + + # Convert non-streaming to streaming format if needed + if isinstance(chunks, LLMResult): + # Create a generator from the LLMResult + def result_to_chunks() -> Generator[LLMResultChunk, None, None]: + yield LLMResultChunk( + model=chunks.model, + prompt_messages=chunks.prompt_messages, + delta=LLMResultChunkDelta( + index=0, + message=chunks.message, + usage=chunks.usage, + finish_reason=None, # LLMResult doesn't have finish_reason, only streaming chunks do + ), + system_fingerprint=chunks.system_fingerprint or "", + ) + + streaming_chunks = result_to_chunks() + else: + streaming_chunks = chunks + + react_chunks = CotAgentOutputParser.handle_react_stream_output(streaming_chunks, usage_dict) + + # Initialize scratchpad unit + scratchpad = AgentScratchpadUnit( + agent_response="", + thought="", + action_str="", + observation="", + action=None, + ) + + finish_reason: str | None = None + + # Process chunks + for chunk in react_chunks: + if isinstance(chunk, AgentScratchpadUnit.Action): + # Action detected + action_str = json.dumps(chunk.model_dump()) + scratchpad.agent_response = (scratchpad.agent_response or "") + action_str + scratchpad.action_str = action_str + scratchpad.action = chunk + + yield self._create_text_chunk(json.dumps(chunk.model_dump()), current_messages) + else: + # Text chunk + chunk_text = str(chunk) + scratchpad.agent_response = (scratchpad.agent_response or "") + chunk_text + scratchpad.thought = (scratchpad.thought or "") + chunk_text + + yield self._create_text_chunk(chunk_text, current_messages) + + # Update usage + if usage_dict.get("usage"): + if llm_usage.get("usage"): + self._accumulate_usage(llm_usage, usage_dict["usage"]) + else: + llm_usage["usage"] = usage_dict["usage"] + + # Clean up thought + scratchpad.thought = (scratchpad.thought or "").strip() or "I am thinking about how to help you" + + # Finish model log + yield self._finish_log( + model_log, + data={ + "thought": scratchpad.thought, + "action": scratchpad.action_str if scratchpad.action else None, + }, + usage=llm_usage.get("usage"), + ) + + return scratchpad, finish_reason + + def _handle_tool_call( + self, + action: AgentScratchpadUnit.Action, + prompt_messages: list[PromptMessage], + round_log: AgentLog, + ) -> Generator[AgentLog, None, tuple[str, list[File]]]: + """Handle tool call and return observation with files.""" + 
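+ # action.action_input may arrive as a JSON string or a plain dict; it is normalized to a dict before the tool is invoked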
tool_name = action.action_name + tool_args: dict[str, Any] | str = action.action_input + + # Start tool log + tool_log = self._create_log( + label=f"CALL {tool_name}", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={ + "tool_name": tool_name, + "tool_args": tool_args, + }, + parent_id=round_log.id, + ) + yield tool_log + + # Find tool instance + tool_instance = self._find_tool_by_name(tool_name) + if not tool_instance: + # Finish tool log with error + yield self._finish_log( + tool_log, + data={ + **tool_log.data, + "error": f"Tool {tool_name} not found", + }, + ) + return f"Tool {tool_name} not found", [] + + # Ensure tool_args is a dict + tool_args_dict: dict[str, Any] + if isinstance(tool_args, str): + try: + tool_args_dict = json.loads(tool_args) + except json.JSONDecodeError: + tool_args_dict = {"input": tool_args} + elif not isinstance(tool_args, dict): + tool_args_dict = {"input": str(tool_args)} + else: + tool_args_dict = tool_args + + # Invoke tool using base class method + response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args_dict, tool_name) + + # Finish tool log + yield self._finish_log( + tool_log, + data={ + **tool_log.data, + "output": response_content, + "files": len(tool_files), + "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None, + }, + ) + + return response_content or "Tool executed successfully", tool_files diff --git a/api/core/agent/patterns/strategy_factory.py b/api/core/agent/patterns/strategy_factory.py new file mode 100644 index 0000000000..ad26075291 --- /dev/null +++ b/api/core/agent/patterns/strategy_factory.py @@ -0,0 +1,107 @@ +"""Strategy factory for creating agent strategies.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING + +from core.agent.entities import AgentEntity, ExecutionContext +from core.file.models import File +from core.model_manager import ModelInstance +from core.model_runtime.entities.model_entities import ModelFeature + +from .base import AgentPattern, ToolInvokeHook +from .function_call import FunctionCallStrategy +from .react import ReActStrategy + +if TYPE_CHECKING: + from core.tools.__base.tool import Tool + + +class StrategyFactory: + """Factory for creating agent strategies based on model features.""" + + # Tool calling related features + TOOL_CALL_FEATURES = {ModelFeature.TOOL_CALL, ModelFeature.MULTI_TOOL_CALL, ModelFeature.STREAM_TOOL_CALL} + + @staticmethod + def create_strategy( + model_features: list[ModelFeature], + model_instance: ModelInstance, + context: ExecutionContext, + tools: list[Tool], + files: list[File], + max_iterations: int = 10, + workflow_call_depth: int = 0, + agent_strategy: AgentEntity.Strategy | None = None, + tool_invoke_hook: ToolInvokeHook | None = None, + instruction: str = "", + ) -> AgentPattern: + """ + Create an appropriate strategy based on model features. 
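+ An explicit agent_strategy override is honored when the model supports it (Function Calling falls back to ReAct otherwise); without an override, models with native tool-call features get FunctionCallStrategy and all others get ReActStrategy.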
+ + Args: + model_features: List of model features/capabilities + model_instance: Model instance to use + context: Execution context containing trace/audit information + tools: Available tools + files: Available files + max_iterations: Maximum iterations for the strategy + workflow_call_depth: Depth of workflow calls + agent_strategy: Optional explicit strategy override + tool_invoke_hook: Optional hook for custom tool invocation (e.g., agent_invoke) + instruction: Optional instruction for ReAct strategy + + Returns: + AgentStrategy instance + """ + # If explicit strategy is provided and it's Function Calling, try to use it if supported + if agent_strategy == AgentEntity.Strategy.FUNCTION_CALLING: + if set(model_features) & StrategyFactory.TOOL_CALL_FEATURES: + return FunctionCallStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + ) + # Fallback to ReAct if FC is requested but not supported + + # If explicit strategy is Chain of Thought (ReAct) + if agent_strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT: + return ReActStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + instruction=instruction, + ) + + # Default auto-selection logic + if set(model_features) & StrategyFactory.TOOL_CALL_FEATURES: + # Model supports native function calling + return FunctionCallStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + ) + else: + # Use ReAct strategy for models without function calling + return ReActStrategy( + model_instance=model_instance, + context=context, + tools=tools, + files=files, + max_iterations=max_iterations, + workflow_call_depth=workflow_call_depth, + tool_invoke_hook=tool_invoke_hook, + instruction=instruction, + ) diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index b297f3ff20..8e920f369a 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -4,6 +4,7 @@ import re import time from collections.abc import Callable, Generator, Mapping from contextlib import contextmanager +from dataclasses import dataclass, field from threading import Thread from typing import Any, Union @@ -19,6 +20,7 @@ from core.app.entities.app_invoke_entities import ( InvokeFrom, ) from core.app.entities.queue_entities import ( + ChunkType, MessageQueueMessage, QueueAdvancedChatMessageEndEvent, QueueAgentLogEvent, @@ -71,13 +73,115 @@ from core.workflow.runtime import GraphRuntimeState from core.workflow.system_variable import SystemVariable from extensions.ext_database import db from libs.datetime_utils import naive_utc_now -from models import Account, Conversation, EndUser, Message, MessageFile +from models import Account, Conversation, EndUser, LLMGenerationDetail, Message, MessageFile from models.enums import CreatorUserRole from models.workflow import Workflow, WorkflowNodeExecutionModel logger = logging.getLogger(__name__) +@dataclass +class StreamEventBuffer: + """ + Buffer for recording stream events in order to reconstruct the generation sequence. 
+ Records the exact order of text chunks, thoughts, and tool calls as they stream. + """ + + # Accumulated reasoning content (each thought block is a separate element) + reasoning_content: list[str] = field(default_factory=list) + # Current reasoning buffer (accumulates until we see a different event type) + _current_reasoning: str = "" + # Tool calls with their details + tool_calls: list[dict] = field(default_factory=list) + # Tool call ID to index mapping for updating results + _tool_call_id_map: dict[str, int] = field(default_factory=dict) + # Sequence of events in stream order + sequence: list[dict] = field(default_factory=list) + # Current position in answer text + _content_position: int = 0 + # Track last event type to detect transitions + _last_event_type: str | None = None + + def _flush_current_reasoning(self) -> None: + """Flush accumulated reasoning to the list and add to sequence.""" + if self._current_reasoning.strip(): + self.reasoning_content.append(self._current_reasoning.strip()) + self.sequence.append({"type": "reasoning", "index": len(self.reasoning_content) - 1}) + self._current_reasoning = "" + + def record_text_chunk(self, text: str) -> None: + """Record a text chunk event.""" + if not text: + return + + # Flush any pending reasoning first + if self._last_event_type == "thought": + self._flush_current_reasoning() + + text_len = len(text) + start_pos = self._content_position + + # If last event was also content, extend it; otherwise create new + if self.sequence and self.sequence[-1].get("type") == "content": + self.sequence[-1]["end"] = start_pos + text_len + else: + self.sequence.append({"type": "content", "start": start_pos, "end": start_pos + text_len}) + + self._content_position += text_len + self._last_event_type = "content" + + def record_thought_chunk(self, text: str) -> None: + """Record a thought/reasoning chunk event.""" + if not text: + return + + # Accumulate thought content + self._current_reasoning += text + self._last_event_type = "thought" + + def record_tool_call(self, tool_call_id: str, tool_name: str, tool_arguments: str) -> None: + """Record a tool call event.""" + # Flush any pending reasoning first + if self._last_event_type == "thought": + self._flush_current_reasoning() + + # Check if this tool call already exists (we might get multiple chunks) + if tool_call_id in self._tool_call_id_map: + idx = self._tool_call_id_map[tool_call_id] + # Update arguments if provided + if tool_arguments: + self.tool_calls[idx]["arguments"] = tool_arguments + else: + # New tool call + tool_call = { + "id": tool_call_id or "", + "name": tool_name or "", + "arguments": tool_arguments or "", + "result": "", + } + self.tool_calls.append(tool_call) + idx = len(self.tool_calls) - 1 + self._tool_call_id_map[tool_call_id] = idx + self.sequence.append({"type": "tool_call", "index": idx}) + + self._last_event_type = "tool_call" + + def record_tool_result(self, tool_call_id: str, result: str) -> None: + """Record a tool result event (update existing tool call).""" + if tool_call_id in self._tool_call_id_map: + idx = self._tool_call_id_map[tool_call_id] + self.tool_calls[idx]["result"] = result + + def finalize(self) -> None: + """Finalize the buffer, flushing any pending data.""" + if self._last_event_type == "thought": + self._flush_current_reasoning() + + def has_data(self) -> bool: + """Check if there's any meaningful data recorded.""" + return bool(self.reasoning_content or self.tool_calls or self.sequence) + + class 
AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): """ AdvancedChatAppGenerateTaskPipeline is a class that generate stream output and state management for Application. @@ -145,6 +249,8 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): self._workflow_run_id: str = "" self._draft_var_saver_factory = draft_var_saver_factory self._graph_runtime_state: GraphRuntimeState | None = None + # Stream event buffer for recording generation sequence + self._stream_buffer = StreamEventBuffer() self._seed_graph_runtime_state_from_queue_manager() def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: @@ -384,7 +490,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): queue_message: Union[WorkflowQueueMessage, MessageQueueMessage] | None = None, **kwargs, ) -> Generator[StreamResponse, None, None]: - """Handle text chunk events.""" + """Handle text chunk events and record to stream buffer for sequence reconstruction.""" delta_text = event.text if delta_text is None: return @@ -406,9 +512,37 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): if tts_publisher and queue_message: tts_publisher.publish(queue_message) + # Record stream event based on chunk type + chunk_type = event.chunk_type or ChunkType.TEXT + match chunk_type: + case ChunkType.TEXT: + self._stream_buffer.record_text_chunk(delta_text) + case ChunkType.THOUGHT: + self._stream_buffer.record_thought_chunk(delta_text) + case ChunkType.TOOL_CALL: + self._stream_buffer.record_tool_call( + tool_call_id=event.tool_call_id or "", + tool_name=event.tool_name or "", + tool_arguments=event.tool_arguments or "", + ) + case ChunkType.TOOL_RESULT: + self._stream_buffer.record_tool_result( + tool_call_id=event.tool_call_id or "", + result=delta_text, + ) + self._task_state.answer += delta_text yield self._message_cycle_manager.message_to_stream_response( - answer=delta_text, message_id=self._message_id, from_variable_selector=event.from_variable_selector + answer=delta_text, + message_id=self._message_id, + from_variable_selector=event.from_variable_selector, + chunk_type=event.chunk_type.value if event.chunk_type else None, + tool_call_id=event.tool_call_id, + tool_name=event.tool_name, + tool_arguments=event.tool_arguments, + tool_files=event.tool_files, + tool_error=event.tool_error, + round_index=event.round_index, ) def _handle_iteration_start_event( @@ -842,6 +976,8 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): ] session.add_all(message_files) + # Save merged LLM generation detail from all LLM nodes + self._save_generation_detail(session=session, message=message) # Trigger MESSAGE_TRACE for tracing integrations if trace_manager: trace_manager.add_trace_task( @@ -850,6 +986,41 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): ) ) + def _save_generation_detail(self, *, session: Session, message: Message) -> None: + """ + Save LLM generation detail for Chatflow using stream event buffer. + The buffer records the exact order of events as they streamed, + allowing accurate reconstruction of the generation sequence. 
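+ If a detail row already exists for the message it is updated in place; otherwise a new LLMGenerationDetail row is inserted.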
+ """ + # Finalize the stream buffer to flush any pending data + self._stream_buffer.finalize() + + # Only save if there's meaningful data + if not self._stream_buffer.has_data(): + return + + reasoning_content = self._stream_buffer.reasoning_content + tool_calls = self._stream_buffer.tool_calls + sequence = self._stream_buffer.sequence + + # Check if generation detail already exists for this message + existing = session.query(LLMGenerationDetail).filter_by(message_id=message.id).first() + + if existing: + existing.reasoning_content = json.dumps(reasoning_content) if reasoning_content else None + existing.tool_calls = json.dumps(tool_calls) if tool_calls else None + existing.sequence = json.dumps(sequence) if sequence else None + else: + generation_detail = LLMGenerationDetail( + tenant_id=self._application_generate_entity.app_config.tenant_id, + app_id=self._application_generate_entity.app_config.app_id, + message_id=message.id, + reasoning_content=json.dumps(reasoning_content) if reasoning_content else None, + tool_calls=json.dumps(tool_calls) if tool_calls else None, + sequence=json.dumps(sequence) if sequence else None, + ) + session.add(generation_detail) + def _extract_model_info_from_workflow(self, session: Session, workflow_run_id: str) -> dict[str, str] | None: """ Extract model provider and model_id from workflow node executions. diff --git a/api/core/app/apps/agent_chat/app_runner.py b/api/core/app/apps/agent_chat/app_runner.py index 2760466a3b..f5cf7a2c56 100644 --- a/api/core/app/apps/agent_chat/app_runner.py +++ b/api/core/app/apps/agent_chat/app_runner.py @@ -3,10 +3,8 @@ from typing import cast from sqlalchemy import select -from core.agent.cot_chat_agent_runner import CotChatAgentRunner -from core.agent.cot_completion_agent_runner import CotCompletionAgentRunner +from core.agent.agent_app_runner import AgentAppRunner from core.agent.entities import AgentEntity -from core.agent.fc_agent_runner import FunctionCallAgentRunner from core.app.apps.agent_chat.app_config_manager import AgentChatAppConfig from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.apps.base_app_runner import AppRunner @@ -14,8 +12,7 @@ from core.app.entities.app_invoke_entities import AgentChatAppGenerateEntity from core.app.entities.queue_entities import QueueAnnotationReplyEvent from core.memory.token_buffer_memory import TokenBufferMemory from core.model_manager import ModelInstance -from core.model_runtime.entities.llm_entities import LLMMode -from core.model_runtime.entities.model_entities import ModelFeature, ModelPropertyKey +from core.model_runtime.entities.model_entities import ModelFeature from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel from core.moderation.base import ModerationError from extensions.ext_database import db @@ -194,22 +191,7 @@ class AgentChatAppRunner(AppRunner): raise ValueError("Message not found") db.session.close() - runner_cls: type[FunctionCallAgentRunner] | type[CotChatAgentRunner] | type[CotCompletionAgentRunner] - # start agent runner - if agent_entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT: - # check LLM mode - if model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.CHAT: - runner_cls = CotChatAgentRunner - elif model_schema.model_properties.get(ModelPropertyKey.MODE) == LLMMode.COMPLETION: - runner_cls = CotCompletionAgentRunner - else: - raise ValueError(f"Invalid LLM mode: {model_schema.model_properties.get(ModelPropertyKey.MODE)}") - elif 
agent_entity.strategy == AgentEntity.Strategy.FUNCTION_CALLING: - runner_cls = FunctionCallAgentRunner - else: - raise ValueError(f"Invalid agent strategy: {agent_entity.strategy}") - - runner = runner_cls( + runner = AgentAppRunner( tenant_id=app_config.tenant_id, application_generate_entity=application_generate_entity, conversation=conversation_result, diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index 38ecec5d30..0f3f9972c3 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -671,7 +671,7 @@ class WorkflowResponseConverter: task_id=task_id, data=AgentLogStreamResponse.Data( node_execution_id=event.node_execution_id, - id=event.id, + message_id=event.id, parent_id=event.parent_id, label=event.label, error=event.error, diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 842ad545ad..09ac24a413 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -13,6 +13,7 @@ from core.app.apps.common.workflow_response_converter import WorkflowResponseCon from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity from core.app.entities.queue_entities import ( AppQueueEvent, + ChunkType, MessageQueueMessage, QueueAgentLogEvent, QueueErrorEvent, @@ -487,7 +488,17 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): if tts_publisher and queue_message: tts_publisher.publish(queue_message) - yield self._text_chunk_to_stream_response(delta_text, from_variable_selector=event.from_variable_selector) + yield self._text_chunk_to_stream_response( + text=delta_text, + from_variable_selector=event.from_variable_selector, + chunk_type=event.chunk_type, + tool_call_id=event.tool_call_id, + tool_name=event.tool_name, + tool_arguments=event.tool_arguments, + tool_files=event.tool_files, + tool_error=event.tool_error, + round_index=event.round_index, + ) def _handle_agent_log_event(self, event: QueueAgentLogEvent, **kwargs) -> Generator[StreamResponse, None, None]: """Handle agent log events.""" @@ -650,16 +661,37 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): session.add(workflow_app_log) def _text_chunk_to_stream_response( - self, text: str, from_variable_selector: list[str] | None = None + self, + text: str, + from_variable_selector: list[str] | None = None, + chunk_type: ChunkType | None = None, + tool_call_id: str | None = None, + tool_name: str | None = None, + tool_arguments: str | None = None, + tool_files: list[str] | None = None, + tool_error: str | None = None, + round_index: int | None = None, ) -> TextChunkStreamResponse: """ Handle completed event. 
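+        Convert a streamed text chunk, together with any tool call/result or thought metadata,
+        into a TextChunkStreamResponse.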
:param text: text :return: """ + from core.app.entities.task_entities import ChunkType as ResponseChunkType + response = TextChunkStreamResponse( task_id=self._application_generate_entity.task_id, - data=TextChunkStreamResponse.Data(text=text, from_variable_selector=from_variable_selector), + data=TextChunkStreamResponse.Data( + text=text, + from_variable_selector=from_variable_selector, + chunk_type=ResponseChunkType(chunk_type.value) if chunk_type else ResponseChunkType.TEXT, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + tool_files=tool_files or [], + tool_error=tool_error, + round_index=round_index, + ), ) return response diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 0e125b3538..3161956c9b 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -455,12 +455,21 @@ class WorkflowBasedAppRunner: ) ) elif isinstance(event, NodeRunStreamChunkEvent): + from core.app.entities.queue_entities import ChunkType as QueueChunkType + self._publish_event( QueueTextChunkEvent( text=event.chunk, from_variable_selector=list(event.selector), in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, + chunk_type=QueueChunkType(event.chunk_type.value), + tool_call_id=event.tool_call_id, + tool_name=event.tool_name, + tool_arguments=event.tool_arguments, + tool_files=event.tool_files, + tool_error=event.tool_error, + round_index=event.round_index, ) ) elif isinstance(event, NodeRunRetrieverResourceEvent): diff --git a/api/core/app/entities/llm_generation_entities.py b/api/core/app/entities/llm_generation_entities.py new file mode 100644 index 0000000000..4e278249fe --- /dev/null +++ b/api/core/app/entities/llm_generation_entities.py @@ -0,0 +1,69 @@ +""" +LLM Generation Detail entities. + +Defines the structure for storing and transmitting LLM generation details +including reasoning content, tool calls, and their sequence. +""" + +from typing import Literal + +from pydantic import BaseModel, Field + + +class ContentSegment(BaseModel): + """Represents a content segment in the generation sequence.""" + + type: Literal["content"] = "content" + start: int = Field(..., description="Start position in the text") + end: int = Field(..., description="End position in the text") + + +class ReasoningSegment(BaseModel): + """Represents a reasoning segment in the generation sequence.""" + + type: Literal["reasoning"] = "reasoning" + index: int = Field(..., description="Index into reasoning_content array") + + +class ToolCallSegment(BaseModel): + """Represents a tool call segment in the generation sequence.""" + + type: Literal["tool_call"] = "tool_call" + index: int = Field(..., description="Index into tool_calls array") + + +SequenceSegment = ContentSegment | ReasoningSegment | ToolCallSegment + + +class ToolCallDetail(BaseModel): + """Represents a tool call with its arguments and result.""" + + id: str = Field(default="", description="Unique identifier for the tool call") + name: str = Field(..., description="Name of the tool") + arguments: str = Field(default="", description="JSON string of tool arguments") + result: str = Field(default="", description="Result from the tool execution") + + +class LLMGenerationDetailData(BaseModel): + """ + Domain model for LLM generation detail. + + Contains the structured data for reasoning content, tool calls, + and their display sequence. 
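+
+    Example sequence for a turn that reasons, calls one tool, then answers:
+    [{"type": "reasoning", "index": 0}, {"type": "tool_call", "index": 0},
+     {"type": "content", "start": 0, "end": 42}]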
+ """ + + reasoning_content: list[str] = Field(default_factory=list, description="List of reasoning segments") + tool_calls: list[ToolCallDetail] = Field(default_factory=list, description="List of tool call details") + sequence: list[SequenceSegment] = Field(default_factory=list, description="Display order of segments") + + def is_empty(self) -> bool: + """Check if there's any meaningful generation detail.""" + return not self.reasoning_content and not self.tool_calls + + def to_response_dict(self) -> dict: + """Convert to dictionary for API response.""" + return { + "reasoning_content": self.reasoning_content, + "tool_calls": [tc.model_dump() for tc in self.tool_calls], + "sequence": [seg.model_dump() for seg in self.sequence], + } diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index 77d6bf03b4..c767fcfc34 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -177,6 +177,15 @@ class QueueLoopCompletedEvent(AppQueueEvent): error: str | None = None +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + + class QueueTextChunkEvent(AppQueueEvent): """ QueueTextChunkEvent entity @@ -191,6 +200,28 @@ class QueueTextChunkEvent(AppQueueEvent): in_loop_id: str | None = None """loop id if node is in loop""" + # Extended fields for Agent/Tool streaming + chunk_type: ChunkType = ChunkType.TEXT + """type of the chunk""" + + # Tool call fields (when chunk_type == TOOL_CALL) + tool_call_id: str | None = None + """unique identifier for this tool call""" + tool_name: str | None = None + """name of the tool being called""" + tool_arguments: str | None = None + """accumulated tool arguments JSON""" + + # Tool result fields (when chunk_type == TOOL_RESULT) + tool_files: list[str] = Field(default_factory=list) + """file IDs produced by tool""" + tool_error: str | None = None + """error message if tool failed""" + + # Thought fields (when chunk_type == THOUGHT) + round_index: int | None = None + """current iteration round""" + class QueueAgentMessageEvent(AppQueueEvent): """ diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 7692128985..4609cd87f6 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -116,6 +116,28 @@ class MessageStreamResponse(StreamResponse): answer: str from_variable_selector: list[str] | None = None + # Extended fields for Agent/Tool streaming (imported at runtime to avoid circular import) + chunk_type: str | None = None + """type of the chunk: text, tool_call, tool_result, thought""" + + # Tool call fields (when chunk_type == "tool_call") + tool_call_id: str | None = None + """unique identifier for this tool call""" + tool_name: str | None = None + """name of the tool being called""" + tool_arguments: str | None = None + """accumulated tool arguments JSON""" + + # Tool result fields (when chunk_type == "tool_result") + tool_files: list[str] | None = None + """file IDs produced by tool""" + tool_error: str | None = None + """error message if tool failed""" + + # Thought fields (when chunk_type == "thought") + round_index: int | None = None + """current iteration round""" + class MessageAudioStreamResponse(StreamResponse): """ @@ -585,6 +607,15 @@ class 
LoopNodeCompletedStreamResponse(StreamResponse): data: Data +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + + class TextChunkStreamResponse(StreamResponse): """ TextChunkStreamResponse entity @@ -598,6 +629,28 @@ class TextChunkStreamResponse(StreamResponse): text: str from_variable_selector: list[str] | None = None + # Extended fields for Agent/Tool streaming + chunk_type: ChunkType = ChunkType.TEXT + """type of the chunk""" + + # Tool call fields (when chunk_type == TOOL_CALL) + tool_call_id: str | None = None + """unique identifier for this tool call""" + tool_name: str | None = None + """name of the tool being called""" + tool_arguments: str | None = None + """accumulated tool arguments JSON""" + + # Tool result fields (when chunk_type == TOOL_RESULT) + tool_files: list[str] = Field(default_factory=list) + """file IDs produced by tool""" + tool_error: str | None = None + """error message if tool failed""" + + # Thought fields (when chunk_type == THOUGHT) + round_index: int | None = None + """current iteration round""" + event: StreamEvent = StreamEvent.TEXT_CHUNK data: Data @@ -746,7 +799,7 @@ class AgentLogStreamResponse(StreamResponse): """ node_execution_id: str - id: str + message_id: str label: str parent_id: str | None = None error: str | None = None diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 98548ddfbb..2405413d71 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -58,7 +58,7 @@ from core.prompt.utils.prompt_template_parser import PromptTemplateParser from events.message_event import message_was_created from extensions.ext_database import db from libs.datetime_utils import naive_utc_now -from models.model import AppMode, Conversation, Message, MessageAgentThought +from models.model import AppMode, Conversation, LLMGenerationDetail, Message, MessageAgentThought logger = logging.getLogger(__name__) @@ -425,11 +425,92 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): ) ) + # Save LLM generation detail if there's reasoning_content + self._save_generation_detail(session=session, message=message, llm_result=llm_result) + message_was_created.send( message, application_generate_entity=self._application_generate_entity, ) + def _save_generation_detail(self, *, session: Session, message: Message, llm_result: LLMResult) -> None: + """ + Save LLM generation detail for Completion/Chat/Agent-Chat applications. + For Agent-Chat, also merges MessageAgentThought records. 
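+        For plain Completion/Chat, a record is written only when reasoning_content is present:
+        the whole answer becomes one content segment followed by a single reasoning segment.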
+ """ + import json + + reasoning_list: list[str] = [] + tool_calls_list: list[dict] = [] + sequence: list[dict] = [] + answer = message.answer or "" + + # Check if this is Agent-Chat mode by looking for agent thoughts + agent_thoughts = ( + session.query(MessageAgentThought) + .filter_by(message_id=message.id) + .order_by(MessageAgentThought.position.asc()) + .all() + ) + + if agent_thoughts: + # Agent-Chat mode: merge MessageAgentThought records + content_pos = 0 + for thought in agent_thoughts: + # Add thought/reasoning + if thought.thought: + reasoning_list.append(thought.thought) + sequence.append({"type": "reasoning", "index": len(reasoning_list) - 1}) + + # Add tool calls + if thought.tool: + tool_calls_list.append( + { + "name": thought.tool, + "arguments": thought.tool_input or "", + "result": thought.observation or "", + } + ) + sequence.append({"type": "tool_call", "index": len(tool_calls_list) - 1}) + + # Add answer content if present + if thought.answer: + start = content_pos + end = content_pos + len(thought.answer) + sequence.append({"type": "content", "start": start, "end": end}) + content_pos = end + else: + # Completion/Chat mode: use reasoning_content from llm_result + reasoning_content = llm_result.reasoning_content + if reasoning_content: + reasoning_list = [reasoning_content] + # Content comes first, then reasoning + if answer: + sequence.append({"type": "content", "start": 0, "end": len(answer)}) + sequence.append({"type": "reasoning", "index": 0}) + + # Only save if there's meaningful generation detail + if not reasoning_list and not tool_calls_list: + return + + # Check if generation detail already exists + existing = session.query(LLMGenerationDetail).filter_by(message_id=message.id).first() + + if existing: + existing.reasoning_content = json.dumps(reasoning_list) if reasoning_list else None + existing.tool_calls = json.dumps(tool_calls_list) if tool_calls_list else None + existing.sequence = json.dumps(sequence) if sequence else None + else: + generation_detail = LLMGenerationDetail( + tenant_id=self._application_generate_entity.app_config.tenant_id, + app_id=self._application_generate_entity.app_config.app_id, + message_id=message.id, + reasoning_content=json.dumps(reasoning_list) if reasoning_list else None, + tool_calls=json.dumps(tool_calls_list) if tool_calls_list else None, + sequence=json.dumps(sequence) if sequence else None, + ) + session.add(generation_detail) + def _handle_stop(self, event: QueueStopEvent): """ Handle stop. diff --git a/api/core/app/task_pipeline/message_cycle_manager.py b/api/core/app/task_pipeline/message_cycle_manager.py index 2e6f92efa5..414fed6701 100644 --- a/api/core/app/task_pipeline/message_cycle_manager.py +++ b/api/core/app/task_pipeline/message_cycle_manager.py @@ -214,12 +214,30 @@ class MessageCycleManager: return None def message_to_stream_response( - self, answer: str, message_id: str, from_variable_selector: list[str] | None = None + self, + answer: str, + message_id: str, + from_variable_selector: list[str] | None = None, + chunk_type: str | None = None, + tool_call_id: str | None = None, + tool_name: str | None = None, + tool_arguments: str | None = None, + tool_files: list[str] | None = None, + tool_error: str | None = None, + round_index: int | None = None, ) -> MessageStreamResponse: """ Message to stream response. 
:param answer: answer :param message_id: message id + :param from_variable_selector: from variable selector + :param chunk_type: type of the chunk (text, function_call, tool_result, thought) + :param tool_call_id: unique identifier for this tool call + :param tool_name: name of the tool being called + :param tool_arguments: accumulated tool arguments JSON + :param tool_files: file IDs produced by tool + :param tool_error: error message if tool failed + :param round_index: current iteration round :return: """ with Session(db.engine, expire_on_commit=False) as session: @@ -232,6 +250,13 @@ class MessageCycleManager: answer=answer, from_variable_selector=from_variable_selector, event=event_type, + chunk_type=chunk_type, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + tool_files=tool_files, + tool_error=tool_error, + round_index=round_index, ) def message_replace_to_stream_response(self, answer: str, reason: str = "") -> MessageReplaceStreamResponse: diff --git a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py index 4436773d25..79b0c702e0 100644 --- a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py @@ -29,6 +29,7 @@ from models import ( Account, CreatorUserRole, EndUser, + LLMGenerationDetail, WorkflowNodeExecutionModel, WorkflowNodeExecutionTriggeredFrom, ) @@ -457,6 +458,94 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) session.merge(db_model) session.flush() + # Save LLMGenerationDetail for LLM nodes with successful execution + if ( + domain_model.node_type == NodeType.LLM + and domain_model.status == WorkflowNodeExecutionStatus.SUCCEEDED + and domain_model.outputs is not None + ): + self._save_llm_generation_detail(session, domain_model) + + def _save_llm_generation_detail(self, session, execution: WorkflowNodeExecution) -> None: + """ + Save LLM generation detail for LLM nodes. + Extracts reasoning_content, tool_calls, and sequence from outputs and metadata. + """ + outputs = execution.outputs or {} + metadata = execution.metadata or {} + + # Extract reasoning_content from outputs + reasoning_content = outputs.get("reasoning_content") + reasoning_list: list[str] = [] + if reasoning_content: + # reasoning_content could be a string or already a list + if isinstance(reasoning_content, str): + reasoning_list = [reasoning_content] if reasoning_content else [] + elif isinstance(reasoning_content, list): + reasoning_list = reasoning_content + + # Extract tool_calls from metadata.agent_log + tool_calls_list: list[dict] = [] + agent_log = metadata.get(WorkflowNodeExecutionMetadataKey.AGENT_LOG) + if agent_log and isinstance(agent_log, list): + for log in agent_log: + # Each log entry has label, data, status, etc. 
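+                # Tool-call entries expose tool_call_id, tool_name, tool_args and output in their data payload.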
+ log_data = log.data if hasattr(log, "data") else log.get("data", {}) + if log_data.get("tool_name"): + tool_calls_list.append( + { + "id": log_data.get("tool_call_id", ""), + "name": log_data.get("tool_name", ""), + "arguments": json.dumps(log_data.get("tool_args", {})), + "result": str(log_data.get("output", "")), + } + ) + + # Build sequence based on content, reasoning, and tool_calls + sequence: list[dict] = [] + text = outputs.get("text", "") + + # For now, use a simple sequence: content -> reasoning -> tool_calls + # This can be enhanced later to track actual streaming order + if text: + sequence.append({"type": "content", "start": 0, "end": len(text)}) + for i, _ in enumerate(reasoning_list): + sequence.append({"type": "reasoning", "index": i}) + for i in range(len(tool_calls_list)): + sequence.append({"type": "tool_call", "index": i}) + + # Only save if there's meaningful generation detail + if not reasoning_list and not tool_calls_list: + return + + # Check if generation detail already exists for this node execution + existing = ( + session.query(LLMGenerationDetail) + .filter_by( + workflow_run_id=execution.workflow_execution_id, + node_id=execution.node_id, + ) + .first() + ) + + if existing: + # Update existing record + existing.reasoning_content = json.dumps(reasoning_list) if reasoning_list else None + existing.tool_calls = json.dumps(tool_calls_list) if tool_calls_list else None + existing.sequence = json.dumps(sequence) if sequence else None + else: + # Create new record + generation_detail = LLMGenerationDetail( + tenant_id=self._tenant_id, + app_id=self._app_id, + workflow_run_id=execution.workflow_execution_id, + node_id=execution.node_id, + reasoning_content=json.dumps(reasoning_list) if reasoning_list else None, + tool_calls=json.dumps(tool_calls_list) if tool_calls_list else None, + sequence=json.dumps(sequence) if sequence else None, + ) + session.add(generation_detail) + def get_db_models_by_workflow_run( self, workflow_run_id: str, diff --git a/api/core/tools/__base/tool.py b/api/core/tools/__base/tool.py index 8ca4eabb7a..cdbfd027ee 100644 --- a/api/core/tools/__base/tool.py +++ b/api/core/tools/__base/tool.py @@ -6,6 +6,7 @@ from typing import TYPE_CHECKING, Any if TYPE_CHECKING: from models.model import File +from core.model_runtime.entities.message_entities import PromptMessageTool from core.tools.__base.tool_runtime import ToolRuntime from core.tools.entities.tool_entities import ( ToolEntity, @@ -152,6 +153,60 @@ class Tool(ABC): return parameters + def to_prompt_message_tool(self) -> PromptMessageTool: + message_tool = PromptMessageTool( + name=self.entity.identity.name, + description=self.entity.description.llm if self.entity.description else "", + parameters={ + "type": "object", + "properties": {}, + "required": [], + }, + ) + + parameters = self.get_merged_runtime_parameters() + for parameter in parameters: + if parameter.form != ToolParameter.ToolParameterForm.LLM: + continue + + parameter_type = parameter.type.as_normal_type() + if parameter.type in { + ToolParameter.ToolParameterType.SYSTEM_FILES, + ToolParameter.ToolParameterType.FILE, + ToolParameter.ToolParameterType.FILES, + }: + # Determine the description based on parameter type + if parameter.type == ToolParameter.ToolParameterType.FILE: + file_format_desc = " Input the file id with format: [File: file_id]." + else: + file_format_desc = "Input the file id with format: [Files: file_id1, file_id2, ...]. 
" + + message_tool.parameters["properties"][parameter.name] = { + "type": "string", + "description": (parameter.llm_description or "") + file_format_desc, + } + continue + enum = [] + if parameter.type == ToolParameter.ToolParameterType.SELECT: + enum = [option.value for option in parameter.options] if parameter.options else [] + + message_tool.parameters["properties"][parameter.name] = ( + { + "type": parameter_type, + "description": parameter.llm_description or "", + } + if parameter.input_schema is None + else parameter.input_schema + ) + + if len(enum) > 0: + message_tool.parameters["properties"][parameter.name]["enum"] = enum + + if parameter.required: + message_tool.parameters["required"].append(parameter.name) + + return message_tool + def create_image_message( self, image: str, diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py index cf12d5ec1f..3a60d34691 100644 --- a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ -247,6 +247,7 @@ class WorkflowNodeExecutionMetadataKey(StrEnum): ERROR_STRATEGY = "error_strategy" # node in continue on error mode return the field LOOP_VARIABLE_MAP = "loop_variable_map" # single loop variable output DATASOURCE_INFO = "datasource_info" + LLM_CONTENT_SEQUENCE = "llm_content_sequence" class WorkflowNodeExecutionStatus(StrEnum): diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index 98e0ea91ef..bd20c4f334 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -321,11 +321,20 @@ class ResponseStreamCoordinator: selector: Sequence[str], chunk: str, is_final: bool = False, + **extra_fields, ) -> NodeRunStreamChunkEvent: """Create a stream chunk event with consistent structure. For selectors with special prefixes (sys, env, conversation), we use the active response node's information since these are not actual node IDs. + + Args: + node_id: The node ID to attribute the event to + execution_id: The execution ID for this node + selector: The variable selector + chunk: The chunk content + is_final: Whether this is the final chunk + **extra_fields: Additional fields for specialized events (chunk_type, tool_call_id, etc.) """ # Check if this is a special selector that doesn't correspond to a node if selector and selector[0] not in self._graph.nodes and self._active_session: @@ -338,6 +347,7 @@ class ResponseStreamCoordinator: selector=selector, chunk=chunk, is_final=is_final, + **extra_fields, ) # Standard case: selector refers to an actual node @@ -349,6 +359,7 @@ class ResponseStreamCoordinator: selector=selector, chunk=chunk, is_final=is_final, + **extra_fields, ) def _process_variable_segment(self, segment: VariableSegment) -> tuple[Sequence[NodeRunStreamChunkEvent], bool]: @@ -356,6 +367,8 @@ class ResponseStreamCoordinator: Handles both regular node selectors and special system selectors (sys, env, conversation). For special selectors, we attribute the output to the active response node. + + For object-type variables, automatically streams all child fields that have stream events. 
""" events: list[NodeRunStreamChunkEvent] = [] source_selector_prefix = segment.selector[0] if segment.selector else "" @@ -372,49 +385,93 @@ class ResponseStreamCoordinator: output_node_id = source_selector_prefix execution_id = self._get_or_create_execution_id(output_node_id) - # Stream all available chunks - while self._has_unread_stream(segment.selector): - if event := self._pop_stream_chunk(segment.selector): - # For special selectors, we need to update the event to use - # the active response node's information - if self._active_session and source_selector_prefix not in self._graph.nodes: - response_node = self._graph.nodes[self._active_session.node_id] - # Create a new event with the response node's information - # but keep the original selector - updated_event = NodeRunStreamChunkEvent( - id=execution_id, - node_id=response_node.id, - node_type=response_node.node_type, - selector=event.selector, # Keep original selector - chunk=event.chunk, - is_final=event.is_final, - ) - events.append(updated_event) - else: - # Regular node selector - use event as is - events.append(event) + # Check if there's a direct stream for this selector + has_direct_stream = ( + tuple(segment.selector) in self._stream_buffers or tuple(segment.selector) in self._closed_streams + ) - # Check if this is the last chunk by looking ahead - stream_closed = self._is_stream_closed(segment.selector) - # Check if stream is closed to determine if segment is complete - if stream_closed: - is_complete = True + if has_direct_stream: + # Stream all available chunks for direct stream + while self._has_unread_stream(segment.selector): + if event := self._pop_stream_chunk(segment.selector): + # For special selectors, update the event to use active response node's information + if self._active_session and source_selector_prefix not in self._graph.nodes: + response_node = self._graph.nodes[self._active_session.node_id] + updated_event = NodeRunStreamChunkEvent( + id=execution_id, + node_id=response_node.id, + node_type=response_node.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + ) + events.append(updated_event) + else: + events.append(event) - elif value := self._variable_pool.get(segment.selector): - # Process scalar value - is_last_segment = bool( - self._active_session and self._active_session.index == len(self._active_session.template.segments) - 1 - ) - events.append( - self._create_stream_chunk_event( - node_id=output_node_id, - execution_id=execution_id, - selector=segment.selector, - chunk=value.markdown, - is_final=is_last_segment, + # Check if stream is closed + if self._is_stream_closed(segment.selector): + is_complete = True + + else: + # No direct stream - check for child field streams (for object types) + child_streams = self._find_child_streams(segment.selector) + + if child_streams: + # Process all child streams + all_children_complete = True + + for child_selector in sorted(child_streams): + # Stream all available chunks from this child + while self._has_unread_stream(child_selector): + if event := self._pop_stream_chunk(child_selector): + # Forward child stream event + if self._active_session and source_selector_prefix not in self._graph.nodes: + response_node = self._graph.nodes[self._active_session.node_id] + updated_event = NodeRunStreamChunkEvent( + id=execution_id, + node_id=response_node.id, + node_type=response_node.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=event.chunk_type, + 
tool_call_id=event.tool_call_id, + tool_name=event.tool_name, + tool_arguments=event.tool_arguments, + tool_files=event.tool_files, + tool_error=event.tool_error, + round_index=event.round_index, + ) + events.append(updated_event) + else: + events.append(event) + + # Check if this child stream is complete + if not self._is_stream_closed(child_selector): + all_children_complete = False + + # Object segment is complete only when all children are complete + is_complete = all_children_complete + + # Fallback: check if scalar value exists in variable pool + if not is_complete and not has_direct_stream: + if value := self._variable_pool.get(segment.selector): + # Process scalar value + is_last_segment = bool( + self._active_session + and self._active_session.index == len(self._active_session.template.segments) - 1 ) - ) - is_complete = True + events.append( + self._create_stream_chunk_event( + node_id=output_node_id, + execution_id=execution_id, + selector=segment.selector, + chunk=value.markdown, + is_final=is_last_segment, + ) + ) + is_complete = True return events, is_complete @@ -513,6 +570,36 @@ class ResponseStreamCoordinator: # ============= Internal Stream Management Methods ============= + def _find_child_streams(self, parent_selector: Sequence[str]) -> list[tuple[str, ...]]: + """Find all child stream selectors that are descendants of the parent selector. + + For example, if parent_selector is ['llm', 'generation'], this will find: + - ['llm', 'generation', 'content'] + - ['llm', 'generation', 'tool_calls'] + - ['llm', 'generation', 'tool_results'] + - ['llm', 'generation', 'thought'] + + Args: + parent_selector: The parent selector to search for children + + Returns: + List of child selector tuples found in stream buffers or closed streams + """ + parent_key = tuple(parent_selector) + parent_len = len(parent_key) + child_streams: set[tuple[str, ...]] = set() + + # Search in both active buffers and closed streams + all_selectors = set(self._stream_buffers.keys()) | self._closed_streams + + for selector_key in all_selectors: + # Check if this selector is a direct child of the parent + # Direct child means: len(child) == len(parent) + 1 and child starts with parent + if len(selector_key) == parent_len + 1 and selector_key[:parent_len] == parent_key: + child_streams.add(selector_key) + + return sorted(child_streams) + def _append_stream_chunk(self, selector: Sequence[str], event: NodeRunStreamChunkEvent) -> None: """ Append a stream chunk to the internal buffer. 
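The child-stream lookup above hinges on a simple rule: a selector counts as a child only when it sits exactly one level below the parent. A minimal sketch of that matching, using hypothetical selector names (`llm`, `generation`, `content`, `thought`, `tool_calls`) rather than values from a real graph:

```python
def find_child_streams(parent: tuple[str, ...], known: set[tuple[str, ...]]) -> list[tuple[str, ...]]:
    """Return selectors exactly one level below `parent`, sorted for deterministic order."""
    return sorted(s for s in known if len(s) == len(parent) + 1 and s[: len(parent)] == parent)


known_streams = {
    ("llm", "text"),                            # sibling of the parent, ignored
    ("llm", "generation", "content"),
    ("llm", "generation", "thought"),
    ("llm", "generation", "tool_calls"),
    ("llm", "generation", "content", "extra"),  # grandchild, ignored
}

assert find_child_streams(("llm", "generation"), known_streams) == [
    ("llm", "generation", "content"),
    ("llm", "generation", "thought"),
    ("llm", "generation", "tool_calls"),
]
```

This is what lets an object-typed segment such as `['llm', 'generation']` be satisfied by forwarding the `content`, `thought`, `tool_calls`, and `tool_results` sub-streams emitted by the LLM node, with the segment marked complete only after every child stream has closed.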
diff --git a/api/core/workflow/graph_events/__init__.py b/api/core/workflow/graph_events/__init__.py index 7a5edbb331..6c37fa1bc6 100644 --- a/api/core/workflow/graph_events/__init__.py +++ b/api/core/workflow/graph_events/__init__.py @@ -36,6 +36,7 @@ from .loop import ( # Node events from .node import ( + ChunkType, NodeRunExceptionEvent, NodeRunFailedEvent, NodeRunPauseRequestedEvent, @@ -48,6 +49,7 @@ from .node import ( __all__ = [ "BaseGraphEvent", + "ChunkType", "GraphEngineEvent", "GraphNodeEventBase", "GraphRunAbortedEvent", diff --git a/api/core/workflow/graph_events/node.py b/api/core/workflow/graph_events/node.py index f225798d41..c7f76c424d 100644 --- a/api/core/workflow/graph_events/node.py +++ b/api/core/workflow/graph_events/node.py @@ -1,5 +1,6 @@ from collections.abc import Sequence from datetime import datetime +from enum import StrEnum from pydantic import Field @@ -21,13 +22,37 @@ class NodeRunStartedEvent(GraphNodeEventBase): provider_id: str = "" +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + + class NodeRunStreamChunkEvent(GraphNodeEventBase): - # Spec-compliant fields + """Stream chunk event for workflow node execution.""" + + # Base fields selector: Sequence[str] = Field( ..., description="selector identifying the output location (e.g., ['nodeA', 'text'])" ) chunk: str = Field(..., description="the actual chunk content") is_final: bool = Field(default=False, description="indicates if this is the last chunk") + chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk") + + # Tool call fields (when chunk_type == TOOL_CALL) + tool_call_id: str | None = Field(default=None, description="unique identifier for this tool call") + tool_name: str | None = Field(default=None, description="name of the tool being called") + tool_arguments: str | None = Field(default=None, description="accumulated tool arguments JSON") + + # Tool result fields (when chunk_type == TOOL_RESULT) + tool_files: list[str] = Field(default_factory=list, description="file IDs produced by tool") + tool_error: str | None = Field(default=None, description="error message if tool failed") + + # Thought fields (when chunk_type == THOUGHT) + round_index: int | None = Field(default=None, description="current iteration round") class NodeRunRetrieverResourceEvent(GraphNodeEventBase): diff --git a/api/core/workflow/node_events/__init__.py b/api/core/workflow/node_events/__init__.py index f14a594c85..67263311b9 100644 --- a/api/core/workflow/node_events/__init__.py +++ b/api/core/workflow/node_events/__init__.py @@ -13,16 +13,21 @@ from .loop import ( LoopSucceededEvent, ) from .node import ( + ChunkType, ModelInvokeCompletedEvent, PauseRequestedEvent, RunRetrieverResourceEvent, RunRetryEvent, StreamChunkEvent, StreamCompletedEvent, + ThoughtChunkEvent, + ToolCallChunkEvent, + ToolResultChunkEvent, ) __all__ = [ "AgentLogEvent", + "ChunkType", "IterationFailedEvent", "IterationNextEvent", "IterationStartedEvent", @@ -39,4 +44,7 @@ __all__ = [ "RunRetryEvent", "StreamChunkEvent", "StreamCompletedEvent", + "ThoughtChunkEvent", + "ToolCallChunkEvent", + "ToolResultChunkEvent", ] diff --git a/api/core/workflow/node_events/node.py b/api/core/workflow/node_events/node.py index ebf93f2fc2..3a062b9c4c 100644 --- a/api/core/workflow/node_events/node.py +++ 
b/api/core/workflow/node_events/node.py @@ -1,5 +1,6 @@ from collections.abc import Sequence from datetime import datetime +from enum import StrEnum from pydantic import Field @@ -30,13 +31,50 @@ class RunRetryEvent(NodeEventBase): start_at: datetime = Field(..., description="Retry start time") +class ChunkType(StrEnum): + """Stream chunk type for LLM-related events.""" + + TEXT = "text" # Normal text streaming + TOOL_CALL = "tool_call" # Tool call arguments streaming + TOOL_RESULT = "tool_result" # Tool execution result + THOUGHT = "thought" # Agent thinking process (ReAct) + + class StreamChunkEvent(NodeEventBase): - # Spec-compliant fields + """Base stream chunk event - normal text streaming output.""" + selector: Sequence[str] = Field( ..., description="selector identifying the output location (e.g., ['nodeA', 'text'])" ) chunk: str = Field(..., description="the actual chunk content") is_final: bool = Field(default=False, description="indicates if this is the last chunk") + chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk") + + +class ToolCallChunkEvent(StreamChunkEvent): + """Tool call streaming event - tool call arguments streaming output.""" + + chunk_type: ChunkType = Field(default=ChunkType.TOOL_CALL, frozen=True) + tool_call_id: str = Field(..., description="unique identifier for this tool call") + tool_name: str = Field(..., description="name of the tool being called") + tool_arguments: str = Field(default="", description="accumulated tool arguments JSON") + + +class ToolResultChunkEvent(StreamChunkEvent): + """Tool result event - tool execution result.""" + + chunk_type: ChunkType = Field(default=ChunkType.TOOL_RESULT, frozen=True) + tool_call_id: str = Field(..., description="identifier of the tool call this result belongs to") + tool_name: str = Field(..., description="name of the tool") + tool_files: list[str] = Field(default_factory=list, description="file IDs produced by tool") + tool_error: str | None = Field(default=None, description="error message if tool failed") + + +class ThoughtChunkEvent(StreamChunkEvent): + """Agent thought streaming event - Agent thinking process (ReAct).""" + + chunk_type: ChunkType = Field(default=ChunkType.THOUGHT, frozen=True) + round_index: int = Field(default=1, description="current iteration round") class StreamCompletedEvent(NodeEventBase): diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index c2e1105971..9be16d4f08 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -46,6 +46,9 @@ from core.workflow.node_events import ( RunRetrieverResourceEvent, StreamChunkEvent, StreamCompletedEvent, + ThoughtChunkEvent, + ToolCallChunkEvent, + ToolResultChunkEvent, ) from core.workflow.runtime import GraphRuntimeState from libs.datetime_utils import naive_utc_now @@ -536,6 +539,8 @@ class Node(Generic[NodeDataT]): @_dispatch.register def _(self, event: StreamChunkEvent) -> NodeRunStreamChunkEvent: + from core.workflow.graph_events import ChunkType + return NodeRunStreamChunkEvent( id=self._node_execution_id, node_id=self._node_id, @@ -543,6 +548,57 @@ class Node(Generic[NodeDataT]): selector=event.selector, chunk=event.chunk, is_final=event.is_final, + chunk_type=ChunkType(event.chunk_type.value), + ) + + @_dispatch.register + def _(self, event: ToolCallChunkEvent) -> NodeRunStreamChunkEvent: + from core.workflow.graph_events import ChunkType + + return NodeRunStreamChunkEvent( + id=self._node_execution_id, + node_id=self._node_id, 
+ node_type=self.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=ChunkType.TOOL_CALL, + tool_call_id=event.tool_call_id, + tool_name=event.tool_name, + tool_arguments=event.tool_arguments, + ) + + @_dispatch.register + def _(self, event: ToolResultChunkEvent) -> NodeRunStreamChunkEvent: + from core.workflow.graph_events import ChunkType + + return NodeRunStreamChunkEvent( + id=self._node_execution_id, + node_id=self._node_id, + node_type=self.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=ChunkType.TOOL_RESULT, + tool_call_id=event.tool_call_id, + tool_name=event.tool_name, + tool_files=event.tool_files, + tool_error=event.tool_error, + ) + + @_dispatch.register + def _(self, event: ThoughtChunkEvent) -> NodeRunStreamChunkEvent: + from core.workflow.graph_events import ChunkType + + return NodeRunStreamChunkEvent( + id=self._node_execution_id, + node_id=self._node_id, + node_type=self.node_type, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=ChunkType.THOUGHT, + round_index=event.round_index, ) @_dispatch.register diff --git a/api/core/workflow/nodes/llm/__init__.py b/api/core/workflow/nodes/llm/__init__.py index f7bc713f63..edd0d3d581 100644 --- a/api/core/workflow/nodes/llm/__init__.py +++ b/api/core/workflow/nodes/llm/__init__.py @@ -3,6 +3,7 @@ from .entities import ( LLMNodeCompletionModelPromptTemplate, LLMNodeData, ModelConfig, + ToolMetadata, VisionConfig, ) from .node import LLMNode @@ -13,5 +14,6 @@ __all__ = [ "LLMNodeCompletionModelPromptTemplate", "LLMNodeData", "ModelConfig", + "ToolMetadata", "VisionConfig", ] diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index fe6f2290aa..fbdd1daec7 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -5,6 +5,7 @@ from pydantic import BaseModel, Field, field_validator from core.model_runtime.entities import ImagePromptMessageContent, LLMMode from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig +from core.tools.entities.tool_entities import ToolProviderType from core.workflow.nodes.base import BaseNodeData from core.workflow.nodes.base.entities import VariableSelector @@ -58,6 +59,30 @@ class LLMNodeCompletionModelPromptTemplate(CompletionModelPromptTemplate): jinja2_text: str | None = None +class ToolMetadata(BaseModel): + """ + Tool metadata for LLM node with tool support. + + Defines the essential fields needed for tool configuration, + particularly the 'type' field to identify tool provider type. 
+ """ + + # Core fields + enabled: bool = True + type: ToolProviderType = Field(..., description="Tool provider type: builtin, api, mcp, workflow") + provider_name: str = Field(..., description="Tool provider name/identifier") + tool_name: str = Field(..., description="Tool name") + + # Optional fields + plugin_unique_identifier: str | None = Field(None, description="Plugin unique identifier for plugin tools") + credential_id: str | None = Field(None, description="Credential ID for tools requiring authentication") + + # Configuration fields + parameters: dict[str, Any] = Field(default_factory=dict, description="Tool parameters") + settings: dict[str, Any] = Field(default_factory=dict, description="Tool settings configuration") + extra: dict[str, Any] = Field(default_factory=dict, description="Extra tool configuration like custom description") + + class LLMNodeData(BaseNodeData): model: ModelConfig prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate @@ -86,6 +111,9 @@ class LLMNodeData(BaseNodeData): ), ) + # Tool support (from Agent V2) + tools: Sequence[ToolMetadata] = Field(default_factory=list) + @field_validator("prompt_config", mode="before") @classmethod def convert_none_prompt_config(cls, v: Any): diff --git a/api/core/workflow/nodes/llm/llm_utils.py b/api/core/workflow/nodes/llm/llm_utils.py index 0c545469bc..e9c363851f 100644 --- a/api/core/workflow/nodes/llm/llm_utils.py +++ b/api/core/workflow/nodes/llm/llm_utils.py @@ -1,3 +1,4 @@ +import re from collections.abc import Sequence from typing import cast @@ -154,3 +155,94 @@ def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUs ) session.execute(stmt) session.commit() + + +class ThinkTagStreamParser: + """Lightweight state machine to split streaming chunks by tags.""" + + _START_PATTERN = re.compile(r"]*)?>", re.IGNORECASE) + _END_PATTERN = re.compile(r"", re.IGNORECASE) + _START_PREFIX = " int: + """Return length of the longest suffix of `text` that is a prefix of `prefix`.""" + max_len = min(len(text), len(prefix) - 1) + for i in range(max_len, 0, -1): + if text[-i:].lower() == prefix[:i].lower(): + return i + return 0 + + def process(self, chunk: str) -> list[tuple[str, str]]: + """ + Split incoming chunk into ('thought' | 'text', content) tuples. + Content excludes the tags themselves and handles split tags across chunks. 
+ """ + parts: list[tuple[str, str]] = [] + self._buffer += chunk + + while self._buffer: + if self._in_think: + end_match = self._END_PATTERN.search(self._buffer) + if end_match: + thought_text = self._buffer[: end_match.start()] + if thought_text: + parts.append(("thought", thought_text)) + self._buffer = self._buffer[end_match.end() :] + self._in_think = False + continue + + hold_len = self._suffix_prefix_len(self._buffer, self._END_PREFIX) + emit = self._buffer[: len(self._buffer) - hold_len] + if emit: + parts.append(("thought", emit)) + self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" + break + + start_match = self._START_PATTERN.search(self._buffer) + if start_match: + prefix = self._buffer[: start_match.start()] + if prefix: + parts.append(("text", prefix)) + self._buffer = self._buffer[start_match.end() :] + self._in_think = True + continue + + hold_len = self._suffix_prefix_len(self._buffer, self._START_PREFIX) + emit = self._buffer[: len(self._buffer) - hold_len] + if emit: + parts.append(("text", emit)) + self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" + break + + cleaned_parts: list[tuple[str, str]] = [] + for kind, content in parts: + # Extra safeguard: strip any stray tags that slipped through. + content = self._START_PATTERN.sub("", content) + content = self._END_PATTERN.sub("", content) + if content: + cleaned_parts.append((kind, content)) + + return cleaned_parts + + def flush(self) -> list[tuple[str, str]]: + """Flush remaining buffer when the stream ends.""" + if not self._buffer: + return [] + kind = "thought" if self._in_think else "text" + content = self._buffer + # Drop dangling partial tags instead of emitting them + if content.lower().startswith(self._START_PREFIX) or content.lower().startswith(self._END_PREFIX): + content = "" + self._buffer = "" + if not content: + return [] + # Strip any complete tags that might still be present. 
+ content = self._START_PATTERN.sub("", content) + content = self._END_PATTERN.sub("", content) + return [(kind, content)] if content else [] diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 1a2473e0bb..bf41f476fd 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -7,6 +7,8 @@ import time from collections.abc import Generator, Mapping, Sequence from typing import TYPE_CHECKING, Any, Literal +from core.agent.entities import AgentLog, AgentResult, AgentToolEntity, ExecutionContext +from core.agent.patterns import StrategyFactory from core.app.entities.app_invoke_entities import ModelConfigWithCredentialsEntity from core.file import FileType, file_manager from core.helper.code_executor import CodeExecutor, CodeLanguage @@ -44,6 +46,8 @@ from core.model_runtime.utils.encoders import jsonable_encoder from core.prompt.entities.advanced_prompt_entities import CompletionModelPromptTemplate, MemoryConfig from core.prompt.utils.prompt_message_util import PromptMessageUtil from core.rag.entities.citation_metadata import RetrievalSourceMetadata +from core.tools.__base.tool import Tool +from core.tools.tool_manager import ToolManager from core.variables import ( ArrayFileSegment, ArraySegment, @@ -61,12 +65,16 @@ from core.workflow.enums import ( WorkflowNodeExecutionStatus, ) from core.workflow.node_events import ( + AgentLogEvent, ModelInvokeCompletedEvent, NodeEventBase, NodeRunResult, RunRetrieverResourceEvent, StreamChunkEvent, StreamCompletedEvent, + ThoughtChunkEvent, + ToolCallChunkEvent, + ToolResultChunkEvent, ) from core.workflow.nodes.base.entities import VariableSelector from core.workflow.nodes.base.node import Node @@ -147,7 +155,8 @@ class LLMNode(Node[LLMNodeData]): clean_text = "" usage = LLMUsage.empty_usage() finish_reason = None - reasoning_content = None + reasoning_content = "" # Initialize as empty string for consistency + clean_text = "" # Initialize clean_text to avoid UnboundLocalError variable_pool = self.graph_runtime_state.variable_pool try: @@ -163,6 +172,15 @@ class LLMNode(Node[LLMNodeData]): # merge inputs inputs.update(jinja_inputs) + # Add all inputs to node_inputs for logging + node_inputs.update(inputs) + + # Add tools to inputs if configured + if self.tool_call_enabled: + node_inputs["tools"] = [ + {"provider_id": tool.provider_name, "tool_name": tool.tool_name} for tool in self._node_data.tools + ] + # fetch files files = ( llm_utils.fetch_files( @@ -222,21 +240,39 @@ class LLMNode(Node[LLMNodeData]): tenant_id=self.tenant_id, ) - # handle invoke result - generator = LLMNode.invoke_llm( - node_data_model=self.node_data.model, - model_instance=model_instance, - prompt_messages=prompt_messages, - stop=stop, - user_id=self.user_id, - structured_output_enabled=self.node_data.structured_output_enabled, - structured_output=self.node_data.structured_output, - file_saver=self._llm_file_saver, - file_outputs=self._file_outputs, - node_id=self._node_id, - node_type=self.node_type, - reasoning_format=self.node_data.reasoning_format, - ) + # Check if tools are configured + if self.tool_call_enabled: + # Use tool-enabled invocation (Agent V2 style) + # This generator handles all events including final events + generator = self._invoke_llm_with_tools( + model_instance=model_instance, + prompt_messages=prompt_messages, + stop=stop, + files=files, + variable_pool=variable_pool, + node_inputs=node_inputs, + process_data=process_data, + ) + # Forward all events and return early since 
_invoke_llm_with_tools + # already sends final event and StreamCompletedEvent + yield from generator + return + else: + # Use traditional LLM invocation + generator = LLMNode.invoke_llm( + node_data_model=self._node_data.model, + model_instance=model_instance, + prompt_messages=prompt_messages, + stop=stop, + user_id=self.user_id, + structured_output_enabled=self._node_data.structured_output_enabled, + structured_output=self._node_data.structured_output, + file_saver=self._llm_file_saver, + file_outputs=self._file_outputs, + node_id=self._node_id, + node_type=self.node_type, + reasoning_format=self._node_data.reasoning_format, + ) structured_output: LLMStructuredOutput | None = None @@ -287,6 +323,11 @@ class LLMNode(Node[LLMNodeData]): "reasoning_content": reasoning_content, "usage": jsonable_encoder(usage), "finish_reason": finish_reason, + "generation": { + "content": clean_text, + "reasoning_content": [reasoning_content] if reasoning_content else [], + "tool_calls": [], + }, } if structured_output: outputs["structured_output"] = structured_output.structured_output @@ -1204,6 +1245,398 @@ class LLMNode(Node[LLMNodeData]): def retry(self) -> bool: return self.node_data.retry_config.retry_enabled + @property + def tool_call_enabled(self) -> bool: + return ( + self.node_data.tools is not None + and len(self.node_data.tools) > 0 + and all(tool.enabled for tool in self.node_data.tools) + ) + + def _invoke_llm_with_tools( + self, + model_instance: ModelInstance, + prompt_messages: Sequence[PromptMessage], + stop: Sequence[str] | None, + files: Sequence["File"], + variable_pool: VariablePool, + node_inputs: dict[str, Any], + process_data: dict[str, Any], + ) -> Generator[NodeEventBase, None, None]: + """Invoke LLM with tools support (from Agent V2).""" + # Get model features to determine strategy + model_features = self._get_model_features(model_instance) + + # Prepare tool instances + tool_instances = self._prepare_tool_instances(variable_pool) + + # Prepare prompt files (files that come from prompt variables, not vision files) + prompt_files = self._extract_prompt_files(variable_pool) + + # Use factory to create appropriate strategy + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=model_instance, + tools=tool_instances, + files=prompt_files, + max_iterations=10, + context=ExecutionContext(user_id=self.user_id, app_id=self.app_id, tenant_id=self.tenant_id), + ) + + # Run strategy + outputs = strategy.run( + prompt_messages=list(prompt_messages), + model_parameters=self._node_data.model.completion_params, + stop=list(stop or []), + stream=True, + ) + + # Process outputs + yield from self._process_tool_outputs(outputs, strategy, node_inputs, process_data) + + def _get_model_features(self, model_instance: ModelInstance) -> list[ModelFeature]: + """Get model schema to determine features.""" + try: + model_type_instance = model_instance.model_type_instance + model_schema = model_type_instance.get_model_schema( + model_instance.model, + model_instance.credentials, + ) + return model_schema.features if model_schema and model_schema.features else [] + except Exception: + logger.warning("Failed to get model schema, assuming no special features") + return [] + + def _prepare_tool_instances(self, variable_pool: VariablePool) -> list[Tool]: + """Prepare tool instances from configuration.""" + tool_instances = [] + + if self._node_data.tools: + for tool in self._node_data.tools: + try: + # Process settings to extract the correct structure + processed_settings 
= {} + for key, value in tool.settings.items(): + if isinstance(value, dict) and "value" in value and isinstance(value["value"], dict): + # Extract the nested value if it has the ToolInput structure + if "type" in value["value"] and "value" in value["value"]: + processed_settings[key] = value["value"] + else: + processed_settings[key] = value + else: + processed_settings[key] = value + + # Merge parameters with processed settings (similar to Agent Node logic) + merged_parameters = {**tool.parameters, **processed_settings} + + # Create AgentToolEntity from ToolMetadata + agent_tool = AgentToolEntity( + provider_id=tool.provider_name, + provider_type=tool.type, + tool_name=tool.tool_name, + tool_parameters=merged_parameters, + plugin_unique_identifier=tool.plugin_unique_identifier, + credential_id=tool.credential_id, + ) + + # Get tool runtime from ToolManager + tool_runtime = ToolManager.get_agent_tool_runtime( + tenant_id=self.tenant_id, + app_id=self.app_id, + agent_tool=agent_tool, + invoke_from=self.invoke_from, + variable_pool=variable_pool, + ) + + # Apply custom description from extra field if available + if tool.extra.get("description") and tool_runtime.entity.description: + tool_runtime.entity.description.llm = ( + tool.extra.get("description") or tool_runtime.entity.description.llm + ) + + tool_instances.append(tool_runtime) + except Exception as e: + logger.warning("Failed to load tool %s: %s", tool, str(e)) + continue + + return tool_instances + + def _extract_prompt_files(self, variable_pool: VariablePool) -> list["File"]: + """Extract files from prompt template variables.""" + from core.variables import ArrayFileVariable, FileVariable + + files: list[File] = [] + + # Extract variables from prompt template + if isinstance(self._node_data.prompt_template, list): + for message in self._node_data.prompt_template: + if message.text: + parser = VariableTemplateParser(message.text) + variable_selectors = parser.extract_variable_selectors() + + for variable_selector in variable_selectors: + variable = variable_pool.get(variable_selector.value_selector) + if isinstance(variable, FileVariable) and variable.value: + files.append(variable.value) + elif isinstance(variable, ArrayFileVariable) and variable.value: + files.extend(variable.value) + + return files + + def _process_tool_outputs( + self, + outputs: Generator[LLMResultChunk | AgentLog, None, AgentResult], + strategy: Any, + node_inputs: dict[str, Any], + process_data: dict[str, Any], + ) -> Generator[NodeEventBase, None, None]: + """Process strategy outputs and convert to node events.""" + text = "" + files: list[File] = [] + usage = LLMUsage.empty_usage() + agent_logs: list[AgentLogEvent] = [] + finish_reason = None + agent_result: AgentResult | None = None + + # Track current round for ThoughtChunkEvent + current_round = 1 + think_parser = llm_utils.ThinkTagStreamParser() + reasoning_chunks: list[str] = [] + + # Process each output from strategy + try: + for output in outputs: + if isinstance(output, AgentLog): + # Store agent log event for metadata (no longer yielded, StreamChunkEvent contains the info) + agent_log_event = AgentLogEvent( + message_id=output.id, + label=output.label, + node_execution_id=self.id, + parent_id=output.parent_id, + error=output.error, + status=output.status.value, + data=output.data, + metadata={k.value: v for k, v in output.metadata.items()}, + node_id=self._node_id, + ) + for log in agent_logs: + if log.message_id == agent_log_event.message_id: + # update the log + log.data = agent_log_event.data + 
log.status = agent_log_event.status + log.error = agent_log_event.error + log.label = agent_log_event.label + log.metadata = agent_log_event.metadata + break + else: + agent_logs.append(agent_log_event) + + # Extract round number from ROUND log type + if output.log_type == AgentLog.LogType.ROUND: + round_index = output.data.get("round_index") + if isinstance(round_index, int): + current_round = round_index + + # Emit tool call events when tool call starts + if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.START: + tool_name = output.data.get("tool_name", "") + tool_call_id = output.data.get("tool_call_id", "") + tool_args = output.data.get("tool_args", {}) + tool_arguments = json.dumps(tool_args) if tool_args else "" + + yield ToolCallChunkEvent( + selector=[self._node_id, "generation", "tool_calls"], + chunk=tool_arguments, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + is_final=True, + ) + + # Emit tool result events when tool call completes + if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.SUCCESS: + tool_name = output.data.get("tool_name", "") + tool_output = output.data.get("output", "") + tool_call_id = output.data.get("tool_call_id", "") + tool_files = [] + tool_error = None + + # Extract file IDs if present + files_data = output.data.get("files") + if files_data and isinstance(files_data, list): + tool_files = files_data + + # Check for error in meta + meta = output.data.get("meta") + if meta and isinstance(meta, dict) and meta.get("error"): + tool_error = meta.get("error") + + yield ToolResultChunkEvent( + selector=[self._node_id, "generation", "tool_results"], + chunk=str(tool_output) if tool_output else "", + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_files=tool_files, + tool_error=tool_error, + is_final=True, + ) + + elif isinstance(output, LLMResultChunk): + # Handle LLM result chunks - only process text content + message = output.delta.message + + # Handle text content + if message and message.content: + chunk_text = message.content + if isinstance(chunk_text, list): + # Extract text from content list + chunk_text = "".join(getattr(c, "data", str(c)) for c in chunk_text) + else: + chunk_text = str(chunk_text) + for kind, segment in think_parser.process(chunk_text): + if not segment: + continue + + if kind == "thought": + reasoning_chunks.append(segment) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk=segment, + round_index=current_round, + is_final=False, + ) + else: + text += segment + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk=segment, + is_final=False, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk=segment, + is_final=False, + ) + + if output.delta.usage: + self._accumulate_usage(usage, output.delta.usage) + + # Capture finish reason + if output.delta.finish_reason: + finish_reason = output.delta.finish_reason + + except StopIteration as e: + # Get the return value from generator + if isinstance(getattr(e, "value", None), AgentResult): + agent_result = e.value + + # Use result from generator if available + if agent_result: + text = agent_result.text or text + files = agent_result.files + if agent_result.usage: + usage = agent_result.usage + if agent_result.finish_reason: + finish_reason = agent_result.finish_reason + + # Flush any remaining buffered content after streaming ends + for kind, segment in think_parser.flush(): + if not 
segment: + continue + if kind == "thought": + reasoning_chunks.append(segment) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk=segment, + round_index=current_round, + is_final=False, + ) + else: + text += segment + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk=segment, + is_final=False, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk=segment, + is_final=False, + ) + + # Send final events for all streams + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk="", + is_final=True, + ) + + # Close generation sub-field streams + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk="", + is_final=True, + ) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk="", + round_index=current_round, + is_final=True, + ) + + # Build generation field from agent_logs + tool_calls_for_generation = [] + for log in agent_logs: + if log.label == "Tool Call": + tool_call_data = { + "id": log.data.get("tool_call_id", ""), + "name": log.data.get("tool_name", ""), + "arguments": json.dumps(log.data.get("tool_args", {})), + "result": log.data.get("output", ""), + } + tool_calls_for_generation.append(tool_call_data) + + # Complete with results + yield StreamCompletedEvent( + node_run_result=NodeRunResult( + status=WorkflowNodeExecutionStatus.SUCCEEDED, + outputs={ + "text": text, + "files": ArrayFileSegment(value=files), + "usage": jsonable_encoder(usage), + "finish_reason": finish_reason, + "generation": { + "reasoning_content": ["".join(reasoning_chunks)] if reasoning_chunks else [], + "tool_calls": tool_calls_for_generation, + "content": text, + }, + }, + metadata={ + WorkflowNodeExecutionMetadataKey.LLM_CONTENT_SEQUENCE: [], + }, + inputs={ + **node_inputs, + "tools": [ + {"provider_id": tool.provider_name, "tool_name": tool.tool_name} + for tool in self._node_data.tools + ] + if self._node_data.tools + else [], + }, + process_data=process_data, + llm_usage=usage, + ) + ) + + def _accumulate_usage(self, total_usage: LLMUsage, delta_usage: LLMUsage) -> None: + """Accumulate LLM usage statistics.""" + total_usage.prompt_tokens += delta_usage.prompt_tokens + total_usage.completion_tokens += delta_usage.completion_tokens + total_usage.total_tokens += delta_usage.total_tokens + total_usage.prompt_price += delta_usage.prompt_price + total_usage.completion_price += delta_usage.completion_price + total_usage.total_price += delta_usage.total_price + def _combine_message_content_with_role( *, contents: str | list[PromptMessageContentUnionTypes] | None = None, role: PromptMessageRole diff --git a/api/fields/conversation_fields.py b/api/fields/conversation_fields.py index ecc267cf38..d5b2574edc 100644 --- a/api/fields/conversation_fields.py +++ b/api/fields/conversation_fields.py @@ -89,6 +89,7 @@ message_detail_fields = { "status": fields.String, "error": fields.String, "parent_message_id": fields.String, + "generation_detail": fields.Raw, } feedback_stat_fields = {"like": fields.Integer, "dislike": fields.Integer} diff --git a/api/fields/message_fields.py b/api/fields/message_fields.py index a419da2e18..8b9bcac76f 100644 --- a/api/fields/message_fields.py +++ b/api/fields/message_fields.py @@ -68,6 +68,7 @@ message_fields = { "message_files": fields.List(fields.Nested(message_file_fields)), "status": fields.String, "error": fields.String, + "generation_detail": fields.Raw, } message_infinite_scroll_pagination_fields = { diff --git 
a/api/fields/workflow_run_fields.py b/api/fields/workflow_run_fields.py index 821ce62ecc..6305d8d9d5 100644 --- a/api/fields/workflow_run_fields.py +++ b/api/fields/workflow_run_fields.py @@ -129,6 +129,7 @@ workflow_run_node_execution_fields = { "inputs_truncated": fields.Boolean, "outputs_truncated": fields.Boolean, "process_data_truncated": fields.Boolean, + "generation_detail": fields.Raw, } workflow_run_node_execution_list_fields = { diff --git a/api/models/__init__.py b/api/models/__init__.py index 906bc3198e..bc29421d4c 100644 --- a/api/models/__init__.py +++ b/api/models/__init__.py @@ -49,6 +49,7 @@ from .model import ( EndUser, IconType, InstalledApp, + LLMGenerationDetail, Message, MessageAgentThought, MessageAnnotation, @@ -154,6 +155,7 @@ __all__ = [ "IconType", "InstalledApp", "InvitationCode", + "LLMGenerationDetail", "LoadBalancingModelConfig", "Message", "MessageAgentThought", diff --git a/api/models/model.py b/api/models/model.py index 1731ff5699..0e862ad845 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -31,6 +31,8 @@ from .provider_ids import GenericProviderID from .types import LongText, StringUUID if TYPE_CHECKING: + from core.app.entities.llm_generation_entities import LLMGenerationDetailData + from .workflow import Workflow @@ -1169,6 +1171,17 @@ class Message(Base): .all() ) + @property + def generation_detail(self) -> dict[str, Any] | None: + """ + Get LLM generation detail for this message. + Returns the detail as a dictionary or None if not found. + """ + detail = db.session.query(LLMGenerationDetail).filter_by(message_id=self.id).first() + if detail: + return detail.to_dict() + return None + @property def retriever_resources(self) -> Any: return self.message_metadata_dict.get("retriever_resources") if self.message_metadata else [] @@ -2041,3 +2054,87 @@ class TraceAppConfig(TypeBase): "created_at": str(self.created_at) if self.created_at else None, "updated_at": str(self.updated_at) if self.updated_at else None, } + + +class LLMGenerationDetail(Base): + """ + Store LLM generation details including reasoning process and tool calls. 
+ + Association (choose one): + - For apps with Message: use message_id (one-to-one) + - For Workflow: use workflow_run_id + node_id (one run may have multiple LLM nodes) + """ + + __tablename__ = "llm_generation_details" + __table_args__ = ( + sa.PrimaryKeyConstraint("id", name="llm_generation_detail_pkey"), + sa.Index("idx_llm_generation_detail_message", "message_id"), + sa.Index("idx_llm_generation_detail_workflow", "workflow_run_id", "node_id"), + sa.CheckConstraint( + "(message_id IS NOT NULL AND workflow_run_id IS NULL AND node_id IS NULL)" + " OR " + "(message_id IS NULL AND workflow_run_id IS NOT NULL AND node_id IS NOT NULL)", + name="ck_llm_generation_detail_assoc_mode", + ), + ) + + id: Mapped[str] = mapped_column(StringUUID, default=lambda: str(uuid4())) + tenant_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + app_id: Mapped[str] = mapped_column(StringUUID, nullable=False) + + # Association fields (choose one) + message_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True, unique=True) + workflow_run_id: Mapped[str | None] = mapped_column(StringUUID, nullable=True) + node_id: Mapped[str | None] = mapped_column(String(255), nullable=True) + + # Core data as JSON strings + reasoning_content: Mapped[str | None] = mapped_column(LongText) + tool_calls: Mapped[str | None] = mapped_column(LongText) + sequence: Mapped[str | None] = mapped_column(LongText) + + created_at: Mapped[datetime] = mapped_column(sa.DateTime, nullable=False, server_default=func.current_timestamp()) + + def to_domain_model(self) -> "LLMGenerationDetailData": + """Convert to Pydantic domain model with proper validation.""" + from core.app.entities.llm_generation_entities import LLMGenerationDetailData + + return LLMGenerationDetailData( + reasoning_content=json.loads(self.reasoning_content) if self.reasoning_content else [], + tool_calls=json.loads(self.tool_calls) if self.tool_calls else [], + sequence=json.loads(self.sequence) if self.sequence else [], + ) + + def to_dict(self) -> dict[str, Any]: + """Convert to dictionary for API response.""" + return self.to_domain_model().to_response_dict() + + @classmethod + def from_domain_model( + cls, + data: "LLMGenerationDetailData", + *, + tenant_id: str, + app_id: str, + message_id: str | None = None, + workflow_run_id: str | None = None, + node_id: str | None = None, + ) -> "LLMGenerationDetail": + """Create from Pydantic domain model.""" + # Enforce association mode at object creation time as well. 
+ message_mode = message_id is not None + workflow_mode = workflow_run_id is not None or node_id is not None + if message_mode and workflow_mode: + raise ValueError("LLMGenerationDetail cannot set both message_id and workflow_run_id/node_id.") + if not message_mode and not (workflow_run_id and node_id): + raise ValueError("LLMGenerationDetail requires either message_id or workflow_run_id+node_id.") + + return cls( + tenant_id=tenant_id, + app_id=app_id, + message_id=message_id, + workflow_run_id=workflow_run_id, + node_id=node_id, + reasoning_content=json.dumps(data.reasoning_content) if data.reasoning_content else None, + tool_calls=json.dumps([tc.model_dump() for tc in data.tool_calls]) if data.tool_calls else None, + sequence=json.dumps([seg.model_dump() for seg in data.sequence]) if data.sequence else None, + ) diff --git a/api/services/llm_generation_service.py b/api/services/llm_generation_service.py new file mode 100644 index 0000000000..1e8c78a416 --- /dev/null +++ b/api/services/llm_generation_service.py @@ -0,0 +1,131 @@ +""" +LLM Generation Detail Service. + +Provides methods to query and attach generation details to workflow node executions +and messages, avoiding N+1 query problems. +""" + +from collections.abc import Sequence + +from sqlalchemy import select +from sqlalchemy.orm import Session + +from core.app.entities.llm_generation_entities import LLMGenerationDetailData +from models import LLMGenerationDetail, WorkflowNodeExecutionModel + + +class LLMGenerationService: + """Service for handling LLM generation details.""" + + def __init__(self, session: Session): + self._session = session + + def get_generation_details_for_workflow_run( + self, + workflow_run_id: str, + *, + tenant_id: str | None = None, + app_id: str | None = None, + ) -> dict[str, LLMGenerationDetailData]: + """ + Batch query generation details for all LLM nodes in a workflow run. + + Returns dict mapping node_id to LLMGenerationDetailData. + """ + stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.workflow_run_id == workflow_run_id) + if tenant_id: + stmt = stmt.where(LLMGenerationDetail.tenant_id == tenant_id) + if app_id: + stmt = stmt.where(LLMGenerationDetail.app_id == app_id) + details = self._session.scalars(stmt).all() + return {detail.node_id: detail.to_domain_model() for detail in details if detail.node_id} + + def get_generation_detail_for_message(self, message_id: str) -> LLMGenerationDetailData | None: + """Query generation detail for a specific message.""" + stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id == message_id) + detail = self._session.scalars(stmt).first() + return detail.to_domain_model() if detail else None + + def get_generation_details_for_messages( + self, + message_ids: list[str], + ) -> dict[str, LLMGenerationDetailData]: + """Batch query generation details for multiple messages.""" + if not message_ids: + return {} + + stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id.in_(message_ids)) + details = self._session.scalars(stmt).all() + return {detail.message_id: detail.to_domain_model() for detail in details if detail.message_id} + + def attach_generation_details_to_node_executions( + self, + node_executions: Sequence[WorkflowNodeExecutionModel], + workflow_run_id: str, + *, + tenant_id: str | None = None, + app_id: str | None = None, + ) -> list[dict]: + """ + Attach generation details to node executions and return as dicts. 
+ + Queries generation details in batch and attaches them to the corresponding + node executions, avoiding N+1 queries. + """ + generation_details = self.get_generation_details_for_workflow_run( + workflow_run_id, tenant_id=tenant_id, app_id=app_id + ) + + return [ + { + "id": node.id, + "index": node.index, + "predecessor_node_id": node.predecessor_node_id, + "node_id": node.node_id, + "node_type": node.node_type, + "title": node.title, + "inputs": node.inputs_dict, + "process_data": node.process_data_dict, + "outputs": node.outputs_dict, + "status": node.status, + "error": node.error, + "elapsed_time": node.elapsed_time, + "execution_metadata": node.execution_metadata_dict, + "extras": node.extras, + "created_at": int(node.created_at.timestamp()) if node.created_at else None, + "created_by_role": node.created_by_role, + "created_by_account": _serialize_account(node.created_by_account), + "created_by_end_user": _serialize_end_user(node.created_by_end_user), + "finished_at": int(node.finished_at.timestamp()) if node.finished_at else None, + "inputs_truncated": node.inputs_truncated, + "outputs_truncated": node.outputs_truncated, + "process_data_truncated": node.process_data_truncated, + "generation_detail": generation_details[node.node_id].to_response_dict() + if node.node_id in generation_details + else None, + } + for node in node_executions + ] + + +def _serialize_account(account) -> dict | None: + """Serialize Account to dict for API response.""" + if not account: + return None + return { + "id": account.id, + "name": account.name, + "email": account.email, + } + + +def _serialize_end_user(end_user) -> dict | None: + """Serialize EndUser to dict for API response.""" + if not end_user: + return None + return { + "id": end_user.id, + "type": end_user.type, + "is_anonymous": end_user.is_anonymous, + "session_id": end_user.session_id, + } diff --git a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index b903d8df5f..14bcca8754 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -1,8 +1,8 @@ import threading -from collections.abc import Sequence +from typing import Any from sqlalchemy import Engine -from sqlalchemy.orm import sessionmaker +from sqlalchemy.orm import Session, sessionmaker import contexts from extensions.ext_database import db @@ -11,12 +11,12 @@ from models import ( Account, App, EndUser, - WorkflowNodeExecutionModel, WorkflowRun, WorkflowRunTriggeredFrom, ) from repositories.api_workflow_run_repository import APIWorkflowRunRepository from repositories.factory import DifyAPIRepositoryFactory +from services.llm_generation_service import LLMGenerationService class WorkflowRunService: @@ -137,9 +137,9 @@ class WorkflowRunService: app_model: App, run_id: str, user: Account | EndUser, - ) -> Sequence[WorkflowNodeExecutionModel]: + ) -> list[dict[str, Any]]: """ - Get workflow run node execution list + Get workflow run node execution list with generation details attached. 
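+
+        Each item is a serialized node execution dict with an extra "generation_detail"
+        entry, which is None when no LLM generation detail was recorded for that node.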
""" workflow_run = self.get_workflow_run(app_model, run_id) @@ -154,8 +154,18 @@ class WorkflowRunService: if tenant_id is None: raise ValueError("User tenant_id cannot be None") - return self._node_execution_service_repo.get_executions_by_workflow_run( + node_executions = self._node_execution_service_repo.get_executions_by_workflow_run( tenant_id=tenant_id, app_id=app_model.id, workflow_run_id=run_id, ) + + # Attach generation details using batch query + with Session(db.engine) as session: + generation_service = LLMGenerationService(session) + return generation_service.attach_generation_details_to_node_executions( + node_executions=node_executions, + workflow_run_id=run_id, + tenant_id=tenant_id, + app_id=app_model.id, + ) diff --git a/api/tests/unit_tests/core/agent/patterns/__init__.py b/api/tests/unit_tests/core/agent/patterns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/core/agent/patterns/test_base.py b/api/tests/unit_tests/core/agent/patterns/test_base.py new file mode 100644 index 0000000000..b0e0d44940 --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_base.py @@ -0,0 +1,324 @@ +"""Tests for AgentPattern base class.""" + +from decimal import Decimal +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import AgentLog, ExecutionContext +from core.agent.patterns.base import AgentPattern +from core.model_runtime.entities.llm_entities import LLMUsage + + +class ConcreteAgentPattern(AgentPattern): + """Concrete implementation of AgentPattern for testing.""" + + def run(self, prompt_messages, model_parameters, stop=[], stream=True): + """Minimal implementation for testing.""" + yield from [] + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +@pytest.fixture +def agent_pattern(mock_model_instance, mock_context): + """Create a concrete agent pattern for testing.""" + return ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + max_iterations=10, + ) + + +class TestAccumulateUsage: + """Tests for _accumulate_usage method.""" + + def test_accumulate_usage_to_empty_dict(self, agent_pattern): + """Test accumulating usage to an empty dict creates a copy.""" + total_usage: dict = {"usage": None} + delta_usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + + agent_pattern._accumulate_usage(total_usage, delta_usage) + + assert total_usage["usage"] is not None + assert total_usage["usage"].total_tokens == 150 + assert total_usage["usage"].prompt_tokens == 100 + assert total_usage["usage"].completion_tokens == 50 + # Verify it's a copy, not a reference + assert total_usage["usage"] is not delta_usage + + def test_accumulate_usage_adds_to_existing(self, agent_pattern): + """Test accumulating usage adds to existing 
values.""" + initial_usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + total_usage: dict = {"usage": initial_usage} + + delta_usage = LLMUsage( + prompt_tokens=200, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.2"), + completion_tokens=100, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.2"), + total_tokens=300, + total_price=Decimal("0.4"), + currency="USD", + latency=0.5, + ) + + agent_pattern._accumulate_usage(total_usage, delta_usage) + + assert total_usage["usage"].total_tokens == 450 # 150 + 300 + assert total_usage["usage"].prompt_tokens == 300 # 100 + 200 + assert total_usage["usage"].completion_tokens == 150 # 50 + 100 + + def test_accumulate_usage_multiple_rounds(self, agent_pattern): + """Test accumulating usage across multiple rounds.""" + total_usage: dict = {"usage": None} + + # Round 1: 100 tokens + round1_usage = LLMUsage( + prompt_tokens=70, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.07"), + completion_tokens=30, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.06"), + total_tokens=100, + total_price=Decimal("0.13"), + currency="USD", + latency=0.3, + ) + agent_pattern._accumulate_usage(total_usage, round1_usage) + assert total_usage["usage"].total_tokens == 100 + + # Round 2: 150 tokens + round2_usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.4, + ) + agent_pattern._accumulate_usage(total_usage, round2_usage) + assert total_usage["usage"].total_tokens == 250 # 100 + 150 + + # Round 3: 200 tokens + round3_usage = LLMUsage( + prompt_tokens=130, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.13"), + completion_tokens=70, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.14"), + total_tokens=200, + total_price=Decimal("0.27"), + currency="USD", + latency=0.5, + ) + agent_pattern._accumulate_usage(total_usage, round3_usage) + assert total_usage["usage"].total_tokens == 450 # 100 + 150 + 200 + + +class TestCreateLog: + """Tests for _create_log method.""" + + def test_create_log_with_label_and_status(self, agent_pattern): + """Test creating a log with label and status.""" + log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"key": "value"}, + ) + + assert log.label == "ROUND 1" + assert log.log_type == AgentLog.LogType.ROUND + assert log.status == AgentLog.LogStatus.START + assert log.data == {"key": "value"} + assert log.parent_id is None + + def test_create_log_with_parent_id(self, agent_pattern): + """Test creating a log with parent_id.""" + parent_log = agent_pattern._create_log( + label="ROUND 1", + 
log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + child_log = agent_pattern._create_log( + label="CALL tool", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={}, + parent_id=parent_log.id, + ) + + assert child_log.parent_id == parent_log.id + assert child_log.log_type == AgentLog.LogType.TOOL_CALL + + +class TestFinishLog: + """Tests for _finish_log method.""" + + def test_finish_log_updates_status(self, agent_pattern): + """Test that finish_log updates status to SUCCESS.""" + log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + finished_log = agent_pattern._finish_log(log, data={"result": "done"}) + + assert finished_log.status == AgentLog.LogStatus.SUCCESS + assert finished_log.data == {"result": "done"} + + def test_finish_log_adds_usage_metadata(self, agent_pattern): + """Test that finish_log adds usage to metadata.""" + log = agent_pattern._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + + finished_log = agent_pattern._finish_log(log, usage=usage) + + assert finished_log.metadata[AgentLog.LogMetadata.TOTAL_TOKENS] == 150 + assert finished_log.metadata[AgentLog.LogMetadata.TOTAL_PRICE] == Decimal("0.2") + assert finished_log.metadata[AgentLog.LogMetadata.CURRENCY] == "USD" + assert finished_log.metadata[AgentLog.LogMetadata.LLM_USAGE] == usage + + +class TestFindToolByName: + """Tests for _find_tool_by_name method.""" + + def test_find_existing_tool(self, mock_model_instance, mock_context): + """Test finding an existing tool by name.""" + mock_tool = MagicMock() + mock_tool.entity.identity.name = "test_tool" + + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + found_tool = pattern._find_tool_by_name("test_tool") + assert found_tool == mock_tool + + def test_find_nonexistent_tool_returns_none(self, mock_model_instance, mock_context): + """Test that finding a nonexistent tool returns None.""" + mock_tool = MagicMock() + mock_tool.entity.identity.name = "test_tool" + + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + found_tool = pattern._find_tool_by_name("nonexistent_tool") + assert found_tool is None + + +class TestMaxIterationsCapping: + """Tests for max_iterations capping.""" + + def test_max_iterations_capped_at_99(self, mock_model_instance, mock_context): + """Test that max_iterations is capped at 99.""" + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + max_iterations=150, + ) + + assert pattern.max_iterations == 99 + + def test_max_iterations_not_capped_when_under_99(self, mock_model_instance, mock_context): + """Test that max_iterations is not capped when under 99.""" + pattern = ConcreteAgentPattern( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + max_iterations=50, + ) + + assert pattern.max_iterations == 50 diff --git 
a/api/tests/unit_tests/core/agent/patterns/test_function_call.py b/api/tests/unit_tests/core/agent/patterns/test_function_call.py new file mode 100644 index 0000000000..6b3600dbbf --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_function_call.py @@ -0,0 +1,332 @@ +"""Tests for FunctionCallStrategy.""" + +from decimal import Decimal +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import AgentLog, ExecutionContext +from core.model_runtime.entities.llm_entities import LLMUsage +from core.model_runtime.entities.message_entities import ( + PromptMessageTool, + SystemPromptMessage, + UserPromptMessage, +) + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +@pytest.fixture +def mock_tool(): + """Create a mock tool.""" + tool = MagicMock() + tool.entity.identity.name = "test_tool" + tool.to_prompt_message_tool.return_value = PromptMessageTool( + name="test_tool", + description="A test tool", + parameters={ + "type": "object", + "properties": {"param1": {"type": "string", "description": "A parameter"}}, + "required": ["param1"], + }, + ) + return tool + + +class TestFunctionCallStrategyInit: + """Tests for FunctionCallStrategy initialization.""" + + def test_initialization(self, mock_model_instance, mock_context, mock_tool): + """Test basic initialization.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + max_iterations=10, + ) + + assert strategy.model_instance == mock_model_instance + assert strategy.context == mock_context + assert strategy.max_iterations == 10 + assert len(strategy.tools) == 1 + + def test_initialization_with_tool_invoke_hook(self, mock_model_instance, mock_context, mock_tool): + """Test initialization with tool_invoke_hook.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + mock_hook = MagicMock() + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + tool_invoke_hook=mock_hook, + ) + + assert strategy.tool_invoke_hook == mock_hook + + +class TestConvertToolsToPromptFormat: + """Tests for _convert_tools_to_prompt_format method.""" + + def test_convert_tools_returns_prompt_message_tools(self, mock_model_instance, mock_context, mock_tool): + """Test that _convert_tools_to_prompt_format returns PromptMessageTool list.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + tools = strategy._convert_tools_to_prompt_format() + + assert len(tools) == 1 + assert isinstance(tools[0], PromptMessageTool) + assert tools[0].name == "test_tool" + + def test_convert_tools_empty_when_no_tools(self, mock_model_instance, mock_context): + """Test that _convert_tools_to_prompt_format returns empty list when no tools.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + 
model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + tools = strategy._convert_tools_to_prompt_format() + + assert tools == [] + + +class TestAgentLogGeneration: + """Tests for AgentLog generation during run.""" + + def test_round_log_structure(self, mock_model_instance, mock_context, mock_tool): + """Test that round logs have correct structure.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + max_iterations=1, + ) + + # Create a round log + round_log = strategy._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"inputs": {"query": "test"}}, + ) + + assert round_log.label == "ROUND 1" + assert round_log.log_type == AgentLog.LogType.ROUND + assert round_log.status == AgentLog.LogStatus.START + assert "inputs" in round_log.data + + def test_tool_call_log_structure(self, mock_model_instance, mock_context, mock_tool): + """Test that tool call logs have correct structure.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + # Create a parent round log + round_log = strategy._create_log( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={}, + ) + + # Create a tool call log + tool_log = strategy._create_log( + label="CALL test_tool", + log_type=AgentLog.LogType.TOOL_CALL, + status=AgentLog.LogStatus.START, + data={"tool_name": "test_tool", "tool_args": {"param1": "value1"}}, + parent_id=round_log.id, + ) + + assert tool_log.label == "CALL test_tool" + assert tool_log.log_type == AgentLog.LogType.TOOL_CALL + assert tool_log.parent_id == round_log.id + assert tool_log.data["tool_name"] == "test_tool" + + +class TestToolInvocation: + """Tests for tool invocation.""" + + def test_invoke_tool_with_hook(self, mock_model_instance, mock_context, mock_tool): + """Test that tool invocation uses hook when provided.""" + from core.agent.patterns.function_call import FunctionCallStrategy + from core.tools.entities.tool_entities import ToolInvokeMeta + + mock_hook = MagicMock() + mock_meta = ToolInvokeMeta( + time_cost=0.5, + error=None, + tool_config={"tool_provider_type": "test", "tool_provider": "test_id"}, + ) + mock_hook.return_value = ("Tool result", ["file-1"], mock_meta) + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + tool_invoke_hook=mock_hook, + ) + + result, files, meta = strategy._invoke_tool(mock_tool, {"param1": "value"}, "test_tool") + + mock_hook.assert_called_once() + assert result == "Tool result" + assert files == [] # Hook returns file IDs, but _invoke_tool returns empty File list + assert meta == mock_meta + + def test_invoke_tool_without_hook_attribute_set(self, mock_model_instance, mock_context, mock_tool): + """Test that tool_invoke_hook is None when not provided.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + tool_invoke_hook=None, + ) + + # Verify that tool_invoke_hook is None + assert strategy.tool_invoke_hook is None + + +class TestUsageTracking: + """Tests for usage tracking across rounds.""" + + def test_round_usage_is_separate_from_total(self, 
mock_model_instance, mock_context): + """Test that round usage is tracked separately from total.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + # Simulate two rounds of usage + total_usage: dict = {"usage": None} + round1_usage: dict = {"usage": None} + round2_usage: dict = {"usage": None} + + # Round 1 + usage1 = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + strategy._accumulate_usage(round1_usage, usage1) + strategy._accumulate_usage(total_usage, usage1) + + # Round 2 + usage2 = LLMUsage( + prompt_tokens=200, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.2"), + completion_tokens=100, + completion_unit_price=Decimal("0.002"), + completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.2"), + total_tokens=300, + total_price=Decimal("0.4"), + currency="USD", + latency=0.5, + ) + strategy._accumulate_usage(round2_usage, usage2) + strategy._accumulate_usage(total_usage, usage2) + + # Verify round usage is separate + assert round1_usage["usage"].total_tokens == 150 + assert round2_usage["usage"].total_tokens == 300 + # Verify total is accumulated + assert total_usage["usage"].total_tokens == 450 + + +class TestPromptMessageHandling: + """Tests for prompt message handling.""" + + def test_messages_include_system_and_user(self, mock_model_instance, mock_context, mock_tool): + """Test that messages include system and user prompts.""" + from core.agent.patterns.function_call import FunctionCallStrategy + + strategy = FunctionCallStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + messages = [ + SystemPromptMessage(content="You are a helpful assistant."), + UserPromptMessage(content="Hello"), + ] + + # Just verify the messages can be processed + assert len(messages) == 2 + assert isinstance(messages[0], SystemPromptMessage) + assert isinstance(messages[1], UserPromptMessage) + + def test_assistant_message_with_tool_calls(self, mock_model_instance, mock_context, mock_tool): + """Test that assistant messages can contain tool calls.""" + from core.model_runtime.entities.message_entities import AssistantPromptMessage + + tool_call = AssistantPromptMessage.ToolCall( + id="call_123", + type="function", + function=AssistantPromptMessage.ToolCall.ToolCallFunction( + name="test_tool", + arguments='{"param1": "value1"}', + ), + ) + + assistant_message = AssistantPromptMessage( + content="I'll help you with that.", + tool_calls=[tool_call], + ) + + assert len(assistant_message.tool_calls) == 1 + assert assistant_message.tool_calls[0].function.name == "test_tool" diff --git a/api/tests/unit_tests/core/agent/patterns/test_react.py b/api/tests/unit_tests/core/agent/patterns/test_react.py new file mode 100644 index 0000000000..a942ba6100 --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_react.py @@ -0,0 +1,224 @@ +"""Tests for ReActStrategy.""" + +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import ExecutionContext +from core.agent.patterns.react import ReActStrategy +from 
core.model_runtime.entities import SystemPromptMessage, UserPromptMessage + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +@pytest.fixture +def mock_tool(): + """Create a mock tool.""" + from core.model_runtime.entities.message_entities import PromptMessageTool + + tool = MagicMock() + tool.entity.identity.name = "test_tool" + tool.entity.identity.provider = "test_provider" + + # Use real PromptMessageTool for proper serialization + prompt_tool = PromptMessageTool( + name="test_tool", + description="A test tool", + parameters={"type": "object", "properties": {}}, + ) + tool.to_prompt_message_tool.return_value = prompt_tool + + return tool + + +class TestReActStrategyInit: + """Tests for ReActStrategy initialization.""" + + def test_init_with_instruction(self, mock_model_instance, mock_context): + """Test that instruction is stored correctly.""" + instruction = "You are a helpful assistant." + + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + instruction=instruction, + ) + + assert strategy.instruction == instruction + + def test_init_with_empty_instruction(self, mock_model_instance, mock_context): + """Test that empty instruction is handled correctly.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + assert strategy.instruction == "" + + +class TestBuildPromptWithReactFormat: + """Tests for _build_prompt_with_react_format method.""" + + def test_replace_tools_placeholder(self, mock_model_instance, mock_context, mock_tool): + """Test that {{tools}} placeholder is replaced.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + system_content = "You have access to: {{tools}}" + messages = [ + SystemPromptMessage(content=system_content), + UserPromptMessage(content="Hello"), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True) + + # The tools placeholder should be replaced with JSON + assert "{{tools}}" not in result[0].content + assert "test_tool" in result[0].content + + def test_replace_tool_names_placeholder(self, mock_model_instance, mock_context, mock_tool): + """Test that {{tool_names}} placeholder is replaced.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[mock_tool], + context=mock_context, + ) + + system_content = "Valid actions: {{tool_names}}" + messages = [ + SystemPromptMessage(content=system_content), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True) + + assert "{{tool_names}}" not in result[0].content + assert '"test_tool"' in result[0].content + + def test_replace_instruction_placeholder(self, mock_model_instance, mock_context): + """Test that {{instruction}} placeholder is replaced.""" + instruction = "You are a helpful coding assistant." 
+ strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + instruction=instruction, + ) + + system_content = "{{instruction}}\n\nYou have access to: {{tools}}" + messages = [ + SystemPromptMessage(content=system_content), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True, instruction) + + assert "{{instruction}}" not in result[0].content + assert instruction in result[0].content + + def test_no_tools_available_message(self, mock_model_instance, mock_context): + """Test that 'No tools available' is shown when include_tools is False.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + system_content = "You have access to: {{tools}}" + messages = [ + SystemPromptMessage(content=system_content), + ] + + result = strategy._build_prompt_with_react_format(messages, [], False) + + assert "No tools available" in result[0].content + + def test_scratchpad_appended_as_assistant_message(self, mock_model_instance, mock_context): + """Test that agent scratchpad is appended as AssistantPromptMessage.""" + from core.agent.entities import AgentScratchpadUnit + from core.model_runtime.entities import AssistantPromptMessage + + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + messages = [ + SystemPromptMessage(content="System prompt"), + UserPromptMessage(content="User query"), + ] + + scratchpad = [ + AgentScratchpadUnit( + thought="I need to search for information", + action_str='{"action": "search", "action_input": "query"}', + observation="Search results here", + ) + ] + + result = strategy._build_prompt_with_react_format(messages, scratchpad, True) + + # The last message should be an AssistantPromptMessage with scratchpad content + assert len(result) == 3 + assert isinstance(result[-1], AssistantPromptMessage) + assert "I need to search for information" in result[-1].content + assert "Search results here" in result[-1].content + + def test_empty_scratchpad_no_extra_message(self, mock_model_instance, mock_context): + """Test that empty scratchpad doesn't add extra message.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + messages = [ + SystemPromptMessage(content="System prompt"), + UserPromptMessage(content="User query"), + ] + + result = strategy._build_prompt_with_react_format(messages, [], True) + + # Should only have the original 2 messages + assert len(result) == 2 + + def test_original_messages_not_modified(self, mock_model_instance, mock_context): + """Test that original messages list is not modified.""" + strategy = ReActStrategy( + model_instance=mock_model_instance, + tools=[], + context=mock_context, + ) + + original_content = "Original system prompt {{tools}}" + messages = [ + SystemPromptMessage(content=original_content), + ] + + strategy._build_prompt_with_react_format(messages, [], True) + + # Original message should not be modified + assert messages[0].content == original_content diff --git a/api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py b/api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py new file mode 100644 index 0000000000..07b9df2acf --- /dev/null +++ b/api/tests/unit_tests/core/agent/patterns/test_strategy_factory.py @@ -0,0 +1,203 @@ +"""Tests for StrategyFactory.""" + +from unittest.mock import MagicMock + +import pytest + +from core.agent.entities import AgentEntity, ExecutionContext +from 
core.agent.patterns.function_call import FunctionCallStrategy +from core.agent.patterns.react import ReActStrategy +from core.agent.patterns.strategy_factory import StrategyFactory +from core.model_runtime.entities.model_entities import ModelFeature + + +@pytest.fixture +def mock_model_instance(): + """Create a mock model instance.""" + model_instance = MagicMock() + model_instance.model = "test-model" + model_instance.provider = "test-provider" + return model_instance + + +@pytest.fixture +def mock_context(): + """Create a mock execution context.""" + return ExecutionContext( + user_id="test-user", + app_id="test-app", + conversation_id="test-conversation", + message_id="test-message", + tenant_id="test-tenant", + ) + + +class TestStrategyFactory: + """Tests for StrategyFactory.create_strategy method.""" + + def test_create_function_call_strategy_with_tool_call_feature(self, mock_model_instance, mock_context): + """Test that FunctionCallStrategy is created when model supports TOOL_CALL.""" + model_features = [ModelFeature.TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_create_function_call_strategy_with_multi_tool_call_feature(self, mock_model_instance, mock_context): + """Test that FunctionCallStrategy is created when model supports MULTI_TOOL_CALL.""" + model_features = [ModelFeature.MULTI_TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_create_function_call_strategy_with_stream_tool_call_feature(self, mock_model_instance, mock_context): + """Test that FunctionCallStrategy is created when model supports STREAM_TOOL_CALL.""" + model_features = [ModelFeature.STREAM_TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_create_react_strategy_without_tool_call_features(self, mock_model_instance, mock_context): + """Test that ReActStrategy is created when model doesn't support tool calling.""" + model_features = [ModelFeature.VISION] # Only vision, no tool calling + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, ReActStrategy) + + def test_create_react_strategy_with_empty_features(self, mock_model_instance, mock_context): + """Test that ReActStrategy is created when model has no features.""" + model_features: list[ModelFeature] = [] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + ) + + assert isinstance(strategy, ReActStrategy) + + def test_explicit_function_calling_strategy_with_support(self, mock_model_instance, mock_context): + """Test explicit FUNCTION_CALLING strategy selection with model support.""" + model_features = [ModelFeature.TOOL_CALL] + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + 
agent_strategy=AgentEntity.Strategy.FUNCTION_CALLING, + ) + + assert isinstance(strategy, FunctionCallStrategy) + + def test_explicit_function_calling_strategy_without_support_falls_back_to_react( + self, mock_model_instance, mock_context + ): + """Test that explicit FUNCTION_CALLING falls back to ReAct when not supported.""" + model_features: list[ModelFeature] = [] # No tool calling support + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + agent_strategy=AgentEntity.Strategy.FUNCTION_CALLING, + ) + + # Should fall back to ReAct since FC is not supported + assert isinstance(strategy, ReActStrategy) + + def test_explicit_chain_of_thought_strategy(self, mock_model_instance, mock_context): + """Test explicit CHAIN_OF_THOUGHT strategy selection.""" + model_features = [ModelFeature.TOOL_CALL] # Even with tool call support + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + agent_strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT, + ) + + assert isinstance(strategy, ReActStrategy) + + def test_react_strategy_with_instruction(self, mock_model_instance, mock_context): + """Test that ReActStrategy receives instruction parameter.""" + model_features: list[ModelFeature] = [] + instruction = "You are a helpful assistant." + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + instruction=instruction, + ) + + assert isinstance(strategy, ReActStrategy) + assert strategy.instruction == instruction + + def test_max_iterations_passed_to_strategy(self, mock_model_instance, mock_context): + """Test that max_iterations is passed to the strategy.""" + model_features = [ModelFeature.TOOL_CALL] + max_iterations = 5 + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + max_iterations=max_iterations, + ) + + assert strategy.max_iterations == max_iterations + + def test_tool_invoke_hook_passed_to_strategy(self, mock_model_instance, mock_context): + """Test that tool_invoke_hook is passed to the strategy.""" + model_features = [ModelFeature.TOOL_CALL] + mock_hook = MagicMock() + + strategy = StrategyFactory.create_strategy( + model_features=model_features, + model_instance=mock_model_instance, + context=mock_context, + tools=[], + files=[], + tool_invoke_hook=mock_hook, + ) + + assert strategy.tool_invoke_hook == mock_hook diff --git a/api/tests/unit_tests/core/agent/test_agent_app_runner.py b/api/tests/unit_tests/core/agent/test_agent_app_runner.py new file mode 100644 index 0000000000..d9301ccfe0 --- /dev/null +++ b/api/tests/unit_tests/core/agent/test_agent_app_runner.py @@ -0,0 +1,388 @@ +"""Tests for AgentAppRunner.""" + +from decimal import Decimal +from unittest.mock import MagicMock, patch + +import pytest + +from core.agent.entities import AgentEntity, AgentLog, AgentPromptEntity, AgentResult +from core.model_runtime.entities import SystemPromptMessage, UserPromptMessage +from core.model_runtime.entities.llm_entities import LLMUsage + + +class TestOrganizePromptMessages: + """Tests for _organize_prompt_messages method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + # We'll patch the class to avoid 
complex initialization + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + + # Set up required attributes + runner.config = MagicMock(spec=AgentEntity) + runner.config.strategy = AgentEntity.Strategy.FUNCTION_CALLING + runner.config.prompt = None + + runner.app_config = MagicMock() + runner.app_config.prompt_template = MagicMock() + runner.app_config.prompt_template.simple_prompt_template = "You are a helpful assistant." + + runner.history_prompt_messages = [] + runner.query = "Hello" + runner._current_thoughts = [] + runner.files = [] + runner.model_config = MagicMock() + runner.memory = None + runner.application_generate_entity = MagicMock() + runner.application_generate_entity.file_upload_config = None + + return runner + + def test_function_calling_uses_simple_prompt(self, mock_runner): + """Test that function calling strategy uses simple_prompt_template.""" + mock_runner.config.strategy = AgentEntity.Strategy.FUNCTION_CALLING + + with patch.object(mock_runner, "_init_system_message") as mock_init: + mock_init.return_value = [SystemPromptMessage(content="You are a helpful assistant.")] + with patch.object(mock_runner, "_organize_user_query") as mock_query: + mock_query.return_value = [UserPromptMessage(content="Hello")] + with patch("core.agent.agent_app_runner.AgentHistoryPromptTransform") as mock_transform: + mock_transform.return_value.get_prompt.return_value = [ + SystemPromptMessage(content="You are a helpful assistant.") + ] + + result = mock_runner._organize_prompt_messages() + + # Verify _init_system_message was called with simple_prompt_template + mock_init.assert_called_once() + call_args = mock_init.call_args[0] + assert call_args[0] == "You are a helpful assistant." 
+ + def test_chain_of_thought_uses_agent_prompt(self, mock_runner): + """Test that chain of thought strategy uses agent prompt template.""" + mock_runner.config.strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT + mock_runner.config.prompt = AgentPromptEntity( + first_prompt="ReAct prompt template with {{tools}}", + next_iteration="Continue...", + ) + + with patch.object(mock_runner, "_init_system_message") as mock_init: + mock_init.return_value = [SystemPromptMessage(content="ReAct prompt template with {{tools}}")] + with patch.object(mock_runner, "_organize_user_query") as mock_query: + mock_query.return_value = [UserPromptMessage(content="Hello")] + with patch("core.agent.agent_app_runner.AgentHistoryPromptTransform") as mock_transform: + mock_transform.return_value.get_prompt.return_value = [ + SystemPromptMessage(content="ReAct prompt template with {{tools}}") + ] + + result = mock_runner._organize_prompt_messages() + + # Verify _init_system_message was called with agent prompt + mock_init.assert_called_once() + call_args = mock_init.call_args[0] + assert call_args[0] == "ReAct prompt template with {{tools}}" + + def test_chain_of_thought_without_prompt_falls_back(self, mock_runner): + """Test that chain of thought without prompt falls back to simple_prompt_template.""" + mock_runner.config.strategy = AgentEntity.Strategy.CHAIN_OF_THOUGHT + mock_runner.config.prompt = None + + with patch.object(mock_runner, "_init_system_message") as mock_init: + mock_init.return_value = [SystemPromptMessage(content="You are a helpful assistant.")] + with patch.object(mock_runner, "_organize_user_query") as mock_query: + mock_query.return_value = [UserPromptMessage(content="Hello")] + with patch("core.agent.agent_app_runner.AgentHistoryPromptTransform") as mock_transform: + mock_transform.return_value.get_prompt.return_value = [ + SystemPromptMessage(content="You are a helpful assistant.") + ] + + result = mock_runner._organize_prompt_messages() + + # Verify _init_system_message was called with simple_prompt_template + mock_init.assert_called_once() + call_args = mock_init.call_args[0] + assert call_args[0] == "You are a helpful assistant." 
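+
+# Taken together, the three tests above pin down the system-template selection that
+# _organize_prompt_messages is expected to perform. A rough sketch of that rule, for
+# reference only (the real implementation lives in AgentAppRunner; names below come
+# from the fixtures in this module):
+#
+#     if self.config.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT and self.config.prompt:
+#         template = self.config.prompt.first_prompt
+#     else:
+#         template = self.app_config.prompt_template.simple_prompt_template
+#     prompt_messages = self._init_system_message(template, prompt_messages)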
+ + +class TestInitSystemMessage: + """Tests for _init_system_message method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + return runner + + def test_empty_messages_with_template(self, mock_runner): + """Test that system message is created when messages are empty.""" + result = mock_runner._init_system_message("System template", []) + + assert len(result) == 1 + assert isinstance(result[0], SystemPromptMessage) + assert result[0].content == "System template" + + def test_empty_messages_without_template(self, mock_runner): + """Test that empty list is returned when no template and no messages.""" + result = mock_runner._init_system_message("", []) + + assert result == [] + + def test_existing_system_message_not_duplicated(self, mock_runner): + """Test that system message is not duplicated if already present.""" + existing_messages = [ + SystemPromptMessage(content="Existing system"), + UserPromptMessage(content="User message"), + ] + + result = mock_runner._init_system_message("New template", existing_messages) + + # Should not insert new system message + assert len(result) == 2 + assert result[0].content == "Existing system" + + def test_system_message_inserted_when_missing(self, mock_runner): + """Test that system message is inserted when first message is not system.""" + existing_messages = [ + UserPromptMessage(content="User message"), + ] + + result = mock_runner._init_system_message("System template", existing_messages) + + assert len(result) == 2 + assert isinstance(result[0], SystemPromptMessage) + assert result[0].content == "System template" + + +class TestClearUserPromptImageMessages: + """Tests for _clear_user_prompt_image_messages method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + return runner + + def test_text_content_unchanged(self, mock_runner): + """Test that text content is unchanged.""" + messages = [ + UserPromptMessage(content="Plain text message"), + ] + + result = mock_runner._clear_user_prompt_image_messages(messages) + + assert len(result) == 1 + assert result[0].content == "Plain text message" + + def test_original_messages_not_modified(self, mock_runner): + """Test that original messages are not modified (deep copy).""" + from core.model_runtime.entities.message_entities import ( + ImagePromptMessageContent, + TextPromptMessageContent, + ) + + messages = [ + UserPromptMessage( + content=[ + TextPromptMessageContent(data="Text part"), + ImagePromptMessageContent( + data="http://example.com/image.jpg", + format="url", + mime_type="image/jpeg", + ), + ] + ), + ] + + result = mock_runner._clear_user_prompt_image_messages(messages) + + # Original should still have list content + assert isinstance(messages[0].content, list) + # Result should have string content + assert isinstance(result[0].content, str) + + +class TestToolInvokeHook: + """Tests for _create_tool_invoke_hook method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + 
from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + + runner.user_id = "test-user" + runner.tenant_id = "test-tenant" + runner.application_generate_entity = MagicMock() + runner.application_generate_entity.trace_manager = None + runner.application_generate_entity.invoke_from = "api" + runner.application_generate_entity.app_config = MagicMock() + runner.application_generate_entity.app_config.app_id = "test-app" + runner.agent_callback = MagicMock() + runner.conversation = MagicMock() + runner.conversation.id = "test-conversation" + runner.queue_manager = MagicMock() + runner._current_message_file_ids = [] + + return runner + + def test_hook_calls_agent_invoke(self, mock_runner): + """Test that the hook calls ToolEngine.agent_invoke.""" + from core.tools.entities.tool_entities import ToolInvokeMeta + + mock_message = MagicMock() + mock_message.id = "test-message" + + mock_tool = MagicMock() + mock_tool_meta = ToolInvokeMeta( + time_cost=0.5, + error=None, + tool_config={ + "tool_provider_type": "test_provider", + "tool_provider": "test_id", + }, + ) + + with patch("core.agent.agent_app_runner.ToolEngine") as mock_engine: + mock_engine.agent_invoke.return_value = ("Tool result", ["file-1", "file-2"], mock_tool_meta) + + hook = mock_runner._create_tool_invoke_hook(mock_message) + result_content, result_files, result_meta = hook(mock_tool, {"arg": "value"}, "test_tool") + + # Verify ToolEngine.agent_invoke was called + mock_engine.agent_invoke.assert_called_once() + + # Verify return values + assert result_content == "Tool result" + assert result_files == ["file-1", "file-2"] + assert result_meta == mock_tool_meta + + def test_hook_publishes_file_events(self, mock_runner): + """Test that the hook publishes QueueMessageFileEvent for files.""" + from core.tools.entities.tool_entities import ToolInvokeMeta + + mock_message = MagicMock() + mock_message.id = "test-message" + + mock_tool = MagicMock() + mock_tool_meta = ToolInvokeMeta( + time_cost=0.5, + error=None, + tool_config={}, + ) + + with patch("core.agent.agent_app_runner.ToolEngine") as mock_engine: + mock_engine.agent_invoke.return_value = ("Tool result", ["file-1", "file-2"], mock_tool_meta) + + hook = mock_runner._create_tool_invoke_hook(mock_message) + hook(mock_tool, {}, "test_tool") + + # Verify file events were published + assert mock_runner.queue_manager.publish.call_count == 2 + assert mock_runner._current_message_file_ids == ["file-1", "file-2"] + + +class TestAgentLogProcessing: + """Tests for AgentLog processing in run method.""" + + def test_agent_log_status_enum(self): + """Test AgentLog status enum values.""" + assert AgentLog.LogStatus.START == "start" + assert AgentLog.LogStatus.SUCCESS == "success" + assert AgentLog.LogStatus.ERROR == "error" + + def test_agent_log_metadata_enum(self): + """Test AgentLog metadata enum values.""" + assert AgentLog.LogMetadata.STARTED_AT == "started_at" + assert AgentLog.LogMetadata.FINISHED_AT == "finished_at" + assert AgentLog.LogMetadata.ELAPSED_TIME == "elapsed_time" + assert AgentLog.LogMetadata.TOTAL_PRICE == "total_price" + assert AgentLog.LogMetadata.TOTAL_TOKENS == "total_tokens" + assert AgentLog.LogMetadata.LLM_USAGE == "llm_usage" + + def test_agent_result_structure(self): + """Test AgentResult structure.""" + usage = LLMUsage( + prompt_tokens=100, + prompt_unit_price=Decimal("0.001"), + prompt_price_unit=Decimal("0.001"), + prompt_price=Decimal("0.1"), + completion_tokens=50, + completion_unit_price=Decimal("0.002"), + 
completion_price_unit=Decimal("0.001"), + completion_price=Decimal("0.1"), + total_tokens=150, + total_price=Decimal("0.2"), + currency="USD", + latency=0.5, + ) + + result = AgentResult( + text="Final answer", + files=[], + usage=usage, + finish_reason="stop", + ) + + assert result.text == "Final answer" + assert result.files == [] + assert result.usage == usage + assert result.finish_reason == "stop" + + +class TestOrganizeUserQuery: + """Tests for _organize_user_query method.""" + + @pytest.fixture + def mock_runner(self): + """Create a mock AgentAppRunner for testing.""" + with patch("core.agent.agent_app_runner.BaseAgentRunner.__init__", return_value=None): + from core.agent.agent_app_runner import AgentAppRunner + + runner = AgentAppRunner.__new__(AgentAppRunner) + runner.files = [] + runner.application_generate_entity = MagicMock() + runner.application_generate_entity.file_upload_config = None + return runner + + def test_simple_query_without_files(self, mock_runner): + """Test organizing a simple query without files.""" + result = mock_runner._organize_user_query("Hello world", []) + + assert len(result) == 1 + assert isinstance(result[0], UserPromptMessage) + assert result[0].content == "Hello world" + + def test_query_with_files(self, mock_runner): + """Test organizing a query with files.""" + from core.file.models import File + + mock_file = MagicMock(spec=File) + mock_runner.files = [mock_file] + + with patch("core.agent.agent_app_runner.file_manager") as mock_fm: + from core.model_runtime.entities.message_entities import ImagePromptMessageContent + + mock_fm.to_prompt_message_content.return_value = ImagePromptMessageContent( + data="http://example.com/image.jpg", + format="url", + mime_type="image/jpeg", + ) + + result = mock_runner._organize_user_query("Describe this image", []) + + assert len(result) == 1 + assert isinstance(result[0], UserPromptMessage) + assert isinstance(result[0].content, list) + assert len(result[0].content) == 2 # Image + Text diff --git a/api/tests/unit_tests/core/agent/test_entities.py b/api/tests/unit_tests/core/agent/test_entities.py new file mode 100644 index 0000000000..5136f48aab --- /dev/null +++ b/api/tests/unit_tests/core/agent/test_entities.py @@ -0,0 +1,191 @@ +"""Tests for agent entities.""" + +from core.agent.entities import AgentEntity, AgentLog, AgentPromptEntity, AgentScratchpadUnit, ExecutionContext + + +class TestExecutionContext: + """Tests for ExecutionContext entity.""" + + def test_create_with_all_fields(self): + """Test creating ExecutionContext with all fields.""" + context = ExecutionContext( + user_id="user-123", + app_id="app-456", + conversation_id="conv-789", + message_id="msg-012", + tenant_id="tenant-345", + ) + + assert context.user_id == "user-123" + assert context.app_id == "app-456" + assert context.conversation_id == "conv-789" + assert context.message_id == "msg-012" + assert context.tenant_id == "tenant-345" + + def test_create_minimal(self): + """Test creating minimal ExecutionContext.""" + context = ExecutionContext.create_minimal(user_id="user-123") + + assert context.user_id == "user-123" + assert context.app_id is None + assert context.conversation_id is None + assert context.message_id is None + assert context.tenant_id is None + + def test_to_dict(self): + """Test converting ExecutionContext to dictionary.""" + context = ExecutionContext( + user_id="user-123", + app_id="app-456", + conversation_id="conv-789", + message_id="msg-012", + tenant_id="tenant-345", + ) + + result = context.to_dict() + + assert 
result == { + "user_id": "user-123", + "app_id": "app-456", + "conversation_id": "conv-789", + "message_id": "msg-012", + "tenant_id": "tenant-345", + } + + def test_with_updates(self): + """Test creating new context with updates.""" + original = ExecutionContext( + user_id="user-123", + app_id="app-456", + ) + + updated = original.with_updates(message_id="msg-789") + + # Original should be unchanged + assert original.message_id is None + # Updated should have new value + assert updated.message_id == "msg-789" + assert updated.user_id == "user-123" + assert updated.app_id == "app-456" + + +class TestAgentLog: + """Tests for AgentLog entity.""" + + def test_create_log_with_required_fields(self): + """Test creating AgentLog with required fields.""" + log = AgentLog( + label="ROUND 1", + log_type=AgentLog.LogType.ROUND, + status=AgentLog.LogStatus.START, + data={"key": "value"}, + ) + + assert log.label == "ROUND 1" + assert log.log_type == AgentLog.LogType.ROUND + assert log.status == AgentLog.LogStatus.START + assert log.data == {"key": "value"} + assert log.id is not None # Auto-generated + assert log.parent_id is None + assert log.error is None + + def test_log_type_enum(self): + """Test LogType enum values.""" + assert AgentLog.LogType.ROUND == "round" + assert AgentLog.LogType.THOUGHT == "thought" + assert AgentLog.LogType.TOOL_CALL == "tool_call" + + def test_log_status_enum(self): + """Test LogStatus enum values.""" + assert AgentLog.LogStatus.START == "start" + assert AgentLog.LogStatus.SUCCESS == "success" + assert AgentLog.LogStatus.ERROR == "error" + + def test_log_metadata_enum(self): + """Test LogMetadata enum values.""" + assert AgentLog.LogMetadata.STARTED_AT == "started_at" + assert AgentLog.LogMetadata.FINISHED_AT == "finished_at" + assert AgentLog.LogMetadata.ELAPSED_TIME == "elapsed_time" + assert AgentLog.LogMetadata.TOTAL_PRICE == "total_price" + assert AgentLog.LogMetadata.TOTAL_TOKENS == "total_tokens" + assert AgentLog.LogMetadata.LLM_USAGE == "llm_usage" + + +class TestAgentScratchpadUnit: + """Tests for AgentScratchpadUnit entity.""" + + def test_is_final_with_final_answer_action(self): + """Test is_final returns True for Final Answer action.""" + unit = AgentScratchpadUnit( + thought="I know the answer", + action=AgentScratchpadUnit.Action( + action_name="Final Answer", + action_input="The answer is 42", + ), + ) + + assert unit.is_final() is True + + def test_is_final_with_tool_action(self): + """Test is_final returns False for tool action.""" + unit = AgentScratchpadUnit( + thought="I need to search", + action=AgentScratchpadUnit.Action( + action_name="search", + action_input={"query": "test"}, + ), + ) + + assert unit.is_final() is False + + def test_is_final_with_no_action(self): + """Test is_final returns True when no action.""" + unit = AgentScratchpadUnit( + thought="Just thinking", + ) + + assert unit.is_final() is True + + def test_action_to_dict(self): + """Test Action.to_dict method.""" + action = AgentScratchpadUnit.Action( + action_name="search", + action_input={"query": "test"}, + ) + + result = action.to_dict() + + assert result == { + "action": "search", + "action_input": {"query": "test"}, + } + + +class TestAgentEntity: + """Tests for AgentEntity.""" + + def test_strategy_enum(self): + """Test Strategy enum values.""" + assert AgentEntity.Strategy.CHAIN_OF_THOUGHT == "chain-of-thought" + assert AgentEntity.Strategy.FUNCTION_CALLING == "function-calling" + + def test_create_with_prompt(self): + """Test creating AgentEntity with prompt.""" + prompt 
= AgentPromptEntity( + first_prompt="You are a helpful assistant.", + next_iteration="Continue thinking...", + ) + + entity = AgentEntity( + provider="openai", + model="gpt-4", + strategy=AgentEntity.Strategy.CHAIN_OF_THOUGHT, + prompt=prompt, + max_iteration=5, + ) + + assert entity.provider == "openai" + assert entity.model == "gpt-4" + assert entity.strategy == AgentEntity.Strategy.CHAIN_OF_THOUGHT + assert entity.prompt == prompt + assert entity.max_iteration == 5 diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py new file mode 100644 index 0000000000..388496ce1d --- /dev/null +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -0,0 +1,169 @@ +"""Tests for ResponseStreamCoordinator object field streaming.""" + +from unittest.mock import MagicMock + +from core.workflow.enums import NodeType +from core.workflow.graph import Graph +from core.workflow.graph_engine.response_coordinator.coordinator import ResponseStreamCoordinator +from core.workflow.graph_events import ChunkType, NodeRunStreamChunkEvent +from core.workflow.nodes.base.entities import BaseNodeData +from core.workflow.runtime import VariablePool + + +class TestResponseCoordinatorObjectStreaming: + """Test streaming of object-type variables with child fields.""" + + def test_object_field_streaming(self): + """Test that when selecting an object variable, all child field streams are forwarded.""" + # Create mock graph and variable pool + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + # Mock nodes + llm_node = MagicMock() + llm_node.id = "llm_node" + llm_node.node_type = NodeType.LLM + llm_node.execution_type = MagicMock() + llm_node.blocks_variable_output = MagicMock(return_value=False) + + response_node = MagicMock() + response_node.id = "response_node" + response_node.node_type = NodeType.ANSWER + response_node.execution_type = MagicMock() + response_node.blocks_variable_output = MagicMock(return_value=False) + + # Mock template for response node + response_node.node_data = MagicMock(spec=BaseNodeData) + response_node.node_data.answer = "{{#llm_node.generation#}}" + + graph.nodes = { + "llm_node": llm_node, + "response_node": response_node, + } + graph.root_node = llm_node + graph.get_outgoing_edges = MagicMock(return_value=[]) + + # Create coordinator + coordinator = ResponseStreamCoordinator(variable_pool, graph) + + # Track execution + coordinator.track_node_execution("llm_node", "exec_123") + coordinator.track_node_execution("response_node", "exec_456") + + # Simulate streaming events for child fields of generation object + # 1. Content stream + content_event_1 = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "content"], + chunk="Hello", + is_final=False, + chunk_type=ChunkType.TEXT, + ) + content_event_2 = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "content"], + chunk=" world", + is_final=True, + chunk_type=ChunkType.TEXT, + ) + + # 2. 
Tool call stream + tool_call_event = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "tool_calls"], + chunk='{"query": "test"}', + is_final=True, + chunk_type=ChunkType.TOOL_CALL, + tool_call_id="call_123", + tool_name="search", + tool_arguments='{"query": "test"}', + ) + + # 3. Tool result stream + tool_result_event = NodeRunStreamChunkEvent( + id="exec_123", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["llm_node", "generation", "tool_results"], + chunk="Found 10 results", + is_final=True, + chunk_type=ChunkType.TOOL_RESULT, + tool_call_id="call_123", + tool_name="search", + tool_files=[], + tool_error=None, + ) + + # Intercept these events + coordinator.intercept_event(content_event_1) + coordinator.intercept_event(tool_call_event) + coordinator.intercept_event(tool_result_event) + coordinator.intercept_event(content_event_2) + + # Verify that all child streams are buffered + assert ("llm_node", "generation", "content") in coordinator._stream_buffers + assert ("llm_node", "generation", "tool_calls") in coordinator._stream_buffers + assert ("llm_node", "generation", "tool_results") in coordinator._stream_buffers + + # Verify we can find child streams + child_streams = coordinator._find_child_streams(["llm_node", "generation"]) + assert len(child_streams) == 3 + assert ("llm_node", "generation", "content") in child_streams + assert ("llm_node", "generation", "tool_calls") in child_streams + assert ("llm_node", "generation", "tool_results") in child_streams + + def test_find_child_streams(self): + """Test the _find_child_streams method.""" + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + coordinator = ResponseStreamCoordinator(variable_pool, graph) + + # Add some mock streams + coordinator._stream_buffers = { + ("node1", "generation", "content"): [], + ("node1", "generation", "tool_calls"): [], + ("node1", "generation", "thought"): [], + ("node1", "text"): [], # Not a child of generation + ("node2", "generation", "content"): [], # Different node + } + + # Find children of node1.generation + children = coordinator._find_child_streams(["node1", "generation"]) + + assert len(children) == 3 + assert ("node1", "generation", "content") in children + assert ("node1", "generation", "tool_calls") in children + assert ("node1", "generation", "thought") in children + assert ("node1", "text") not in children + assert ("node2", "generation", "content") not in children + + def test_find_child_streams_with_closed_streams(self): + """Test that _find_child_streams also considers closed streams.""" + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + coordinator = ResponseStreamCoordinator(variable_pool, graph) + + # Add some streams - some buffered, some closed + coordinator._stream_buffers = { + ("node1", "generation", "content"): [], + } + coordinator._closed_streams = { + ("node1", "generation", "tool_calls"), + ("node1", "generation", "thought"), + } + + # Should find all children regardless of whether they're in buffers or closed + children = coordinator._find_child_streams(["node1", "generation"]) + + assert len(children) == 3 + assert ("node1", "generation", "content") in children + assert ("node1", "generation", "tool_calls") in children + assert ("node1", "generation", "thought") in children diff --git a/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py 
b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py new file mode 100644 index 0000000000..498d43905e --- /dev/null +++ b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py @@ -0,0 +1,336 @@ +"""Tests for StreamChunkEvent and its subclasses.""" + +from core.workflow.node_events import ( + ChunkType, + StreamChunkEvent, + ThoughtChunkEvent, + ToolCallChunkEvent, + ToolResultChunkEvent, +) + + +class TestChunkType: + """Tests for ChunkType enum.""" + + def test_chunk_type_values(self): + """Test that ChunkType has expected values.""" + assert ChunkType.TEXT == "text" + assert ChunkType.TOOL_CALL == "tool_call" + assert ChunkType.TOOL_RESULT == "tool_result" + assert ChunkType.THOUGHT == "thought" + + def test_chunk_type_is_str_enum(self): + """Test that ChunkType values are strings.""" + for chunk_type in ChunkType: + assert isinstance(chunk_type.value, str) + + +class TestStreamChunkEvent: + """Tests for base StreamChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating StreamChunkEvent with required fields.""" + event = StreamChunkEvent( + selector=["node1", "text"], + chunk="Hello", + ) + + assert event.selector == ["node1", "text"] + assert event.chunk == "Hello" + assert event.is_final is False + assert event.chunk_type == ChunkType.TEXT + + def test_create_with_all_fields(self): + """Test creating StreamChunkEvent with all fields.""" + event = StreamChunkEvent( + selector=["node1", "output"], + chunk="World", + is_final=True, + chunk_type=ChunkType.TEXT, + ) + + assert event.selector == ["node1", "output"] + assert event.chunk == "World" + assert event.is_final is True + assert event.chunk_type == ChunkType.TEXT + + def test_default_chunk_type_is_text(self): + """Test that default chunk_type is TEXT.""" + event = StreamChunkEvent( + selector=["node1", "text"], + chunk="test", + ) + + assert event.chunk_type == ChunkType.TEXT + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = StreamChunkEvent( + selector=["node1", "text"], + chunk="Hello", + is_final=True, + ) + + data = event.model_dump() + + assert data["selector"] == ["node1", "text"] + assert data["chunk"] == "Hello" + assert data["is_final"] is True + assert data["chunk_type"] == "text" + + +class TestToolCallChunkEvent: + """Tests for ToolCallChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating ToolCallChunkEvent with required fields.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk='{"city": "Beijing"}', + tool_call_id="call_123", + tool_name="weather", + ) + + assert event.selector == ["node1", "tool_calls"] + assert event.chunk == '{"city": "Beijing"}' + assert event.tool_call_id == "call_123" + assert event.tool_name == "weather" + assert event.chunk_type == ChunkType.TOOL_CALL + + def test_chunk_type_is_tool_call(self): + """Test that chunk_type is always TOOL_CALL.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk="", + tool_call_id="call_123", + tool_name="test_tool", + ) + + assert event.chunk_type == ChunkType.TOOL_CALL + + def test_tool_arguments_field(self): + """Test tool_arguments field.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk='{"param": "value"}', + tool_call_id="call_123", + tool_name="test_tool", + tool_arguments='{"param": "value"}', + ) + + assert event.tool_arguments == '{"param": "value"}' + + def test_serialization(self): + """Test that event can be serialized to dict.""" + 
event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk='{"city": "Beijing"}', + tool_call_id="call_123", + tool_name="weather", + tool_arguments='{"city": "Beijing"}', + is_final=True, + ) + + data = event.model_dump() + + assert data["chunk_type"] == "tool_call" + assert data["tool_call_id"] == "call_123" + assert data["tool_name"] == "weather" + assert data["tool_arguments"] == '{"city": "Beijing"}' + assert data["is_final"] is True + + +class TestToolResultChunkEvent: + """Tests for ToolResultChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating ToolResultChunkEvent with required fields.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="Weather: Sunny, 25°C", + tool_call_id="call_123", + tool_name="weather", + ) + + assert event.selector == ["node1", "tool_results"] + assert event.chunk == "Weather: Sunny, 25°C" + assert event.tool_call_id == "call_123" + assert event.tool_name == "weather" + assert event.chunk_type == ChunkType.TOOL_RESULT + + def test_chunk_type_is_tool_result(self): + """Test that chunk_type is always TOOL_RESULT.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_call_id="call_123", + tool_name="test_tool", + ) + + assert event.chunk_type == ChunkType.TOOL_RESULT + + def test_tool_files_default_empty(self): + """Test that tool_files defaults to empty list.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_call_id="call_123", + tool_name="test_tool", + ) + + assert event.tool_files == [] + + def test_tool_files_with_values(self): + """Test tool_files with file IDs.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_call_id="call_123", + tool_name="test_tool", + tool_files=["file_1", "file_2"], + ) + + assert event.tool_files == ["file_1", "file_2"] + + def test_tool_error_field(self): + """Test tool_error field.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="", + tool_call_id="call_123", + tool_name="test_tool", + tool_error="Tool execution failed", + ) + + assert event.tool_error == "Tool execution failed" + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="Weather: Sunny", + tool_call_id="call_123", + tool_name="weather", + tool_files=["file_1"], + tool_error=None, + is_final=True, + ) + + data = event.model_dump() + + assert data["chunk_type"] == "tool_result" + assert data["tool_call_id"] == "call_123" + assert data["tool_name"] == "weather" + assert data["tool_files"] == ["file_1"] + assert data["tool_error"] is None + assert data["is_final"] is True + + +class TestThoughtChunkEvent: + """Tests for ThoughtChunkEvent.""" + + def test_create_with_required_fields(self): + """Test creating ThoughtChunkEvent with required fields.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="I need to query the weather...", + ) + + assert event.selector == ["node1", "thought"] + assert event.chunk == "I need to query the weather..." 
+ assert event.chunk_type == ChunkType.THOUGHT + assert event.round_index == 1 # default + + def test_chunk_type_is_thought(self): + """Test that chunk_type is always THOUGHT.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="thinking...", + ) + + assert event.chunk_type == ChunkType.THOUGHT + + def test_round_index_default(self): + """Test that round_index defaults to 1.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="thinking...", + ) + + assert event.round_index == 1 + + def test_round_index_custom(self): + """Test custom round_index.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="Second round thinking...", + round_index=2, + ) + + assert event.round_index == 2 + + def test_serialization(self): + """Test that event can be serialized to dict.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="I need to analyze this...", + round_index=3, + is_final=False, + ) + + data = event.model_dump() + + assert data["chunk_type"] == "thought" + assert data["round_index"] == 3 + assert data["chunk"] == "I need to analyze this..." + assert data["is_final"] is False + + +class TestEventInheritance: + """Tests for event inheritance relationships.""" + + def test_tool_call_is_stream_chunk(self): + """Test that ToolCallChunkEvent is a StreamChunkEvent.""" + event = ToolCallChunkEvent( + selector=["node1", "tool_calls"], + chunk="", + tool_call_id="call_123", + tool_name="test", + ) + + assert isinstance(event, StreamChunkEvent) + + def test_tool_result_is_stream_chunk(self): + """Test that ToolResultChunkEvent is a StreamChunkEvent.""" + event = ToolResultChunkEvent( + selector=["node1", "tool_results"], + chunk="result", + tool_call_id="call_123", + tool_name="test", + ) + + assert isinstance(event, StreamChunkEvent) + + def test_thought_is_stream_chunk(self): + """Test that ThoughtChunkEvent is a StreamChunkEvent.""" + event = ThoughtChunkEvent( + selector=["node1", "thought"], + chunk="thinking...", + ) + + assert isinstance(event, StreamChunkEvent) + + def test_all_events_have_common_fields(self): + """Test that all events have common StreamChunkEvent fields.""" + events = [ + StreamChunkEvent(selector=["n", "t"], chunk="a"), + ToolCallChunkEvent(selector=["n", "t"], chunk="b", tool_call_id="1", tool_name="t"), + ToolResultChunkEvent(selector=["n", "t"], chunk="c", tool_call_id="1", tool_name="t"), + ThoughtChunkEvent(selector=["n", "t"], chunk="d"), + ] + + for event in events: + assert hasattr(event, "selector") + assert hasattr(event, "chunk") + assert hasattr(event, "is_final") + assert hasattr(event, "chunk_type") diff --git a/web/app/components/workflow/constants.ts b/web/app/components/workflow/constants.ts index ad498ff65b..206f39312a 100644 --- a/web/app/components/workflow/constants.ts +++ b/web/app/components/workflow/constants.ts @@ -131,6 +131,10 @@ export const LLM_OUTPUT_STRUCT: Var[] = [ variable: 'usage', type: VarType.object, }, + { + variable: 'generation', + type: VarType.object, + }, ] export const KNOWLEDGE_RETRIEVAL_OUTPUT_STRUCT: Var[] = [ diff --git a/web/app/components/workflow/nodes/agent/components/tool-icon.tsx b/web/app/components/workflow/nodes/agent/components/tool-icon.tsx index 8e6993a78d..6cc00d91ee 100644 --- a/web/app/components/workflow/nodes/agent/components/tool-icon.tsx +++ b/web/app/components/workflow/nodes/agent/components/tool-icon.tsx @@ -29,9 +29,9 @@ export const ToolIcon = memo(({ providerName }: ToolIconProps) => { }) }, [buildInTools, customTools, 
providerName, workflowTools, mcpTools]) - const providerNameParts = providerName.split('/') - const author = providerNameParts[0] - const name = providerNameParts[1] + const providerNameParts = providerName ? providerName.split('/') : [] + const author = providerNameParts[0] || '' + const name = providerNameParts[1] || providerName || '' const icon = useMemo(() => { if (!isDataReady) return '' if (currentProvider) return currentProvider.icon diff --git a/web/app/components/workflow/nodes/llm/components/tools-config.tsx b/web/app/components/workflow/nodes/llm/components/tools-config.tsx new file mode 100644 index 0000000000..147ff8cd49 --- /dev/null +++ b/web/app/components/workflow/nodes/llm/components/tools-config.tsx @@ -0,0 +1,58 @@ +import type { FC } from 'react' +import { memo } from 'react' +import { useTranslation } from 'react-i18next' +import MultipleToolSelector from '@/app/components/plugins/plugin-detail-panel/multiple-tool-selector' +import type { NodeOutPutVar } from '@/app/components/workflow/types' +import type { ToolValue } from '@/app/components/workflow/block-selector/types' +import type { Node } from 'reactflow' +import Field from '@/app/components/workflow/nodes/_base/components/field' +import { RiHammerLine } from '@remixicon/react' + +type Props = { + tools?: ToolValue[] + onChange: (tools: ToolValue[]) => void + readonly?: boolean + nodeId?: string + availableVars?: NodeOutPutVar[] + availableNodes?: Node[] +} + +const ToolsConfig: FC = ({ + tools = [], + onChange, + readonly = false, + nodeId = '', + availableVars = [], + availableNodes = [], +}) => { + const { t } = useTranslation() + + return ( + + + {t('workflow.nodes.llm.tools')} + + } + operations={ +
+ {t('workflow.nodes.llm.toolsCount', { count: tools.length })} +
+ } + > + +
+ ) +} + +export default memo(ToolsConfig) diff --git a/web/app/components/workflow/nodes/llm/constants.ts b/web/app/components/workflow/nodes/llm/constants.ts new file mode 100644 index 0000000000..e733ca72c6 --- /dev/null +++ b/web/app/components/workflow/nodes/llm/constants.ts @@ -0,0 +1,41 @@ +// ReAct prompt template for models that don't support tool_call or stream_tool_call +export const REACT_PROMPT_TEMPLATE = `Respond to the human as helpfully and accurately as possible. + +{{instruction}} + +You have access to the following tools: + +{{tools}} + +Use a json blob to specify a tool by providing an action key (tool name) and an action_input key (tool input). +Valid "action" values: "Final Answer" or {{tool_names}} + +Provide only ONE action per $JSON_BLOB, as shown: + +\`\`\` +{ + "action": $TOOL_NAME, + "action_input": $ACTION_INPUT +} +\`\`\` + +Follow this format: + +Question: input question to answer +Thought: consider previous and subsequent steps +Action: +\`\`\` +$JSON_BLOB +\`\`\` +Observation: action result +... (repeat Thought/Action/Observation N times) +Thought: I know what to respond +Action: +\`\`\` +{ + "action": "Final Answer", + "action_input": "Final response to human" +} +\`\`\` + +Begin! Reminder to ALWAYS respond with a valid json blob of a single action. Use tools if necessary. Respond directly if appropriate. Format is Action:\`\`\`$JSON_BLOB\`\`\`then Observation:.` diff --git a/web/app/components/workflow/nodes/llm/default.ts b/web/app/components/workflow/nodes/llm/default.ts index 57033d26a1..e225837d12 100644 --- a/web/app/components/workflow/nodes/llm/default.ts +++ b/web/app/components/workflow/nodes/llm/default.ts @@ -53,6 +53,7 @@ const nodeDefault: NodeDefault = { vision: { enabled: false, }, + tools: [], }, defaultRunInputData: { '#context#': [RETRIEVAL_OUTPUT_STRUCT], diff --git a/web/app/components/workflow/nodes/llm/node.tsx b/web/app/components/workflow/nodes/llm/node.tsx index ce676ba984..0f0b5bf390 100644 --- a/web/app/components/workflow/nodes/llm/node.tsx +++ b/web/app/components/workflow/nodes/llm/node.tsx @@ -1,26 +1,46 @@ import type { FC } from 'react' -import React from 'react' +import React, { useMemo } from 'react' import type { LLMNodeType } from './types' import { useTextGenerationCurrentProviderAndModelAndModelList, } from '@/app/components/header/account-setting/model-provider-page/hooks' import ModelSelector from '@/app/components/header/account-setting/model-provider-page/model-selector' import type { NodeProps } from '@/app/components/workflow/types' +import { Group, GroupLabel } from '../_base/components/group' +import { ToolIcon } from '../agent/components/tool-icon' +import useConfig from './use-config' +import { useTranslation } from 'react-i18next' const Node: FC> = ({ + id, data, }) => { + const { t } = useTranslation() + const { inputs } = useConfig(id, data) const { provider, name: modelId } = data.model || {} const { textGenerationModelList, } = useTextGenerationCurrentProviderAndModelAndModelList() const hasSetModel = provider && modelId + // Extract tools information + const tools = useMemo(() => { + if (!inputs.tools || inputs.tools.length === 0) + return [] + + // For LLM Node, tools is ToolValue[] + // Each tool has provider_name which is the unique identifier + return inputs.tools.map((tool, index) => ({ + id: `tool-${index}`, + providerName: tool.provider_name, + })) + }, [inputs.tools]) + if (!hasSetModel) return null return ( -
+
{hasSetModel && ( > = ({ readonly /> )} + + {/* Tools display */} + {tools.length > 0 && ( + + {t('workflow.nodes.llm.tools')} + + } + > +
+ {tools.map(tool => ( + + ))} +
+
+ )}
) } diff --git a/web/app/components/workflow/nodes/llm/panel.tsx b/web/app/components/workflow/nodes/llm/panel.tsx index bb893b0da7..b39a4ea373 100644 --- a/web/app/components/workflow/nodes/llm/panel.tsx +++ b/web/app/components/workflow/nodes/llm/panel.tsx @@ -18,6 +18,7 @@ import Tooltip from '@/app/components/base/tooltip' import Editor from '@/app/components/workflow/nodes/_base/components/prompt/editor' import StructureOutput from './components/structure-output' import ReasoningFormatConfig from './components/reasoning-format-config' +import ToolsConfig from './components/tools-config' import Switch from '@/app/components/base/switch' import { RiAlertFill, RiQuestionLine } from '@remixicon/react' import { fetchAndMergeValidCompletionParams } from '@/utils/completion-params' @@ -57,12 +58,14 @@ const Panel: FC> = ({ handleVisionResolutionEnabledChange, handleVisionResolutionChange, isModelSupportStructuredOutput, + isModelSupportToolCall, structuredOutputCollapsed, setStructuredOutputCollapsed, handleStructureOutputEnableChange, handleStructureOutputChange, filterJinja2InputVar, handleReasoningFormatChange, + handleToolsChange, } = useConfig(id, data) const model = inputs.model @@ -241,6 +244,26 @@ const Panel: FC> = ({ onConfigChange={handleVisionResolutionChange} /> + {/* Tools configuration */} + + + {/* Show warning when model doesn't support tool call but tools are selected */} + {inputs.tools && inputs.tools.length > 0 && !isModelSupportToolCall && isChatModel && ( +
+ +
+ {t('workflow.nodes.llm.toolsNotSupportedWarning')} +
+
+ )} + {/* Reasoning Format */} > = ({ type='object' description={t(`${i18nPrefix}.outputVars.usage`)} /> + {inputs.structured_output_enabled && ( <> diff --git a/web/app/components/workflow/nodes/llm/types.ts b/web/app/components/workflow/nodes/llm/types.ts index 70dc4d9cc7..6bc3508cd0 100644 --- a/web/app/components/workflow/nodes/llm/types.ts +++ b/web/app/components/workflow/nodes/llm/types.ts @@ -1,4 +1,5 @@ import type { CommonNodeType, Memory, ModelConfig, PromptItem, ValueSelector, Variable, VisionSetting } from '@/app/components/workflow/types' +import type { ToolValue } from '@/app/components/workflow/block-selector/types' export type LLMNodeType = CommonNodeType & { model: ModelConfig @@ -18,6 +19,7 @@ export type LLMNodeType = CommonNodeType & { structured_output_enabled?: boolean structured_output?: StructuredOutput reasoning_format?: 'tagged' | 'separated' + tools?: ToolValue[] } export enum Type { diff --git a/web/app/components/workflow/nodes/llm/use-config.ts b/web/app/components/workflow/nodes/llm/use-config.ts index d9b811bb85..55bc65a4d4 100644 --- a/web/app/components/workflow/nodes/llm/use-config.ts +++ b/web/app/components/workflow/nodes/llm/use-config.ts @@ -1,7 +1,8 @@ import { useCallback, useEffect, useRef, useState } from 'react' +import { EditionType, PromptRole, VarType } from '../../types' import { produce } from 'immer' -import { EditionType, VarType } from '../../types' import type { Memory, PromptItem, ValueSelector, Var, Variable } from '../../types' +import type { ToolValue } from '../../block-selector/types' import { useStore } from '../../store' import { useIsChatMode, @@ -18,6 +19,7 @@ import { import useNodeCrud from '@/app/components/workflow/nodes/_base/hooks/use-node-crud' import { checkHasContextBlock, checkHasHistoryBlock, checkHasQueryBlock } from '@/app/components/base/prompt-editor/constants' import useInspectVarsCrud from '@/app/components/workflow/hooks/use-inspect-vars-crud' +import { REACT_PROMPT_TEMPLATE } from './constants' import { AppModeEnum } from '@/types/app' const useConfig = (id: string, payload: LLMNodeType) => { @@ -250,7 +252,7 @@ const useConfig = (id: string, payload: LLMNodeType) => { }, [setInputs]) const handlePromptChange = useCallback((newPrompt: PromptItem[] | PromptItem) => { - const newInputs = produce(inputRef.current, (draft) => { + const newInputs = produce(inputs, (draft) => { draft.prompt_template = newPrompt }) setInputs(newInputs) @@ -283,10 +285,13 @@ const useConfig = (id: string, payload: LLMNodeType) => { // structure output const { data: modelList } = useModelList(ModelTypeEnum.textGeneration) - const isModelSupportStructuredOutput = modelList + const currentModelFeatures = modelList ?.find(provideItem => provideItem.provider === model?.provider) ?.models.find(modelItem => modelItem.model === model?.name) - ?.features?.includes(ModelFeatureEnum.StructuredOutput) + ?.features || [] + + const isModelSupportStructuredOutput = currentModelFeatures.includes(ModelFeatureEnum.StructuredOutput) + const isModelSupportToolCall = currentModelFeatures.includes(ModelFeatureEnum.toolCall) || currentModelFeatures.includes(ModelFeatureEnum.streamToolCall) const [structuredOutputCollapsed, setStructuredOutputCollapsed] = useState(true) const handleStructureOutputEnableChange = useCallback((enabled: boolean) => { @@ -327,6 +332,91 @@ const useConfig = (id: string, payload: LLMNodeType) => { setInputs(newInputs) }, [setInputs]) + const handleToolsChange = useCallback((tools: ToolValue[]) => { + const newInputs = 
produce(inputs, (draft) => { + draft.tools = tools + }) + setInputs(newInputs) + }, [inputs, setInputs]) + + // Auto-manage ReAct prompt based on model support and tool selection + useEffect(() => { + if (!isChatModel) return + + // Add a small delay to ensure all state updates have settled + const timeoutId = setTimeout(() => { + const promptTemplate = inputs.prompt_template as PromptItem[] + const systemPromptIndex = promptTemplate.findIndex(item => item.role === 'system') + + const shouldHaveReactPrompt = inputs.tools && inputs.tools.length > 0 && !isModelSupportToolCall + + if (shouldHaveReactPrompt) { + // Should have ReAct prompt + let needsAdd = false + if (systemPromptIndex >= 0) { + const currentSystemPrompt = promptTemplate[systemPromptIndex].text + // Check if ReAct prompt is already present by looking for key phrases + needsAdd = !currentSystemPrompt.includes('{{tools}}') && !currentSystemPrompt.includes('{{tool_names}}') + } + else { + needsAdd = true + } + + if (needsAdd) { + const newInputs = produce(inputs, (draft) => { + const draftPromptTemplate = draft.prompt_template as PromptItem[] + const sysPromptIdx = draftPromptTemplate.findIndex(item => item.role === 'system') + + if (sysPromptIdx >= 0) { + // Append ReAct prompt to existing system prompt + draftPromptTemplate[sysPromptIdx].text + = `${draftPromptTemplate[sysPromptIdx].text}\n\n${REACT_PROMPT_TEMPLATE}` + } + else { + // Create new system prompt with ReAct template + draftPromptTemplate.unshift({ + role: PromptRole.system, + text: REACT_PROMPT_TEMPLATE, + }) + } + }) + setInputs(newInputs) + } + } + else { + // Should NOT have ReAct prompt - remove it if present + if (systemPromptIndex >= 0) { + const currentSystemPrompt = promptTemplate[systemPromptIndex].text + const hasReactPrompt = currentSystemPrompt.includes('{{tools}}') || currentSystemPrompt.includes('{{tool_names}}') + + if (hasReactPrompt) { + const newInputs = produce(inputs, (draft) => { + const draftPromptTemplate = draft.prompt_template as PromptItem[] + const sysPromptIdx = draftPromptTemplate.findIndex(item => item.role === 'system') + + if (sysPromptIdx >= 0) { + // Remove ReAct prompt from system prompt + let cleanedText = draftPromptTemplate[sysPromptIdx].text + // Remove the ReAct template + cleanedText = cleanedText.replace(`\n\n${REACT_PROMPT_TEMPLATE}`, '') + cleanedText = cleanedText.replace(REACT_PROMPT_TEMPLATE, '') + + // If system prompt is now empty, remove it entirely + if (cleanedText.trim() === '') + draftPromptTemplate.splice(sysPromptIdx, 1) + else + draftPromptTemplate[sysPromptIdx].text = cleanedText.trim() + } + }) + setInputs(newInputs) + } + } + } + }, 100) // Small delay to let other state updates settle + + return () => clearTimeout(timeoutId) + }, [inputs.tools?.length, isModelSupportToolCall, isChatModel, setInputs]) + const { availableVars, availableNodesWithParent, @@ -362,12 +452,14 @@ const useConfig = (id: string, payload: LLMNodeType) => { handleVisionResolutionEnabledChange, handleVisionResolutionChange, isModelSupportStructuredOutput, + isModelSupportToolCall, handleStructureOutputChange, structuredOutputCollapsed, setStructuredOutputCollapsed, handleStructureOutputEnableChange, filterJinja2InputVar, handleReasoningFormatChange, + handleToolsChange, } } diff --git a/web/app/components/workflow/run/agent-log/agent-log-trigger.tsx b/web/app/components/workflow/run/agent-log/agent-log-trigger.tsx index 85b37d72d6..8376073d9e 100644 --- a/web/app/components/workflow/run/agent-log/agent-log-trigger.tsx +++ 
b/web/app/components/workflow/run/agent-log/agent-log-trigger.tsx @@ -4,6 +4,7 @@ import type { AgentLogItemWithChildren, NodeTracing, } from '@/types/workflow' +import { BlockEnum } from '@/app/components/workflow/types' type AgentLogTriggerProps = { nodeInfo: NodeTracing @@ -14,9 +15,13 @@ const AgentLogTrigger = ({ onShowAgentOrToolLog, }: AgentLogTriggerProps) => { const { t } = useTranslation() - const { agentLog, execution_metadata } = nodeInfo + const { agentLog, execution_metadata, node_type } = nodeInfo const agentStrategy = execution_metadata?.tool_info?.agent_strategy + // For LLM node, show different label + const isLLMNode = node_type === BlockEnum.LLM + const label = isLLMNode ? t('workflow.nodes.llm.tools').toUpperCase() : t('workflow.nodes.agent.strategy.label') + return (
-          {t('workflow.nodes.agent.strategy.label')}
+          {label}
       {
-          agentStrategy && (
+          !isLLMNode && agentStrategy && (
              {agentStrategy}
diff --git a/web/app/components/workflow/run/node.tsx b/web/app/components/workflow/run/node.tsx index 33124907f3..485426b3ea 100644 --- a/web/app/components/workflow/run/node.tsx +++ b/web/app/components/workflow/run/node.tsx @@ -96,6 +96,7 @@ const NodePanel: FC = ({ const isRetryNode = hasRetryNode(nodeInfo.node_type) && !!nodeInfo.retryDetail?.length const isAgentNode = nodeInfo.node_type === BlockEnum.Agent && !!nodeInfo.agentLog?.length const isToolNode = nodeInfo.node_type === BlockEnum.Tool && !!nodeInfo.agentLog?.length + const isLLMNode = nodeInfo.node_type === BlockEnum.LLM && !!nodeInfo.agentLog?.length const inputsTitle = useMemo(() => { let text = t('workflow.common.input') @@ -188,7 +189,7 @@ const NodePanel: FC = ({ /> )} { - (isAgentNode || isToolNode) && onShowAgentOrToolLog && ( + (isAgentNode || isToolNode || isLLMNode) && onShowAgentOrToolLog && ( = ({ const isRetryNode = hasRetryNode(nodeInfo?.node_type) && !!nodeInfo?.retryDetail?.length const isAgentNode = nodeInfo?.node_type === BlockEnum.Agent && !!nodeInfo?.agentLog?.length const isToolNode = nodeInfo?.node_type === BlockEnum.Tool && !!nodeInfo?.agentLog?.length + const isLLMNode = nodeInfo?.node_type === BlockEnum.LLM && !!nodeInfo?.agentLog?.length return (
@@ -117,7 +118,7 @@ const ResultPanel: FC = ({ ) } { - (isAgentNode || isToolNode) && handleShowAgentOrToolLog && ( + (isAgentNode || isToolNode || isLLMNode) && handleShowAgentOrToolLog && ( { let { children } = node diff --git a/web/i18n/en-US/workflow.ts b/web/i18n/en-US/workflow.ts index 636537c466..540bb4e43a 100644 --- a/web/i18n/en-US/workflow.ts +++ b/web/i18n/en-US/workflow.ts @@ -503,6 +503,9 @@ const translation = { contextTooltip: 'You can import Knowledge as context', notSetContextInPromptTip: 'To enable the context feature, please fill in the context variable in PROMPT.', prompt: 'prompt', + tools: 'Tools', + toolsCount: '{{count}} tools selected', + toolsNotSupportedWarning: 'This model does not support native tool calling. A ReAct prompt template has been automatically added to the system prompt to enable tool usage.', roleDescription: { system: 'Give high level instructions for the conversation', user: 'Provide instructions, queries, or any text-based input to the model', @@ -520,6 +523,7 @@ const translation = { output: 'Generate content', reasoning_content: 'Reasoning Content', usage: 'Model Usage Information', + generation: 'Generation details including reasoning, tool calls and their sequence', }, singleRun: { variable: 'Variable', diff --git a/web/i18n/zh-Hans/workflow.ts b/web/i18n/zh-Hans/workflow.ts index e33941a6cd..3c1d1f1cb8 100644 --- a/web/i18n/zh-Hans/workflow.ts +++ b/web/i18n/zh-Hans/workflow.ts @@ -503,6 +503,9 @@ const translation = { contextTooltip: '您可以导入知识库作为上下文', notSetContextInPromptTip: '要启用上下文功能,请在提示中填写上下文变量。', prompt: '提示词', + tools: '工具', + toolsCount: '已选择 {{count}} 个工具', + toolsNotSupportedWarning: '该模型不支持原生工具调用功能。已自动在系统提示词中添加 ReAct 提示模板以启用工具使用。', addMessage: '添加消息', roleDescription: { system: '为对话提供高层指导', @@ -520,6 +523,7 @@ const translation = { output: '生成内容', reasoning_content: '推理内容', usage: '模型用量信息', + generation: '生成详情,包含推理内容、工具调用及其顺序', }, singleRun: { variable: '变量', From 2d2ce5df85ffa2e9f7bc356755179f19e44b7554 Mon Sep 17 00:00:00 2001 From: Novice Date: Tue, 9 Dec 2025 16:22:17 +0800 Subject: [PATCH 02/18] feat: generation stream output. 
--- api/core/agent/patterns/function_call.py | 64 ++++++---- api/core/agent/patterns/react.py | 39 ++++-- .../advanced_chat/generate_task_pipeline.py | 1 - .../apps/workflow/generate_task_pipeline.py | 3 - api/core/app/apps/workflow_app_runner.py | 1 - api/core/app/entities/queue_entities.py | 4 - api/core/app/entities/task_entities.py | 8 -- .../task_pipeline/message_cycle_manager.py | 3 - .../response_coordinator/coordinator.py | 1 - api/core/workflow/graph_events/node.py | 3 - api/core/workflow/node_events/node.py | 1 - api/core/workflow/nodes/base/node.py | 1 - api/core/workflow/nodes/llm/node.py | 113 ++++++++++++++---- .../node_events/test_stream_chunk_events.py | 22 ---- 14 files changed, 160 insertions(+), 104 deletions(-) diff --git a/api/core/agent/patterns/function_call.py b/api/core/agent/patterns/function_call.py index 2c8664c419..a46c5d77f9 100644 --- a/api/core/agent/patterns/function_call.py +++ b/api/core/agent/patterns/function_call.py @@ -51,7 +51,7 @@ class FunctionCallStrategy(AgentPattern): label=f"ROUND {iteration_step}", log_type=AgentLog.LogType.ROUND, status=AgentLog.LogStatus.START, - data={"round_index": iteration_step}, + data={}, ) yield round_log # On last iteration, remove tools to force final answer @@ -249,25 +249,47 @@ class FunctionCallStrategy(AgentPattern): ) yield tool_call_log - # Invoke tool using base class method - response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args, tool_name) + # Invoke tool using base class method with error handling + try: + response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args, tool_name) - yield self._finish_log( - tool_call_log, - data={ - **tool_call_log.data, - "output": response_content, - "files": len(tool_files), - "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None, - }, - ) - final_content = response_content or "Tool executed successfully" - # Add tool response to messages - messages.append( - ToolPromptMessage( - content=final_content, - tool_call_id=tool_call_id, - name=tool_name, + yield self._finish_log( + tool_call_log, + data={ + **tool_call_log.data, + "output": response_content, + "files": len(tool_files), + "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None, + }, ) - ) - return response_content, tool_files, tool_invoke_meta + final_content = response_content or "Tool executed successfully" + # Add tool response to messages + messages.append( + ToolPromptMessage( + content=final_content, + tool_call_id=tool_call_id, + name=tool_name, + ) + ) + return response_content, tool_files, tool_invoke_meta + except Exception as e: + # Tool invocation failed, yield error log + error_message = str(e) + tool_call_log.status = AgentLog.LogStatus.ERROR + tool_call_log.error = error_message + tool_call_log.data = { + **tool_call_log.data, + "error": error_message, + } + yield tool_call_log + + # Add error message to conversation + error_content = f"Tool execution failed: {error_message}" + messages.append( + ToolPromptMessage( + content=error_content, + tool_call_id=tool_call_id, + name=tool_name, + ) + ) + return error_content, [], None diff --git a/api/core/agent/patterns/react.py b/api/core/agent/patterns/react.py index 46a0dbd61e..81aa7fe3b1 100644 --- a/api/core/agent/patterns/react.py +++ b/api/core/agent/patterns/react.py @@ -80,7 +80,7 @@ class ReActStrategy(AgentPattern): label=f"ROUND {iteration_step}", log_type=AgentLog.LogType.ROUND, status=AgentLog.LogStatus.START, - data={"round_index": iteration_step}, + 
                data={},
             )
             yield round_log
 
@@ -385,18 +385,31 @@ class ReActStrategy(AgentPattern):
         else:
             tool_args_dict = tool_args
 
-        # Invoke tool using base class method
-        response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args_dict, tool_name)
+        # Invoke tool using base class method with error handling
+        try:
+            response_content, tool_files, tool_invoke_meta = self._invoke_tool(tool_instance, tool_args_dict, tool_name)
 
-        # Finish tool log
-        yield self._finish_log(
-            tool_log,
-            data={
+            # Finish tool log
+            yield self._finish_log(
+                tool_log,
+                data={
+                    **tool_log.data,
+                    "output": response_content,
+                    "files": len(tool_files),
+                    "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None,
+                },
+            )
+
+            return response_content or "Tool executed successfully", tool_files
+        except Exception as e:
+            # Tool invocation failed, yield error log
+            error_message = str(e)
+            tool_log.status = AgentLog.LogStatus.ERROR
+            tool_log.error = error_message
+            tool_log.data = {
                 **tool_log.data,
-                "output": response_content,
-                "files": len(tool_files),
-                "meta": tool_invoke_meta.to_dict() if tool_invoke_meta else None,
-            },
-        )
+                "error": error_message,
+            }
+            yield tool_log
 
-        return response_content or "Tool executed successfully", tool_files
+            return f"Tool execution failed: {error_message}", []
diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
index 8e920f369a..3b2c55aa2d 100644
--- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py
+++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py
@@ -542,7 +542,6 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport):
             tool_arguments=event.tool_arguments,
             tool_files=event.tool_files,
             tool_error=event.tool_error,
-            round_index=event.round_index,
         )
 
     def _handle_iteration_start_event(
diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py
index 09ac24a413..a6c7067ccd 100644
--- a/api/core/app/apps/workflow/generate_task_pipeline.py
+++ b/api/core/app/apps/workflow/generate_task_pipeline.py
@@ -497,7 +497,6 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport):
             tool_arguments=event.tool_arguments,
             tool_files=event.tool_files,
             tool_error=event.tool_error,
-            round_index=event.round_index,
         )
 
     def _handle_agent_log_event(self, event: QueueAgentLogEvent, **kwargs) -> Generator[StreamResponse, None, None]:
@@ -670,7 +669,6 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport):
         tool_arguments: str | None = None,
         tool_files: list[str] | None = None,
         tool_error: str | None = None,
-        round_index: int | None = None,
     ) -> TextChunkStreamResponse:
         """
         Handle completed event.
@@ -690,7 +688,6 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport):
                 tool_arguments=tool_arguments,
                 tool_files=tool_files or [],
                 tool_error=tool_error,
-                round_index=round_index,
             ),
         )
 
diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py
index 3161956c9b..23624cb934 100644
--- a/api/core/app/apps/workflow_app_runner.py
+++ b/api/core/app/apps/workflow_app_runner.py
@@ -469,7 +469,6 @@ class WorkflowBasedAppRunner:
                         tool_arguments=event.tool_arguments,
                         tool_files=event.tool_files,
                         tool_error=event.tool_error,
-                        round_index=event.round_index,
                     )
                 )
             elif isinstance(event, NodeRunRetrieverResourceEvent):
diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py
index c767fcfc34..edb2c8a1f3 100644
--- a/api/core/app/entities/queue_entities.py
+++ b/api/core/app/entities/queue_entities.py
@@ -218,10 +218,6 @@ class QueueTextChunkEvent(AppQueueEvent):
     tool_error: str | None = None
     """error message if tool failed"""
 
-    # Thought fields (when chunk_type == THOUGHT)
-    round_index: int | None = None
-    """current iteration round"""
-
 
 class QueueAgentMessageEvent(AppQueueEvent):
     """
diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py
index 4609cd87f6..6ea689f401 100644
--- a/api/core/app/entities/task_entities.py
+++ b/api/core/app/entities/task_entities.py
@@ -134,10 +134,6 @@ class MessageStreamResponse(StreamResponse):
     tool_error: str | None = None
     """error message if tool failed"""
 
-    # Thought fields (when chunk_type == "thought")
-    round_index: int | None = None
-    """current iteration round"""
-
 
 class MessageAudioStreamResponse(StreamResponse):
     """
@@ -647,10 +643,6 @@ class TextChunkStreamResponse(StreamResponse):
     tool_error: str | None = None
     """error message if tool failed"""
 
-    # Thought fields (when chunk_type == THOUGHT)
-    round_index: int | None = None
-    """current iteration round"""
-
 
     event: StreamEvent = StreamEvent.TEXT_CHUNK
     data: Data
diff --git a/api/core/app/task_pipeline/message_cycle_manager.py b/api/core/app/task_pipeline/message_cycle_manager.py
index 414fed6701..25c3a99cf8 100644
--- a/api/core/app/task_pipeline/message_cycle_manager.py
+++ b/api/core/app/task_pipeline/message_cycle_manager.py
@@ -224,7 +224,6 @@ class MessageCycleManager:
         tool_arguments: str | None = None,
         tool_files: list[str] | None = None,
         tool_error: str | None = None,
-        round_index: int | None = None,
     ) -> MessageStreamResponse:
         """
         Message to stream response.
@@ -237,7 +236,6 @@ class MessageCycleManager: :param tool_arguments: accumulated tool arguments JSON :param tool_files: file IDs produced by tool :param tool_error: error message if tool failed - :param round_index: current iteration round :return: """ with Session(db.engine, expire_on_commit=False) as session: @@ -256,7 +254,6 @@ class MessageCycleManager: tool_arguments=tool_arguments, tool_files=tool_files, tool_error=tool_error, - round_index=round_index, ) def message_replace_to_stream_response(self, answer: str, reason: str = "") -> MessageReplaceStreamResponse: diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index bd20c4f334..1396c3a7ff 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -441,7 +441,6 @@ class ResponseStreamCoordinator: tool_arguments=event.tool_arguments, tool_files=event.tool_files, tool_error=event.tool_error, - round_index=event.round_index, ) events.append(updated_event) else: diff --git a/api/core/workflow/graph_events/node.py b/api/core/workflow/graph_events/node.py index c7f76c424d..3351f028b1 100644 --- a/api/core/workflow/graph_events/node.py +++ b/api/core/workflow/graph_events/node.py @@ -51,9 +51,6 @@ class NodeRunStreamChunkEvent(GraphNodeEventBase): tool_files: list[str] = Field(default_factory=list, description="file IDs produced by tool") tool_error: str | None = Field(default=None, description="error message if tool failed") - # Thought fields (when chunk_type == THOUGHT) - round_index: int | None = Field(default=None, description="current iteration round") - class NodeRunRetrieverResourceEvent(GraphNodeEventBase): retriever_resources: Sequence[RetrievalSourceMetadata] = Field(..., description="retriever resources") diff --git a/api/core/workflow/node_events/node.py b/api/core/workflow/node_events/node.py index 3a062b9c4c..4739bcc714 100644 --- a/api/core/workflow/node_events/node.py +++ b/api/core/workflow/node_events/node.py @@ -74,7 +74,6 @@ class ThoughtChunkEvent(StreamChunkEvent): """Agent thought streaming event - Agent thinking process (ReAct).""" chunk_type: ChunkType = Field(default=ChunkType.THOUGHT, frozen=True) - round_index: int = Field(default=1, description="current iteration round") class StreamCompletedEvent(NodeEventBase): diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index 9be16d4f08..dd3775714a 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -598,7 +598,6 @@ class Node(Generic[NodeDataT]): chunk=event.chunk, is_final=event.is_final, chunk_type=ChunkType.THOUGHT, - round_index=event.round_index, ) @_dispatch.register diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index bf41f476fd..e14dfebe64 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -277,7 +277,7 @@ class LLMNode(Node[LLMNodeData]): structured_output: LLMStructuredOutput | None = None for event in generator: - if isinstance(event, StreamChunkEvent): + if isinstance(event, (StreamChunkEvent, ThoughtChunkEvent)): yield event elif isinstance(event, ModelInvokeCompletedEvent): # Raw text @@ -340,6 +340,16 @@ class LLMNode(Node[LLMNodeData]): chunk="", is_final=True, ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk="", + is_final=True, + ) + yield 
ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk="", + is_final=True, + ) yield StreamCompletedEvent( node_run_result=NodeRunResult( @@ -470,6 +480,8 @@ class LLMNode(Node[LLMNodeData]): usage = LLMUsage.empty_usage() finish_reason = None full_text_buffer = io.StringIO() + think_parser = llm_utils.ThinkTagStreamParser() + reasoning_chunks: list[str] = [] # Initialize streaming metrics tracking start_time = request_start_time if request_start_time is not None else time.perf_counter() @@ -498,12 +510,32 @@ class LLMNode(Node[LLMNodeData]): has_content = True full_text_buffer.write(text_part) + # Text output: always forward raw chunk (keep tags intact) yield StreamChunkEvent( selector=[node_id, "text"], chunk=text_part, is_final=False, ) + # Generation output: split out thoughts, forward only non-thought content chunks + for kind, segment in think_parser.process(text_part): + if not segment: + continue + + if kind == "thought": + reasoning_chunks.append(segment) + yield ThoughtChunkEvent( + selector=[node_id, "generation", "thought"], + chunk=segment, + is_final=False, + ) + else: + yield StreamChunkEvent( + selector=[node_id, "generation", "content"], + chunk=segment, + is_final=False, + ) + # Update the whole metadata if not model and result.model: model = result.model @@ -518,16 +550,35 @@ class LLMNode(Node[LLMNodeData]): except OutputParserError as e: raise LLMNodeError(f"Failed to parse structured output: {e}") + for kind, segment in think_parser.flush(): + if not segment: + continue + if kind == "thought": + reasoning_chunks.append(segment) + yield ThoughtChunkEvent( + selector=[node_id, "generation", "thought"], + chunk=segment, + is_final=False, + ) + else: + yield StreamChunkEvent( + selector=[node_id, "generation", "content"], + chunk=segment, + is_final=False, + ) + # Extract reasoning content from tags in the main text full_text = full_text_buffer.getvalue() if reasoning_format == "tagged": # Keep tags in text for backward compatibility clean_text = full_text - reasoning_content = "" + reasoning_content = "".join(reasoning_chunks) else: # Extract clean text and reasoning from tags clean_text, reasoning_content = LLMNode._split_reasoning(full_text, reasoning_format) + if reasoning_chunks and not reasoning_content: + reasoning_content = "".join(reasoning_chunks) # Calculate streaming metrics end_time = time.perf_counter() @@ -1398,8 +1449,6 @@ class LLMNode(Node[LLMNodeData]): finish_reason = None agent_result: AgentResult | None = None - # Track current round for ThoughtChunkEvent - current_round = 1 think_parser = llm_utils.ThinkTagStreamParser() reasoning_chunks: list[str] = [] @@ -1431,12 +1480,6 @@ class LLMNode(Node[LLMNodeData]): else: agent_logs.append(agent_log_event) - # Extract round number from ROUND log type - if output.log_type == AgentLog.LogType.ROUND: - round_index = output.data.get("round_index") - if isinstance(round_index, int): - current_round = round_index - # Emit tool call events when tool call starts if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.START: tool_name = output.data.get("tool_name", "") @@ -1450,26 +1493,34 @@ class LLMNode(Node[LLMNodeData]): tool_call_id=tool_call_id, tool_name=tool_name, tool_arguments=tool_arguments, - is_final=True, + is_final=False, ) - # Emit tool result events when tool call completes - if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.SUCCESS: + # Emit tool result events when tool call completes (both success 
and error) + if output.log_type == AgentLog.LogType.TOOL_CALL and output.status != AgentLog.LogStatus.START: tool_name = output.data.get("tool_name", "") tool_output = output.data.get("output", "") tool_call_id = output.data.get("tool_call_id", "") tool_files = [] tool_error = None - # Extract file IDs if present + # Extract file IDs if present (only for success case) files_data = output.data.get("files") if files_data and isinstance(files_data, list): tool_files = files_data - # Check for error in meta - meta = output.data.get("meta") - if meta and isinstance(meta, dict) and meta.get("error"): - tool_error = meta.get("error") + # Check for error from multiple sources + if output.status == AgentLog.LogStatus.ERROR: + # Priority: output.error > data.error > meta.error + tool_error = output.error or output.data.get("error") + meta = output.data.get("meta") + if not tool_error and meta and isinstance(meta, dict): + tool_error = meta.get("error") + else: + # For success case, check meta for potential errors + meta = output.data.get("meta") + if meta and isinstance(meta, dict) and meta.get("error"): + tool_error = meta.get("error") yield ToolResultChunkEvent( selector=[self._node_id, "generation", "tool_results"], @@ -1478,7 +1529,7 @@ class LLMNode(Node[LLMNodeData]): tool_name=tool_name, tool_files=tool_files, tool_error=tool_error, - is_final=True, + is_final=False, ) elif isinstance(output, LLMResultChunk): @@ -1502,7 +1553,6 @@ class LLMNode(Node[LLMNodeData]): yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, - round_index=current_round, is_final=False, ) else: @@ -1548,7 +1598,6 @@ class LLMNode(Node[LLMNodeData]): yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, - round_index=current_round, is_final=False, ) else: @@ -1580,7 +1629,27 @@ class LLMNode(Node[LLMNodeData]): yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk="", - round_index=current_round, + is_final=True, + ) + + # Close tool_calls stream (already sent via ToolCallChunkEvent) + yield ToolCallChunkEvent( + selector=[self._node_id, "generation", "tool_calls"], + chunk="", + tool_call_id="", + tool_name="", + tool_arguments="", + is_final=True, + ) + + # Close tool_results stream (already sent via ToolResultChunkEvent) + yield ToolResultChunkEvent( + selector=[self._node_id, "generation", "tool_results"], + chunk="", + tool_call_id="", + tool_name="", + tool_files=[], + tool_error=None, is_final=True, ) diff --git a/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py index 498d43905e..f6e0834b1e 100644 --- a/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py +++ b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py @@ -239,7 +239,6 @@ class TestThoughtChunkEvent: assert event.selector == ["node1", "thought"] assert event.chunk == "I need to query the weather..." 
assert event.chunk_type == ChunkType.THOUGHT - assert event.round_index == 1 # default def test_chunk_type_is_thought(self): """Test that chunk_type is always THOUGHT.""" @@ -250,38 +249,17 @@ class TestThoughtChunkEvent: assert event.chunk_type == ChunkType.THOUGHT - def test_round_index_default(self): - """Test that round_index defaults to 1.""" - event = ThoughtChunkEvent( - selector=["node1", "thought"], - chunk="thinking...", - ) - - assert event.round_index == 1 - - def test_round_index_custom(self): - """Test custom round_index.""" - event = ThoughtChunkEvent( - selector=["node1", "thought"], - chunk="Second round thinking...", - round_index=2, - ) - - assert event.round_index == 2 - def test_serialization(self): """Test that event can be serialized to dict.""" event = ThoughtChunkEvent( selector=["node1", "thought"], chunk="I need to analyze this...", - round_index=3, is_final=False, ) data = event.model_dump() assert data["chunk_type"] == "thought" - assert data["round_index"] == 3 assert data["chunk"] == "I need to analyze this..." assert data["is_final"] is False From 930c36e7578d0a4aa55bb9229e05b214dcda2c00 Mon Sep 17 00:00:00 2001 From: Novice Date: Tue, 9 Dec 2025 20:56:54 +0800 Subject: [PATCH 03/18] fix: llm detail store --- ...hemy_workflow_node_execution_repository.py | 22 ++++++++++++------- api/core/workflow/nodes/llm/node.py | 3 --- 2 files changed, 14 insertions(+), 11 deletions(-) diff --git a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py index 79b0c702e0..0a3189f398 100644 --- a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py @@ -480,9 +480,10 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) if reasoning_content: # reasoning_content could be a string or already a list if isinstance(reasoning_content, str): - reasoning_list = [reasoning_content] if reasoning_content else [] + reasoning_list = [reasoning_content] if reasoning_content.strip() else [] elif isinstance(reasoning_content, list): - reasoning_list = reasoning_content + # Filter out empty or whitespace-only strings + reasoning_list = [r.strip() for r in reasoning_content if isinstance(r, str) and r.strip()] # Extract tool_calls from metadata.agent_log tool_calls_list: list[dict] = [] @@ -491,16 +492,25 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) for log in agent_log: # Each log entry has label, data, status, etc. 
log_data = log.data if hasattr(log, "data") else log.get("data", {}) - if log_data.get("tool_name"): + tool_name = log_data.get("tool_name") + # Only include tool calls with valid tool_name + if tool_name and str(tool_name).strip(): tool_calls_list.append( { "id": log_data.get("tool_call_id", ""), - "name": log_data.get("tool_name", ""), + "name": tool_name, "arguments": json.dumps(log_data.get("tool_args", {})), "result": str(log_data.get("output", "")), } ) + # Only save if there's meaningful generation detail (reasoning or tool calls) + has_valid_reasoning = bool(reasoning_list) + has_valid_tool_calls = bool(tool_calls_list) + + if not has_valid_reasoning and not has_valid_tool_calls: + return + # Build sequence based on content, reasoning, and tool_calls sequence: list[dict] = [] text = outputs.get("text", "") @@ -514,10 +524,6 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) for i in range(len(tool_calls_list)): sequence.append({"type": "tool_call", "index": i}) - # Only save if there's meaningful generation detail - if not reasoning_list and not tool_calls_list: - return - # Check if generation detail already exists for this node execution existing = ( session.query(LLMGenerationDetail) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index e14dfebe64..4acbe25ae3 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -1680,9 +1680,6 @@ class LLMNode(Node[LLMNodeData]): "content": text, }, }, - metadata={ - WorkflowNodeExecutionMetadataKey.LLM_CONTENT_SEQUENCE: [], - }, inputs={ **node_inputs, "tools": [ From abb2b860f21694d4e6697227c4b7af4914a84809 Mon Sep 17 00:00:00 2001 From: Novice Date: Wed, 10 Dec 2025 15:04:19 +0800 Subject: [PATCH 04/18] chore: remove unused changes --- api/controllers/console/app/workflow_run.py | 1 + api/core/workflow/nodes/llm/entities.py | 3 +- api/core/workflow/nodes/llm/node.py | 2 +- api/services/llm_generation_service.py | 96 +-------------------- api/services/workflow_run_service.py | 22 ++--- 5 files changed, 11 insertions(+), 113 deletions(-) diff --git a/api/controllers/console/app/workflow_run.py b/api/controllers/console/app/workflow_run.py index 8360785d19..8f1871f1e9 100644 --- a/api/controllers/console/app/workflow_run.py +++ b/api/controllers/console/app/workflow_run.py @@ -359,6 +359,7 @@ class WorkflowRunNodeExecutionListApi(Resource): @login_required @account_initialization_required @get_app_model(mode=[AppMode.ADVANCED_CHAT, AppMode.WORKFLOW]) + @marshal_with(workflow_run_node_execution_list_model) def get(self, app_model: App, run_id): """ Get workflow run node execution list diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index fbdd1daec7..7da5cd241e 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -111,8 +111,9 @@ class LLMNodeData(BaseNodeData): ), ) - # Tool support (from Agent V2) + # Tool support tools: Sequence[ToolMetadata] = Field(default_factory=list) + max_iterations: int | None = Field(default=None, description="Maximum number of iterations for the LLM node") @field_validator("prompt_config", mode="before") @classmethod diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 4acbe25ae3..738350f301 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -1330,7 +1330,7 @@ class LLMNode(Node[LLMNodeData]): model_instance=model_instance, 
tools=tool_instances, files=prompt_files, - max_iterations=10, + max_iterations=self._node_data.max_iterations or 10, context=ExecutionContext(user_id=self.user_id, app_id=self.app_id, tenant_id=self.tenant_id), ) diff --git a/api/services/llm_generation_service.py b/api/services/llm_generation_service.py index 1e8c78a416..eb8327537e 100644 --- a/api/services/llm_generation_service.py +++ b/api/services/llm_generation_service.py @@ -5,13 +5,11 @@ Provides methods to query and attach generation details to workflow node executi and messages, avoiding N+1 query problems. """ -from collections.abc import Sequence - from sqlalchemy import select from sqlalchemy.orm import Session from core.app.entities.llm_generation_entities import LLMGenerationDetailData -from models import LLMGenerationDetail, WorkflowNodeExecutionModel +from models import LLMGenerationDetail class LLMGenerationService: @@ -20,26 +18,6 @@ class LLMGenerationService: def __init__(self, session: Session): self._session = session - def get_generation_details_for_workflow_run( - self, - workflow_run_id: str, - *, - tenant_id: str | None = None, - app_id: str | None = None, - ) -> dict[str, LLMGenerationDetailData]: - """ - Batch query generation details for all LLM nodes in a workflow run. - - Returns dict mapping node_id to LLMGenerationDetailData. - """ - stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.workflow_run_id == workflow_run_id) - if tenant_id: - stmt = stmt.where(LLMGenerationDetail.tenant_id == tenant_id) - if app_id: - stmt = stmt.where(LLMGenerationDetail.app_id == app_id) - details = self._session.scalars(stmt).all() - return {detail.node_id: detail.to_domain_model() for detail in details if detail.node_id} - def get_generation_detail_for_message(self, message_id: str) -> LLMGenerationDetailData | None: """Query generation detail for a specific message.""" stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id == message_id) @@ -57,75 +35,3 @@ class LLMGenerationService: stmt = select(LLMGenerationDetail).where(LLMGenerationDetail.message_id.in_(message_ids)) details = self._session.scalars(stmt).all() return {detail.message_id: detail.to_domain_model() for detail in details if detail.message_id} - - def attach_generation_details_to_node_executions( - self, - node_executions: Sequence[WorkflowNodeExecutionModel], - workflow_run_id: str, - *, - tenant_id: str | None = None, - app_id: str | None = None, - ) -> list[dict]: - """ - Attach generation details to node executions and return as dicts. - - Queries generation details in batch and attaches them to the corresponding - node executions, avoiding N+1 queries. 
- """ - generation_details = self.get_generation_details_for_workflow_run( - workflow_run_id, tenant_id=tenant_id, app_id=app_id - ) - - return [ - { - "id": node.id, - "index": node.index, - "predecessor_node_id": node.predecessor_node_id, - "node_id": node.node_id, - "node_type": node.node_type, - "title": node.title, - "inputs": node.inputs_dict, - "process_data": node.process_data_dict, - "outputs": node.outputs_dict, - "status": node.status, - "error": node.error, - "elapsed_time": node.elapsed_time, - "execution_metadata": node.execution_metadata_dict, - "extras": node.extras, - "created_at": int(node.created_at.timestamp()) if node.created_at else None, - "created_by_role": node.created_by_role, - "created_by_account": _serialize_account(node.created_by_account), - "created_by_end_user": _serialize_end_user(node.created_by_end_user), - "finished_at": int(node.finished_at.timestamp()) if node.finished_at else None, - "inputs_truncated": node.inputs_truncated, - "outputs_truncated": node.outputs_truncated, - "process_data_truncated": node.process_data_truncated, - "generation_detail": generation_details[node.node_id].to_response_dict() - if node.node_id in generation_details - else None, - } - for node in node_executions - ] - - -def _serialize_account(account) -> dict | None: - """Serialize Account to dict for API response.""" - if not account: - return None - return { - "id": account.id, - "name": account.name, - "email": account.email, - } - - -def _serialize_end_user(end_user) -> dict | None: - """Serialize EndUser to dict for API response.""" - if not end_user: - return None - return { - "id": end_user.id, - "type": end_user.type, - "is_anonymous": end_user.is_anonymous, - "session_id": end_user.session_id, - } diff --git a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index 14bcca8754..b903d8df5f 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -1,8 +1,8 @@ import threading -from typing import Any +from collections.abc import Sequence from sqlalchemy import Engine -from sqlalchemy.orm import Session, sessionmaker +from sqlalchemy.orm import sessionmaker import contexts from extensions.ext_database import db @@ -11,12 +11,12 @@ from models import ( Account, App, EndUser, + WorkflowNodeExecutionModel, WorkflowRun, WorkflowRunTriggeredFrom, ) from repositories.api_workflow_run_repository import APIWorkflowRunRepository from repositories.factory import DifyAPIRepositoryFactory -from services.llm_generation_service import LLMGenerationService class WorkflowRunService: @@ -137,9 +137,9 @@ class WorkflowRunService: app_model: App, run_id: str, user: Account | EndUser, - ) -> list[dict[str, Any]]: + ) -> Sequence[WorkflowNodeExecutionModel]: """ - Get workflow run node execution list with generation details attached. 
+ Get workflow run node execution list """ workflow_run = self.get_workflow_run(app_model, run_id) @@ -154,18 +154,8 @@ class WorkflowRunService: if tenant_id is None: raise ValueError("User tenant_id cannot be None") - node_executions = self._node_execution_service_repo.get_executions_by_workflow_run( + return self._node_execution_service_repo.get_executions_by_workflow_run( tenant_id=tenant_id, app_id=app_model.id, workflow_run_id=run_id, ) - - # Attach generation details using batch query - with Session(db.engine) as session: - generation_service = LLMGenerationService(session) - return generation_service.attach_generation_details_to_node_executions( - node_executions=node_executions, - workflow_run_id=run_id, - tenant_id=tenant_id, - app_id=app_model.id, - ) From 9ce48b4dc488e4f93007503c1b53664c08e6d0cf Mon Sep 17 00:00:00 2001 From: Novice Date: Fri, 12 Dec 2025 11:08:49 +0800 Subject: [PATCH 05/18] fix: llm generation variable --- .../advanced_chat/generate_task_pipeline.py | 5 + api/core/workflow/nodes/llm/entities.py | 18 ++ api/core/workflow/nodes/llm/node.py | 283 +++++++++++------- ...4a64f53_add_llm_generation_detail_table.py | 46 +++ 4 files changed, 251 insertions(+), 101 deletions(-) create mode 100644 api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 3b2c55aa2d..7a330bbfb1 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -141,6 +141,9 @@ class StreamEventBuffer: def record_tool_call(self, tool_call_id: str, tool_name: str, tool_arguments: str) -> None: """Record a tool call event.""" + if not tool_call_id: + return + # Flush any pending reasoning first if self._last_event_type == "thought": self._flush_current_reasoning() @@ -168,6 +171,8 @@ class StreamEventBuffer: def record_tool_result(self, tool_call_id: str, result: str) -> None: """Record a tool result event (update existing tool call).""" + if not tool_call_id: + return if tool_call_id in self._tool_call_id_map: idx = self._tool_call_id_map[tool_call_id] self.tool_calls[idx]["result"] = result diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index 7da5cd241e..2003820d80 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -3,7 +3,9 @@ from typing import Any, Literal from pydantic import BaseModel, Field, field_validator +from core.file import File from core.model_runtime.entities import ImagePromptMessageContent, LLMMode +from core.model_runtime.entities.llm_entities import LLMUsage from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig from core.tools.entities.tool_entities import ToolProviderType from core.workflow.nodes.base import BaseNodeData @@ -17,6 +19,22 @@ class ModelConfig(BaseModel): completion_params: dict[str, Any] = Field(default_factory=dict) +class LLMGenerationData(BaseModel): + """Generation data from LLM invocation with tools. + + For multi-turn tool calls like: thought1 -> text1 -> tool_call1 -> thought2 -> text2 -> tool_call2 + - reasoning_contents: [thought1, thought2, ...] - one element per turn + - tool_calls: [{id, name, arguments, result}, ...] 
- all tool calls with results + """ + + text: str = Field(..., description="Accumulated text content from all turns") + reasoning_contents: list[str] = Field(default_factory=list, description="Reasoning content per turn") + tool_calls: list[dict[str, Any]] = Field(default_factory=list, description="Tool calls with results") + usage: LLMUsage = Field(..., description="LLM usage statistics") + finish_reason: str | None = Field(None, description="Finish reason from LLM") + files: list[File] = Field(default_factory=list, description="Generated files") + + class ContextConfig(BaseModel): enabled: bool variable_selector: list[str] | None = None diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 738350f301..fe105c2ddb 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -83,6 +83,7 @@ from core.workflow.runtime import VariablePool from . import llm_utils from .entities import ( + LLMGenerationData, LLMNodeChatModelMessage, LLMNodeCompletionModelPromptTemplate, LLMNodeData, @@ -148,10 +149,84 @@ class LLMNode(Node[LLMNodeData]): def version(cls) -> str: return "1" + def _stream_llm_events( + self, + generator: Generator[NodeEventBase | LLMStructuredOutput, None, LLMGenerationData | None], + *, + model_instance: ModelInstance, + ) -> Generator[ + NodeEventBase, + None, + tuple[ + str, + str, + LLMUsage, + str | None, + LLMStructuredOutput | None, + LLMGenerationData | None, + ], + ]: + """ + Stream events and capture generator return value in one place. + Uses generator delegation so _run stays concise while still emitting events. + """ + clean_text = "" + reasoning_content = "" + usage = LLMUsage.empty_usage() + finish_reason: str | None = None + structured_output: LLMStructuredOutput | None = None + generation_data: LLMGenerationData | None = None + completed = False + + while True: + try: + event = next(generator) + except StopIteration as exc: + if isinstance(exc.value, LLMGenerationData): + generation_data = exc.value + break + + if completed: + # After completion we still drain to reach StopIteration.value + continue + + match event: + case StreamChunkEvent() | ThoughtChunkEvent(): + yield event + + case ModelInvokeCompletedEvent( + text=text, + usage=usage_event, + finish_reason=finish_reason_event, + reasoning_content=reasoning_event, + structured_output=structured_raw, + ): + clean_text = text + usage = usage_event + finish_reason = finish_reason_event + reasoning_content = reasoning_event or "" + + if self.node_data.reasoning_format != "tagged": + clean_text, _ = LLMNode._split_reasoning(clean_text, self.node_data.reasoning_format) + + structured_output = ( + LLMStructuredOutput(structured_output=structured_raw) if structured_raw else None + ) + + llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) + completed = True + + case LLMStructuredOutput(): + structured_output = event + + case _: + continue + + return clean_text, reasoning_content, usage, finish_reason, structured_output, generation_data + def _run(self) -> Generator: node_inputs: dict[str, Any] = {} process_data: dict[str, Any] = {} - result_text = "" clean_text = "" usage = LLMUsage.empty_usage() finish_reason = None @@ -240,10 +315,13 @@ class LLMNode(Node[LLMNodeData]): tenant_id=self.tenant_id, ) + # Variables for outputs + generation_data: LLMGenerationData | None = None + structured_output: LLMStructuredOutput | None = None + # Check if tools are configured if self.tool_call_enabled: # Use 
tool-enabled invocation (Agent V2 style) - # This generator handles all events including final events generator = self._invoke_llm_with_tools( model_instance=model_instance, prompt_messages=prompt_messages, @@ -253,10 +331,6 @@ class LLMNode(Node[LLMNodeData]): node_inputs=node_inputs, process_data=process_data, ) - # Forward all events and return early since _invoke_llm_with_tools - # already sends final event and StreamCompletedEvent - yield from generator - return else: # Use traditional LLM invocation generator = LLMNode.invoke_llm( @@ -274,39 +348,23 @@ class LLMNode(Node[LLMNodeData]): reasoning_format=self._node_data.reasoning_format, ) - structured_output: LLMStructuredOutput | None = None + ( + clean_text, + reasoning_content, + usage, + finish_reason, + structured_output, + generation_data, + ) = yield from self._stream_llm_events(generator, model_instance=model_instance) - for event in generator: - if isinstance(event, (StreamChunkEvent, ThoughtChunkEvent)): - yield event - elif isinstance(event, ModelInvokeCompletedEvent): - # Raw text - result_text = event.text - usage = event.usage - finish_reason = event.finish_reason - reasoning_content = event.reasoning_content or "" - - # For downstream nodes, determine clean text based on reasoning_format - if self.node_data.reasoning_format == "tagged": - # Keep tags for backward compatibility - clean_text = result_text - else: - # Extract clean text from tags - clean_text, _ = LLMNode._split_reasoning(result_text, self.node_data.reasoning_format) - - # Process structured output if available from the event. - structured_output = ( - LLMStructuredOutput(structured_output=event.structured_output) - if event.structured_output - else None - ) - - # deduct quota - llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) - break - elif isinstance(event, LLMStructuredOutput): - structured_output = event + # Extract variables from generation_data if available + if generation_data: + clean_text = generation_data.text + reasoning_content = "" + usage = generation_data.usage + finish_reason = generation_data.finish_reason + # Unified process_data building process_data = { "model_mode": model_config.mode, "prompts": PromptMessageUtil.prompt_messages_to_prompt_for_saving( @@ -318,38 +376,56 @@ class LLMNode(Node[LLMNodeData]): "model_name": model_config.model, } + # Unified outputs building outputs = { "text": clean_text, "reasoning_content": reasoning_content, "usage": jsonable_encoder(usage), "finish_reason": finish_reason, - "generation": { + } + + # Build generation field + if generation_data: + # Use generation_data from tool invocation (supports multi-turn) + generation = { + "content": generation_data.text, + "reasoning_content": generation_data.reasoning_contents, # [thought1, thought2, ...] 
+ "tool_calls": generation_data.tool_calls, + } + files_to_output = generation_data.files + else: + # Traditional LLM invocation + generation = { "content": clean_text, "reasoning_content": [reasoning_content] if reasoning_content else [], "tool_calls": [], - }, - } + } + files_to_output = self._file_outputs + + outputs["generation"] = generation + if files_to_output: + outputs["files"] = ArrayFileSegment(value=files_to_output) if structured_output: outputs["structured_output"] = structured_output.structured_output - if self._file_outputs: - outputs["files"] = ArrayFileSegment(value=self._file_outputs) # Send final chunk event to indicate streaming is complete - yield StreamChunkEvent( - selector=[self._node_id, "text"], - chunk="", - is_final=True, - ) - yield StreamChunkEvent( - selector=[self._node_id, "generation", "content"], - chunk="", - is_final=True, - ) - yield ThoughtChunkEvent( - selector=[self._node_id, "generation", "thought"], - chunk="", - is_final=True, - ) + if not self.tool_call_enabled: + # For tool calls, final events are already sent in _process_tool_outputs + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk="", + is_final=True, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk="", + is_final=True, + ) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk="", + is_final=True, + ) yield StreamCompletedEvent( node_run_result=NodeRunResult( @@ -1313,8 +1389,11 @@ class LLMNode(Node[LLMNodeData]): variable_pool: VariablePool, node_inputs: dict[str, Any], process_data: dict[str, Any], - ) -> Generator[NodeEventBase, None, None]: - """Invoke LLM with tools support (from Agent V2).""" + ) -> Generator[NodeEventBase, None, LLMGenerationData]: + """Invoke LLM with tools support (from Agent V2). + + Returns LLMGenerationData with text, reasoning_contents, tool_calls, usage, finish_reason, files + """ # Get model features to determine strategy model_features = self._get_model_features(model_instance) @@ -1342,8 +1421,9 @@ class LLMNode(Node[LLMNodeData]): stream=True, ) - # Process outputs - yield from self._process_tool_outputs(outputs, strategy, node_inputs, process_data) + # Process outputs and return generation result + result = yield from self._process_tool_outputs(outputs, strategy, node_inputs, process_data) + return result def _get_model_features(self, model_instance: ModelInstance) -> list[ModelFeature]: """Get model schema to determine features.""" @@ -1440,8 +1520,11 @@ class LLMNode(Node[LLMNodeData]): strategy: Any, node_inputs: dict[str, Any], process_data: dict[str, Any], - ) -> Generator[NodeEventBase, None, None]: - """Process strategy outputs and convert to node events.""" + ) -> Generator[NodeEventBase, None, LLMGenerationData]: + """Process strategy outputs and convert to node events. 
+ + Returns LLMGenerationData with text, reasoning_contents, tool_calls, usage, finish_reason, files + """ text = "" files: list[File] = [] usage = LLMUsage.empty_usage() @@ -1450,7 +1533,9 @@ class LLMNode(Node[LLMNodeData]): agent_result: AgentResult | None = None think_parser = llm_utils.ThinkTagStreamParser() - reasoning_chunks: list[str] = [] + # Track reasoning per turn: each tool_call completion marks end of a turn + current_turn_reasoning: list[str] = [] # Buffer for current turn's thought chunks + reasoning_per_turn: list[str] = [] # Final list: one element per turn # Process each output from strategy try: @@ -1532,6 +1617,11 @@ class LLMNode(Node[LLMNodeData]): is_final=False, ) + # End of current turn: save accumulated thought as one element + if current_turn_reasoning: + reasoning_per_turn.append("".join(current_turn_reasoning)) + current_turn_reasoning.clear() + elif isinstance(output, LLMResultChunk): # Handle LLM result chunks - only process text content message = output.delta.message @@ -1549,7 +1639,7 @@ class LLMNode(Node[LLMNodeData]): continue if kind == "thought": - reasoning_chunks.append(segment) + current_turn_reasoning.append(segment) yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, @@ -1594,7 +1684,7 @@ class LLMNode(Node[LLMNodeData]): if not segment: continue if kind == "thought": - reasoning_chunks.append(segment) + current_turn_reasoning.append(segment) yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, @@ -1613,6 +1703,10 @@ class LLMNode(Node[LLMNodeData]): is_final=False, ) + # Save the last turn's thought if any + if current_turn_reasoning: + reasoning_per_turn.append("".join(current_turn_reasoning)) + # Send final events for all streams yield StreamChunkEvent( selector=[self._node_id, "text"], @@ -1653,45 +1747,32 @@ class LLMNode(Node[LLMNodeData]): is_final=True, ) - # Build generation field from agent_logs + # Build tool_calls from agent_logs (with results) tool_calls_for_generation = [] for log in agent_logs: - if log.label == "Tool Call": - tool_call_data = { - "id": log.data.get("tool_call_id", ""), - "name": log.data.get("tool_name", ""), - "arguments": json.dumps(log.data.get("tool_args", {})), - "result": log.data.get("output", ""), - } - tool_calls_for_generation.append(tool_call_data) + tool_call_id = log.data.get("tool_call_id") + if not tool_call_id or log.status == AgentLog.LogStatus.START.value: + continue - # Complete with results - yield StreamCompletedEvent( - node_run_result=NodeRunResult( - status=WorkflowNodeExecutionStatus.SUCCEEDED, - outputs={ - "text": text, - "files": ArrayFileSegment(value=files), - "usage": jsonable_encoder(usage), - "finish_reason": finish_reason, - "generation": { - "reasoning_content": ["".join(reasoning_chunks)] if reasoning_chunks else [], - "tool_calls": tool_calls_for_generation, - "content": text, - }, - }, - inputs={ - **node_inputs, - "tools": [ - {"provider_id": tool.provider_name, "tool_name": tool.tool_name} - for tool in self._node_data.tools - ] - if self._node_data.tools - else [], - }, - process_data=process_data, - llm_usage=usage, + tool_args = log.data.get("tool_args") or {} + tool_calls_for_generation.append( + { + "id": tool_call_id, + "name": log.data.get("tool_name", ""), + "arguments": json.dumps(tool_args) if tool_args else "", + # Prefer output, fall back to error text if present + "result": log.data.get("output") or log.data.get("error") or "", + } ) + + # Return generation data for caller + return 
LLMGenerationData( + text=text, + reasoning_contents=reasoning_per_turn, # Multi-turn: [thought1, thought2, ...] + tool_calls=tool_calls_for_generation, + usage=usage, + finish_reason=finish_reason, + files=files, ) def _accumulate_usage(self, total_usage: LLMUsage, delta_usage: LLMUsage) -> None: diff --git a/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py b/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py new file mode 100644 index 0000000000..340cc82bb5 --- /dev/null +++ b/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py @@ -0,0 +1,46 @@ +"""add llm generation detail table. + +Revision ID: 85c8b4a64f53 +Revises: 7bb281b7a422 +Create Date: 2025-12-10 16:17:46.597669 + +""" +from alembic import op +import models as models +import sqlalchemy as sa +from sqlalchemy.dialects import postgresql + +# revision identifiers, used by Alembic. +revision = '85c8b4a64f53' +down_revision = '7bb281b7a422' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + op.create_table('llm_generation_details', + sa.Column('id', models.types.StringUUID(), nullable=False), + sa.Column('tenant_id', models.types.StringUUID(), nullable=False), + sa.Column('app_id', models.types.StringUUID(), nullable=False), + sa.Column('message_id', models.types.StringUUID(), nullable=True), + sa.Column('workflow_run_id', models.types.StringUUID(), nullable=True), + sa.Column('node_id', sa.String(length=255), nullable=True), + sa.Column('reasoning_content', models.types.LongText(), nullable=True), + sa.Column('tool_calls', models.types.LongText(), nullable=True), + sa.Column('sequence', models.types.LongText(), nullable=True), + sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False), + sa.CheckConstraint('(message_id IS NOT NULL AND workflow_run_id IS NULL AND node_id IS NULL) OR (message_id IS NULL AND workflow_run_id IS NOT NULL AND node_id IS NOT NULL)', name=op.f('llm_generation_details_ck_llm_generation_detail_assoc_mode_check')), + sa.PrimaryKeyConstraint('id', name='llm_generation_detail_pkey'), + sa.UniqueConstraint('message_id', name=op.f('llm_generation_details_message_id_key')) + ) + with op.batch_alter_table('llm_generation_details', schema=None) as batch_op: + batch_op.create_index('idx_llm_generation_detail_message', ['message_id'], unique=False) + batch_op.create_index('idx_llm_generation_detail_workflow', ['workflow_run_id', 'node_id'], unique=False) + + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! 
### + op.drop_table('llm_generation_details') + # ### end Alembic commands ### From 13fa56b5b1bb522aeda2aa7629ef2882f1d9c43c Mon Sep 17 00:00:00 2001 From: Novice Date: Fri, 12 Dec 2025 16:24:49 +0800 Subject: [PATCH 06/18] feat: add tracing metadata --- api/core/workflow/enums.py | 1 + api/core/workflow/nodes/llm/entities.py | 26 +++++++++++++ api/core/workflow/nodes/llm/node.py | 49 +++++++++++++++++++++++++ 3 files changed, 76 insertions(+) diff --git a/api/core/workflow/enums.py b/api/core/workflow/enums.py index 3a60d34691..32c6b2d6e5 100644 --- a/api/core/workflow/enums.py +++ b/api/core/workflow/enums.py @@ -248,6 +248,7 @@ class WorkflowNodeExecutionMetadataKey(StrEnum): LOOP_VARIABLE_MAP = "loop_variable_map" # single loop variable output DATASOURCE_INFO = "datasource_info" LLM_CONTENT_SEQUENCE = "llm_content_sequence" + LLM_TRACE = "llm_trace" class WorkflowNodeExecutionStatus(StrEnum): diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index 2003820d80..8b4fa11dcf 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -19,6 +19,31 @@ class ModelConfig(BaseModel): completion_params: dict[str, Any] = Field(default_factory=dict) +class LLMTraceSegment(BaseModel): + """ + Streaming trace segment for LLM tool-enabled runs. + + We keep order as-is to allow direct replay: thought/content/tool_call/tool_result appear + exactly in the sequence they were emitted. + """ + + type: Literal["thought", "content", "tool_call", "tool_result"] + turn: int = Field(0, description="0-based turn index, increments after each tool_result") + + # Common optional fields + text: str | None = Field(None, description="Text chunk for thought/content") + + # Tool call fields + tool_call_id: str | None = None + tool_name: str | None = None + tool_arguments: str | None = None + + # Tool result fields + tool_output: str | None = None + tool_error: str | None = None + files: list[str] = Field(default_factory=list, description="File IDs from tool result if any") + + class LLMGenerationData(BaseModel): """Generation data from LLM invocation with tools. 
@@ -33,6 +58,7 @@ class LLMGenerationData(BaseModel): usage: LLMUsage = Field(..., description="LLM usage statistics") finish_reason: str | None = Field(None, description="Finish reason from LLM") files: list[File] = Field(default_factory=list, description="Generated files") + trace: list[LLMTraceSegment] = Field(default_factory=list, description="Streaming trace in emitted order") class ContextConfig(BaseModel): diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index fe105c2ddb..21e8c61325 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -87,6 +87,7 @@ from .entities import ( LLMNodeChatModelMessage, LLMNodeCompletionModelPromptTemplate, LLMNodeData, + LLMTraceSegment, ModelConfig, ) from .exc import ( @@ -437,6 +438,11 @@ class LLMNode(Node[LLMNodeData]): WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, + WorkflowNodeExecutionMetadataKey.LLM_TRACE: [ + segment.model_dump() for segment in generation_data.trace + ] + if generation_data + else [], }, llm_usage=usage, ) @@ -1536,6 +1542,9 @@ class LLMNode(Node[LLMNodeData]): # Track reasoning per turn: each tool_call completion marks end of a turn current_turn_reasoning: list[str] = [] # Buffer for current turn's thought chunks reasoning_per_turn: list[str] = [] # Final list: one element per turn + tool_call_index_map: dict[str, int] = {} # tool_call_id -> index + trace_segments: list[LLMTraceSegment] = [] # Ordered trace for replay + current_turn = 0 # Process each output from strategy try: @@ -1572,6 +1581,20 @@ class LLMNode(Node[LLMNodeData]): tool_args = output.data.get("tool_args", {}) tool_arguments = json.dumps(tool_args) if tool_args else "" + if tool_call_id and tool_call_id not in tool_call_index_map: + tool_call_index_map[tool_call_id] = len(tool_call_index_map) + + trace_segments.append( + LLMTraceSegment( + type="tool_call", + turn=current_turn, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + text=None, + ) + ) + yield ToolCallChunkEvent( selector=[self._node_id, "generation", "tool_calls"], chunk=tool_arguments, @@ -1589,6 +1612,9 @@ class LLMNode(Node[LLMNodeData]): tool_files = [] tool_error = None + if tool_call_id and tool_call_id not in tool_call_index_map: + tool_call_index_map[tool_call_id] = len(tool_call_index_map) + # Extract file IDs if present (only for success case) files_data = output.data.get("files") if files_data and isinstance(files_data, list): @@ -1607,6 +1633,20 @@ class LLMNode(Node[LLMNodeData]): if meta and isinstance(meta, dict) and meta.get("error"): tool_error = meta.get("error") + trace_segments.append( + LLMTraceSegment( + type="tool_result", + turn=current_turn, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_output=str(tool_output) if tool_output is not None else None, + tool_error=str(tool_error) if tool_error is not None else None, + files=[str(f) for f in tool_files] if tool_files else [], + text=None, + ) + ) + current_turn += 1 + yield ToolResultChunkEvent( selector=[self._node_id, "generation", "tool_results"], chunk=str(tool_output) if tool_output else "", @@ -1640,6 +1680,7 @@ class LLMNode(Node[LLMNodeData]): if kind == "thought": current_turn_reasoning.append(segment) + trace_segments.append(LLMTraceSegment(type="thought", turn=current_turn, text=segment)) yield ThoughtChunkEvent( selector=[self._node_id, 
"generation", "thought"], chunk=segment, @@ -1647,6 +1688,7 @@ class LLMNode(Node[LLMNodeData]): ) else: text += segment + trace_segments.append(LLMTraceSegment(type="content", turn=current_turn, text=segment)) yield StreamChunkEvent( selector=[self._node_id, "text"], chunk=segment, @@ -1685,6 +1727,7 @@ class LLMNode(Node[LLMNodeData]): continue if kind == "thought": current_turn_reasoning.append(segment) + trace_segments.append(LLMTraceSegment(type="thought", turn=current_turn, text=segment)) yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, @@ -1692,6 +1735,7 @@ class LLMNode(Node[LLMNodeData]): ) else: text += segment + trace_segments.append(LLMTraceSegment(type="content", turn=current_turn, text=segment)) yield StreamChunkEvent( selector=[self._node_id, "text"], chunk=segment, @@ -1765,6 +1809,10 @@ class LLMNode(Node[LLMNodeData]): } ) + tool_calls_for_generation.sort( + key=lambda item: tool_call_index_map.get(item.get("id", ""), len(tool_call_index_map)) + ) + # Return generation data for caller return LLMGenerationData( text=text, @@ -1773,6 +1821,7 @@ class LLMNode(Node[LLMNodeData]): usage=usage, finish_reason=finish_reason, files=files, + trace=trace_segments, ) def _accumulate_usage(self, total_usage: LLMUsage, delta_usage: LLMUsage) -> None: From 9941d1f1609cd1a6f6a2a02ff541534eb0ca8e5b Mon Sep 17 00:00:00 2001 From: Novice Date: Mon, 15 Dec 2025 14:18:53 +0800 Subject: [PATCH 07/18] feat: add llm log metadata --- api/core/workflow/nodes/llm/entities.py | 11 +-- api/core/workflow/nodes/llm/node.py | 78 +++++++++++++------ .../graph_engine/test_response_coordinator.py | 1 + 3 files changed, 60 insertions(+), 30 deletions(-) diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index 8b4fa11dcf..068ea5ecf0 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -23,22 +23,19 @@ class LLMTraceSegment(BaseModel): """ Streaming trace segment for LLM tool-enabled runs. - We keep order as-is to allow direct replay: thought/content/tool_call/tool_result appear - exactly in the sequence they were emitted. + Order is preserved for replay. Tool calls are single entries containing both + arguments and results. 
""" - type: Literal["thought", "content", "tool_call", "tool_result"] - turn: int = Field(0, description="0-based turn index, increments after each tool_result") + type: Literal["thought", "content", "tool_call"] # Common optional fields text: str | None = Field(None, description="Text chunk for thought/content") - # Tool call fields + # Tool call fields (combined start + result) tool_call_id: str | None = None tool_name: str | None = None tool_arguments: str | None = None - - # Tool result fields tool_output: str | None = None tool_error: str | None = None files: list[str] = Field(default_factory=list, description="File IDs from tool result if any") diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 21e8c61325..5f4d938773 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -1544,7 +1544,22 @@ class LLMNode(Node[LLMNodeData]): reasoning_per_turn: list[str] = [] # Final list: one element per turn tool_call_index_map: dict[str, int] = {} # tool_call_id -> index trace_segments: list[LLMTraceSegment] = [] # Ordered trace for replay + tool_trace_map: dict[str, LLMTraceSegment] = {} current_turn = 0 + pending_thought: list[str] = [] + pending_content: list[str] = [] + + def _flush_thought() -> None: + if not pending_thought: + return + trace_segments.append(LLMTraceSegment(type="thought", text="".join(pending_thought))) + pending_thought.clear() + + def _flush_content() -> None: + if not pending_content: + return + trace_segments.append(LLMTraceSegment(type="content", text="".join(pending_content))) + pending_content.clear() # Process each output from strategy try: @@ -1584,16 +1599,19 @@ class LLMNode(Node[LLMNodeData]): if tool_call_id and tool_call_id not in tool_call_index_map: tool_call_index_map[tool_call_id] = len(tool_call_index_map) - trace_segments.append( - LLMTraceSegment( - type="tool_call", - turn=current_turn, - tool_call_id=tool_call_id, - tool_name=tool_name, - tool_arguments=tool_arguments, - text=None, - ) + _flush_thought() + _flush_content() + + tool_call_segment = LLMTraceSegment( + type="tool_call", + text=None, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, ) + trace_segments.append(tool_call_segment) + if tool_call_id: + tool_trace_map[tool_call_id] = tool_call_segment yield ToolCallChunkEvent( selector=[self._node_id, "generation", "tool_calls"], @@ -1615,6 +1633,9 @@ class LLMNode(Node[LLMNodeData]): if tool_call_id and tool_call_id not in tool_call_index_map: tool_call_index_map[tool_call_id] = len(tool_call_index_map) + _flush_thought() + _flush_content() + # Extract file IDs if present (only for success case) files_data = output.data.get("files") if files_data and isinstance(files_data, list): @@ -1633,18 +1654,22 @@ class LLMNode(Node[LLMNodeData]): if meta and isinstance(meta, dict) and meta.get("error"): tool_error = meta.get("error") - trace_segments.append( - LLMTraceSegment( - type="tool_result", - turn=current_turn, + tool_call_segment = tool_trace_map.get(tool_call_id) + if tool_call_segment is None: + tool_call_segment = LLMTraceSegment( + type="tool_call", + text=None, tool_call_id=tool_call_id, tool_name=tool_name, - tool_output=str(tool_output) if tool_output is not None else None, - tool_error=str(tool_error) if tool_error is not None else None, - files=[str(f) for f in tool_files] if tool_files else [], - text=None, + tool_arguments=None, ) - ) + trace_segments.append(tool_call_segment) + if tool_call_id: + 
tool_trace_map[tool_call_id] = tool_call_segment + + tool_call_segment.tool_output = str(tool_output) if tool_output is not None else None + tool_call_segment.tool_error = str(tool_error) if tool_error is not None else None + tool_call_segment.files = [str(f) for f in tool_files] if tool_files else [] current_turn += 1 yield ToolResultChunkEvent( @@ -1679,16 +1704,18 @@ class LLMNode(Node[LLMNodeData]): continue if kind == "thought": + _flush_content() current_turn_reasoning.append(segment) - trace_segments.append(LLMTraceSegment(type="thought", turn=current_turn, text=segment)) + pending_thought.append(segment) yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, is_final=False, ) else: + _flush_thought() text += segment - trace_segments.append(LLMTraceSegment(type="content", turn=current_turn, text=segment)) + pending_content.append(segment) yield StreamChunkEvent( selector=[self._node_id, "text"], chunk=segment, @@ -1726,16 +1753,18 @@ class LLMNode(Node[LLMNodeData]): if not segment: continue if kind == "thought": + _flush_content() current_turn_reasoning.append(segment) - trace_segments.append(LLMTraceSegment(type="thought", turn=current_turn, text=segment)) + pending_thought.append(segment) yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, is_final=False, ) else: + _flush_thought() text += segment - trace_segments.append(LLMTraceSegment(type="content", turn=current_turn, text=segment)) + pending_content.append(segment) yield StreamChunkEvent( selector=[self._node_id, "text"], chunk=segment, @@ -1751,6 +1780,9 @@ class LLMNode(Node[LLMNodeData]): if current_turn_reasoning: reasoning_per_turn.append("".join(current_turn_reasoning)) + _flush_thought() + _flush_content() + # Send final events for all streams yield StreamChunkEvent( selector=[self._node_id, "text"], @@ -1816,7 +1848,7 @@ class LLMNode(Node[LLMNodeData]): # Return generation data for caller return LLMGenerationData( text=text, - reasoning_contents=reasoning_per_turn, # Multi-turn: [thought1, thought2, ...] + reasoning_contents=reasoning_per_turn, tool_calls=tool_calls_for_generation, usage=usage, finish_reason=finish_reason, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py index 388496ce1d..5df6bba748 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -167,3 +167,4 @@ class TestResponseCoordinatorObjectStreaming: assert ("node1", "generation", "content") in children assert ("node1", "generation", "tool_calls") in children assert ("node1", "generation", "thought") in children + From ad18d084f3a7d15af2c3ebc5cd72c1b5ae643f7c Mon Sep 17 00:00:00 2001 From: Novice Date: Mon, 15 Dec 2025 14:59:06 +0800 Subject: [PATCH 08/18] feat: add sequence output variable. 
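
This commit adds an ordered `sequence` field to the generation output so
renderers can replay how reasoning, content and tool calls interleave across
turns. As a rough sketch of the intended shape (field names follow
LLMGenerationData and the sequence builder in this patch; the concrete values
are a hypothetical example, not output from a real run), a single-tool run
could serialize as:

    # Hypothetical `generation` output of a tool-enabled LLM node run.
    generation = {
        "content": "The weather in Berlin is sunny.",
        "reasoning_content": [
            "I should call the weather tool first.",  # turn 1 thought
            "The tool says sunny; answer the user.",  # turn 2 thought
        ],
        "tool_calls": [
            {
                "id": "call_0",
                "name": "get_weather",
                "arguments": "{\"city\": \"Berlin\"}",
                "result": "sunny",
            },
        ],
        "sequence": [
            {"type": "reasoning", "index": 0},           # reasoning_content[0]
            {"type": "tool_call", "index": 0},           # tool_calls[0]
            {"type": "reasoning", "index": 1},           # reasoning_content[1]
            {"type": "content", "start": 0, "end": 31},  # content[0:31]
        ],
    }

Consumers walk `sequence` in order and resolve each entry against
`reasoning_content`, `tool_calls`, or a character slice of `content`.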
--- api/core/workflow/nodes/llm/entities.py | 1 + api/core/workflow/nodes/llm/node.py | 46 +++++++++++++++++++------ api/fields/workflow_run_fields.py | 1 + api/services/workflow_run_service.py | 39 +++++++++++++++++++-- 4 files changed, 75 insertions(+), 12 deletions(-) diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index 068ea5ecf0..82f146414b 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -52,6 +52,7 @@ class LLMGenerationData(BaseModel): text: str = Field(..., description="Accumulated text content from all turns") reasoning_contents: list[str] = Field(default_factory=list, description="Reasoning content per turn") tool_calls: list[dict[str, Any]] = Field(default_factory=list, description="Tool calls with results") + sequence: list[dict[str, Any]] = Field(default_factory=list, description="Ordered segments for rendering") usage: LLMUsage = Field(..., description="LLM usage statistics") finish_reason: str | None = Field(None, description="Finish reason from LLM") files: list[File] = Field(default_factory=list, description="Generated files") diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 5f4d938773..7fd74babe2 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -392,6 +392,7 @@ class LLMNode(Node[LLMNodeData]): "content": generation_data.text, "reasoning_content": generation_data.reasoning_contents, # [thought1, thought2, ...] "tool_calls": generation_data.tool_calls, + "sequence": generation_data.sequence, } files_to_output = generation_data.files else: @@ -400,6 +401,7 @@ class LLMNode(Node[LLMNodeData]): "content": clean_text, "reasoning_content": [reasoning_content] if reasoning_content else [], "tool_calls": [], + "sequence": [], } files_to_output = self._file_outputs @@ -428,22 +430,24 @@ class LLMNode(Node[LLMNodeData]): is_final=True, ) + metadata: dict[WorkflowNodeExecutionMetadataKey, Any] = { + WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, + WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, + WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, + } + + if generation_data and generation_data.trace: + metadata[WorkflowNodeExecutionMetadataKey.LLM_TRACE] = [ + segment.model_dump() for segment in generation_data.trace + ] + yield StreamCompletedEvent( node_run_result=NodeRunResult( status=WorkflowNodeExecutionStatus.SUCCEEDED, inputs=node_inputs, process_data=process_data, outputs=outputs, - metadata={ - WorkflowNodeExecutionMetadataKey.TOTAL_TOKENS: usage.total_tokens, - WorkflowNodeExecutionMetadataKey.TOTAL_PRICE: usage.total_price, - WorkflowNodeExecutionMetadataKey.CURRENCY: usage.currency, - WorkflowNodeExecutionMetadataKey.LLM_TRACE: [ - segment.model_dump() for segment in generation_data.trace - ] - if generation_data - else [], - }, + metadata=metadata, llm_usage=usage, ) ) @@ -1783,6 +1787,27 @@ class LLMNode(Node[LLMNodeData]): _flush_thought() _flush_content() + # Build sequence from trace_segments for rendering + sequence: list[dict[str, Any]] = [] + reasoning_index = 0 + content_position = 0 + tool_call_seen_index: dict[str, int] = {} + for segment in trace_segments: + if segment.type == "thought": + sequence.append({"type": "reasoning", "index": reasoning_index}) + reasoning_index += 1 + elif segment.type == "content": + segment_text = segment.text or "" + start = content_position + end = start + len(segment_text) + 
sequence.append({"type": "content", "start": start, "end": end}) + content_position = end + elif segment.type == "tool_call": + tool_id = segment.tool_call_id or "" + if tool_id not in tool_call_seen_index: + tool_call_seen_index[tool_id] = len(tool_call_seen_index) + sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]}) + # Send final events for all streams yield StreamChunkEvent( selector=[self._node_id, "text"], @@ -1850,6 +1875,7 @@ class LLMNode(Node[LLMNodeData]): text=text, reasoning_contents=reasoning_per_turn, tool_calls=tool_calls_for_generation, + sequence=sequence, usage=usage, finish_reason=finish_reason, files=files, diff --git a/api/fields/workflow_run_fields.py b/api/fields/workflow_run_fields.py index 6305d8d9d5..7b878e05c8 100644 --- a/api/fields/workflow_run_fields.py +++ b/api/fields/workflow_run_fields.py @@ -81,6 +81,7 @@ workflow_run_detail_fields = { "inputs": fields.Raw(attribute="inputs_dict"), "status": fields.String, "outputs": fields.Raw(attribute="outputs_dict"), + "outputs_as_generation": fields.Boolean, "error": fields.String, "elapsed_time": fields.Float, "total_tokens": fields.Integer, diff --git a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index b903d8df5f..3b6998d0b2 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -1,5 +1,6 @@ import threading -from collections.abc import Sequence +from collections.abc import Mapping, Sequence +from typing import Any from sqlalchemy import Engine from sqlalchemy.orm import sessionmaker @@ -102,12 +103,17 @@ class WorkflowRunService: :param app_model: app model :param run_id: workflow run id """ - return self._workflow_run_repo.get_workflow_run_by_id( + workflow_run = self._workflow_run_repo.get_workflow_run_by_id( tenant_id=app_model.tenant_id, app_id=app_model.id, run_id=run_id, ) + if workflow_run: + workflow_run.outputs_as_generation = self._are_all_generation_outputs(workflow_run.outputs_dict) + + return workflow_run + def get_workflow_runs_count( self, app_model: App, @@ -159,3 +165,32 @@ class WorkflowRunService: app_id=app_model.id, workflow_run_id=run_id, ) + + @staticmethod + def _are_all_generation_outputs(outputs: Mapping[str, Any]) -> bool: + if not outputs: + return False + + allowed_sequence_types = {"reasoning", "content", "tool_call"} + + for value in outputs.values(): + if not isinstance(value, Mapping): + return False + + content = value.get("content") + reasoning_content = value.get("reasoning_content") + tool_calls = value.get("tool_calls") + sequence = value.get("sequence") + + if not isinstance(content, str): + return False + if not isinstance(reasoning_content, list) or any(not isinstance(item, str) for item in reasoning_content): + return False + if not isinstance(tool_calls, list) or any(not isinstance(item, Mapping) for item in tool_calls): + return False + if not isinstance(sequence, list) or any( + not isinstance(item, Mapping) or item.get("type") not in allowed_sequence_types for item in sequence + ): + return False + + return True From ff57848268744c29fa5ce7b70cc21ffa571a1f99 Mon Sep 17 00:00:00 2001 From: "autofix-ci[bot]" <114827586+autofix-ci[bot]@users.noreply.github.com> Date: Mon, 15 Dec 2025 07:29:20 +0000 Subject: [PATCH 09/18] [autofix.ci] apply automated fixes --- .../core/workflow/graph_engine/test_response_coordinator.py | 1 - 1 file changed, 1 deletion(-) diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py 
b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py index 5df6bba748..388496ce1d 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -167,4 +167,3 @@ class TestResponseCoordinatorObjectStreaming: assert ("node1", "generation", "content") in children assert ("node1", "generation", "tool_calls") in children assert ("node1", "generation", "thought") in children - From 0c4c268003cda2ba895658cb5e22bd4c86732d39 Mon Sep 17 00:00:00 2001 From: Novice Date: Tue, 16 Dec 2025 15:14:42 +0800 Subject: [PATCH 10/18] chore: fix ci issues --- api/core/agent/agent_app_runner.py | 2 +- api/core/workflow/nodes/llm/node.py | 30 ++++++++-------- ...4a64f53_add_llm_generation_detail_table.py | 2 +- api/models/workflow.py | 35 +++++++++++++++++++ api/services/workflow_run_service.py | 35 +------------------ api/tests/unit_tests/core/agent/__init__.py | 3 ++ 6 files changed, 56 insertions(+), 51 deletions(-) create mode 100644 api/tests/unit_tests/core/agent/__init__.py diff --git a/api/core/agent/agent_app_runner.py b/api/core/agent/agent_app_runner.py index 9be5be5c7c..e15ede15d2 100644 --- a/api/core/agent/agent_app_runner.py +++ b/api/core/agent/agent_app_runner.py @@ -108,7 +108,7 @@ class AgentAppRunner(BaseAgentRunner): current_agent_thought_id = None has_published_thought = False current_tool_name: str | None = None - self._current_message_file_ids = [] + self._current_message_file_ids: list[str] = [] # organize prompt messages prompt_messages = self._organize_prompt_messages() diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 54abef0552..3b82ec4a05 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -1720,15 +1720,15 @@ class LLMNode(Node[LLMNodeData]): if meta and isinstance(meta, dict) and meta.get("error"): tool_error = meta.get("error") - tool_call_segment = tool_trace_map.get(tool_call_id) - if tool_call_segment is None: - tool_call_segment = LLMTraceSegment( - type="tool_call", - text=None, - tool_call_id=tool_call_id, - tool_name=tool_name, - tool_arguments=None, - ) + existing_tool_segment = tool_trace_map.get(tool_call_id) + tool_call_segment = existing_tool_segment or LLMTraceSegment( + type="tool_call", + text=None, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=None, + ) + if existing_tool_segment is None: trace_segments.append(tool_call_segment) if tool_call_id: tool_trace_map[tool_call_id] = tool_call_segment @@ -1854,18 +1854,18 @@ class LLMNode(Node[LLMNodeData]): reasoning_index = 0 content_position = 0 tool_call_seen_index: dict[str, int] = {} - for segment in trace_segments: - if segment.type == "thought": + for trace_segment in trace_segments: + if trace_segment.type == "thought": sequence.append({"type": "reasoning", "index": reasoning_index}) reasoning_index += 1 - elif segment.type == "content": - segment_text = segment.text or "" + elif trace_segment.type == "content": + segment_text = trace_segment.text or "" start = content_position end = start + len(segment_text) sequence.append({"type": "content", "start": start, "end": end}) content_position = end - elif segment.type == "tool_call": - tool_id = segment.tool_call_id or "" + elif trace_segment.type == "tool_call": + tool_id = trace_segment.tool_call_id or "" if tool_id not in tool_call_seen_index: tool_call_seen_index[tool_id] = len(tool_call_seen_index) 
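A minimal, self-contained sketch of the merge rule applied above (plain dicts stand in for LLMTraceSegment; the names are illustrative, not part of the patch): the result of a tool call is folded into the segment recorded at call start, keyed by tool_call_id, so the trace keeps exactly one entry per call.

trace: list[dict] = []
by_id: dict[str, dict] = {}

def on_tool_call_start(call_id: str, name: str, arguments: str) -> None:
    segment = {"type": "tool_call", "id": call_id, "name": name, "arguments": arguments, "output": None}
    trace.append(segment)
    by_id[call_id] = segment

def on_tool_result(call_id: str, output: str) -> None:
    # Reuse the segment created at call start; only create one if no start was ever seen.
    segment = by_id.get(call_id)
    if segment is None:
        segment = {"type": "tool_call", "id": call_id, "name": None, "arguments": None, "output": None}
        trace.append(segment)
        by_id[call_id] = segment
    segment["output"] = output

on_tool_call_start("call_1", "search", '{"q": "dify"}')
on_tool_result("call_1", "3 results")
assert len(trace) == 1 and trace[0]["output"] == "3 results"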
sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]}) diff --git a/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py b/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py index 340cc82bb5..700f9ea80b 100644 --- a/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py +++ b/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py @@ -12,7 +12,7 @@ from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. revision = '85c8b4a64f53' -down_revision = '7bb281b7a422' +down_revision = 'd57accd375ae' branch_labels = None depends_on = None diff --git a/api/models/workflow.py b/api/models/workflow.py index 853d5afefc..89ec0352df 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -57,6 +57,37 @@ from .types import EnumText, LongText, StringUUID logger = logging.getLogger(__name__) +def is_generation_outputs(outputs: Mapping[str, Any]) -> bool: + if not outputs: + return False + + allowed_sequence_types = {"reasoning", "content", "tool_call"} + + def valid_sequence_item(item: Mapping[str, Any]) -> bool: + return isinstance(item, Mapping) and item.get("type") in allowed_sequence_types + + def valid_value(value: Any) -> bool: + if not isinstance(value, Mapping): + return False + + content = value.get("content") + reasoning_content = value.get("reasoning_content") + tool_calls = value.get("tool_calls") + sequence = value.get("sequence") + + return ( + isinstance(content, str) + and isinstance(reasoning_content, list) + and all(isinstance(item, str) for item in reasoning_content) + and isinstance(tool_calls, list) + and all(isinstance(item, Mapping) for item in tool_calls) + and isinstance(sequence, list) + and all(valid_sequence_item(item) for item in sequence) + ) + + return all(valid_value(value) for value in outputs.values()) + + class WorkflowType(StrEnum): """ Workflow Type Enum @@ -652,6 +683,10 @@ class WorkflowRun(Base): def outputs_dict(self) -> Mapping[str, Any]: return json.loads(self.outputs) if self.outputs else {} + @property + def outputs_as_generation(self) -> bool: + return is_generation_outputs(self.outputs_dict) + @property def message(self): from .model import Message diff --git a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index 3b6998d0b2..1bc821c43d 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -1,6 +1,5 @@ import threading -from collections.abc import Mapping, Sequence -from typing import Any +from collections.abc import Sequence from sqlalchemy import Engine from sqlalchemy.orm import sessionmaker @@ -109,9 +108,6 @@ class WorkflowRunService: run_id=run_id, ) - if workflow_run: - workflow_run.outputs_as_generation = self._are_all_generation_outputs(workflow_run.outputs_dict) - return workflow_run def get_workflow_runs_count( @@ -165,32 +161,3 @@ class WorkflowRunService: app_id=app_model.id, workflow_run_id=run_id, ) - - @staticmethod - def _are_all_generation_outputs(outputs: Mapping[str, Any]) -> bool: - if not outputs: - return False - - allowed_sequence_types = {"reasoning", "content", "tool_call"} - - for value in outputs.values(): - if not isinstance(value, Mapping): - return False - - content = value.get("content") - reasoning_content = value.get("reasoning_content") - tool_calls = value.get("tool_calls") - sequence = value.get("sequence") - - if not isinstance(content, str): - return False 
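For reference, an outputs mapping shaped like the one below passes every check in the is_generation_outputs() helper introduced above; the concrete values are only illustrative.

generation_outputs = {
    "generation": {
        "content": "Beijing is sunny today.",
        "reasoning_content": ["Need current weather", "Summarize the tool result"],
        "tool_calls": [{"id": "call_1", "name": "weather", "arguments": '{"city": "Beijing"}'}],
        "sequence": [
            {"type": "reasoning", "index": 0},
            {"type": "tool_call", "index": 0},
            {"type": "reasoning", "index": 1},
            {"type": "content", "start": 0, "end": 23},
        ],
    }
}

A value missing any of the four keys, or using a sequence type other than reasoning, content, or tool_call, makes the whole mapping fall back to plain (non-generation) outputs.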
- if not isinstance(reasoning_content, list) or any(not isinstance(item, str) for item in reasoning_content): - return False - if not isinstance(tool_calls, list) or any(not isinstance(item, Mapping) for item in tool_calls): - return False - if not isinstance(sequence, list) or any( - not isinstance(item, Mapping) or item.get("type") not in allowed_sequence_types for item in sequence - ): - return False - - return True diff --git a/api/tests/unit_tests/core/agent/__init__.py b/api/tests/unit_tests/core/agent/__init__.py new file mode 100644 index 0000000000..a9ccd45f4b --- /dev/null +++ b/api/tests/unit_tests/core/agent/__init__.py @@ -0,0 +1,3 @@ +""" +Mark agent test modules as a package to avoid import name collisions. +""" From d3486cab315b5f13744d49d52ebe98f11f976b73 Mon Sep 17 00:00:00 2001 From: Novice Date: Wed, 17 Dec 2025 10:30:21 +0800 Subject: [PATCH 11/18] refactor(llm node): tool call tool result entity --- .../advanced_chat/generate_task_pipeline.py | 25 ++-- .../apps/workflow/generate_task_pipeline.py | 17 ++- api/core/app/apps/workflow_app_runner.py | 7 +- api/core/app/entities/queue_entities.py | 19 +-- api/core/workflow/entities/__init__.py | 5 + api/core/workflow/entities/tool_entities.py | 33 ++++++ .../response_coordinator/coordinator.py | 31 +++-- api/core/workflow/graph_events/__init__.py | 4 + api/core/workflow/graph_events/node.py | 15 ++- api/core/workflow/node_events/node.py | 12 +- api/core/workflow/nodes/base/node.py | 25 ++-- api/core/workflow/nodes/llm/entities.py | 13 +- api/core/workflow/nodes/llm/node.py | 107 +++++++++++------ api/models/workflow.py | 5 +- api/services/workflow_run_service.py | 4 +- .../graph_engine/test_response_coordinator.py | 35 ++++-- .../node_events/test_stream_chunk_events.py | 112 ++++++++++-------- 17 files changed, 300 insertions(+), 169 deletions(-) create mode 100644 api/core/workflow/entities/tool_entities.py diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index f3e9439938..0d5e1e5dfd 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -516,6 +516,14 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): if tts_publisher and queue_message: tts_publisher.publish(queue_message) + tool_call = event.tool_call + tool_result = event.tool_result + tool_payload = tool_call or tool_result + tool_call_id = tool_payload.id if tool_payload and tool_payload.id else "" + tool_name = tool_payload.name if tool_payload and tool_payload.name else "" + tool_arguments = tool_call.arguments if tool_call and tool_call.arguments else "" + tool_files = tool_result.files if tool_result else [] + # Record stream event based on chunk type chunk_type = event.chunk_type or ChunkType.TEXT match chunk_type: @@ -525,13 +533,13 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): self._stream_buffer.record_thought_chunk(delta_text) case ChunkType.TOOL_CALL: self._stream_buffer.record_tool_call( - tool_call_id=event.tool_call_id or "", - tool_name=event.tool_name or "", - tool_arguments=event.tool_arguments or "", + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, ) case ChunkType.TOOL_RESULT: self._stream_buffer.record_tool_result( - tool_call_id=event.tool_call_id or "", + tool_call_id=tool_call_id, result=delta_text, ) @@ -541,11 +549,10 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): 
message_id=self._message_id, from_variable_selector=event.from_variable_selector, chunk_type=event.chunk_type.value if event.chunk_type else None, - tool_call_id=event.tool_call_id, - tool_name=event.tool_name, - tool_arguments=event.tool_arguments, - tool_files=event.tool_files, - tool_error=event.tool_error, + tool_call_id=tool_call_id or None, + tool_name=tool_name or None, + tool_arguments=tool_arguments or None, + tool_files=tool_files, ) def _handle_iteration_start_event( diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index a6c7067ccd..1a9d09f5e7 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -484,6 +484,14 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): if delta_text is None: return + tool_call = event.tool_call + tool_result = event.tool_result + tool_payload = tool_call or tool_result + tool_call_id = tool_payload.id if tool_payload and tool_payload.id else None + tool_name = tool_payload.name if tool_payload and tool_payload.name else None + tool_arguments = tool_call.arguments if tool_call else None + tool_files = tool_result.files if tool_result else [] + # only publish tts message at text chunk streaming if tts_publisher and queue_message: tts_publisher.publish(queue_message) @@ -492,11 +500,10 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): text=delta_text, from_variable_selector=event.from_variable_selector, chunk_type=event.chunk_type, - tool_call_id=event.tool_call_id, - tool_name=event.tool_name, - tool_arguments=event.tool_arguments, - tool_files=event.tool_files, - tool_error=event.tool_error, + tool_call_id=tool_call_id, + tool_name=tool_name, + tool_arguments=tool_arguments, + tool_files=tool_files, ) def _handle_agent_log_event(self, event: QueueAgentLogEvent, **kwargs) -> Generator[StreamResponse, None, None]: diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 23624cb934..6ce33c98ee 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -464,11 +464,8 @@ class WorkflowBasedAppRunner: in_iteration_id=event.in_iteration_id, in_loop_id=event.in_loop_id, chunk_type=QueueChunkType(event.chunk_type.value), - tool_call_id=event.tool_call_id, - tool_name=event.tool_name, - tool_arguments=event.tool_arguments, - tool_files=event.tool_files, - tool_error=event.tool_error, + tool_call=event.tool_call, + tool_result=event.tool_result, ) ) elif isinstance(event, NodeRunRetrieverResourceEvent): diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index edb2c8a1f3..e07efbc38c 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -9,6 +9,7 @@ from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.workflow.entities import AgentNodeStrategyInit from core.workflow.enums import WorkflowNodeExecutionMetadataKey +from core.workflow.graph_events import ToolCall, ToolResult from core.workflow.nodes import NodeType @@ -204,19 +205,11 @@ class QueueTextChunkEvent(AppQueueEvent): chunk_type: ChunkType = ChunkType.TEXT """type of the chunk""" - # Tool call fields (when chunk_type == TOOL_CALL) - tool_call_id: str | None = None - """unique identifier for this tool call""" - tool_name: str | None = None - """name 
of the tool being called""" - tool_arguments: str | None = None - """accumulated tool arguments JSON""" - - # Tool result fields (when chunk_type == TOOL_RESULT) - tool_files: list[str] = Field(default_factory=list) - """file IDs produced by tool""" - tool_error: str | None = None - """error message if tool failed""" + # Tool streaming payloads + tool_call: ToolCall | None = None + """structured tool call info""" + tool_result: ToolResult | None = None + """structured tool result info""" class QueueAgentMessageEvent(AppQueueEvent): diff --git a/api/core/workflow/entities/__init__.py b/api/core/workflow/entities/__init__.py index be70e467a0..0f3b9a5239 100644 --- a/api/core/workflow/entities/__init__.py +++ b/api/core/workflow/entities/__init__.py @@ -1,11 +1,16 @@ from .agent import AgentNodeStrategyInit from .graph_init_params import GraphInitParams +from .tool_entities import ToolCall, ToolCallResult, ToolResult, ToolResultStatus from .workflow_execution import WorkflowExecution from .workflow_node_execution import WorkflowNodeExecution __all__ = [ "AgentNodeStrategyInit", "GraphInitParams", + "ToolCall", + "ToolCallResult", + "ToolResult", + "ToolResultStatus", "WorkflowExecution", "WorkflowNodeExecution", ] diff --git a/api/core/workflow/entities/tool_entities.py b/api/core/workflow/entities/tool_entities.py new file mode 100644 index 0000000000..f4833218c7 --- /dev/null +++ b/api/core/workflow/entities/tool_entities.py @@ -0,0 +1,33 @@ +from enum import StrEnum + +from pydantic import BaseModel, Field + +from core.file import File + + +class ToolResultStatus(StrEnum): + SUCCESS = "success" + ERROR = "error" + + +class ToolCall(BaseModel): + id: str | None = Field(default=None, description="Unique identifier for this tool call") + name: str | None = Field(default=None, description="Name of the tool being called") + arguments: str | None = Field(default=None, description="Accumulated tool arguments JSON") + + +class ToolResult(BaseModel): + id: str | None = Field(default=None, description="Identifier of the tool call this result belongs to") + name: str | None = Field(default=None, description="Name of the tool") + output: str | None = Field(default=None, description="Tool output text, error or success message") + files: list[str] = Field(default_factory=list, description="File produced by tool") + status: ToolResultStatus | None = Field(default=ToolResultStatus.SUCCESS, description="Tool execution status") + + +class ToolCallResult(BaseModel): + id: str | None = Field(default=None, description="Identifier for the tool call") + name: str | None = Field(default=None, description="Name of the tool") + arguments: str | None = Field(default=None, description="Accumulated tool arguments JSON") + output: str | None = Field(default=None, description="Tool output text, error or success message") + files: list[File] = Field(default_factory=list, description="File produced by tool") + status: ToolResultStatus = Field(default=ToolResultStatus.SUCCESS, description="Tool execution status") diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index 1396c3a7ff..631440c6c1 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -16,7 +16,13 @@ from pydantic import BaseModel, Field from core.workflow.enums import NodeExecutionType, NodeState from core.workflow.graph import Graph -from core.workflow.graph_events 
import NodeRunStreamChunkEvent, NodeRunSucceededEvent +from core.workflow.graph_events import ( + ChunkType, + NodeRunStreamChunkEvent, + NodeRunSucceededEvent, + ToolCall, + ToolResult, +) from core.workflow.nodes.base.template import TextSegment, VariableSegment from core.workflow.runtime import VariablePool @@ -321,7 +327,9 @@ class ResponseStreamCoordinator: selector: Sequence[str], chunk: str, is_final: bool = False, - **extra_fields, + chunk_type: ChunkType = ChunkType.TEXT, + tool_call: ToolCall | None = None, + tool_result: ToolResult | None = None, ) -> NodeRunStreamChunkEvent: """Create a stream chunk event with consistent structure. @@ -334,7 +342,9 @@ class ResponseStreamCoordinator: selector: The variable selector chunk: The chunk content is_final: Whether this is the final chunk - **extra_fields: Additional fields for specialized events (chunk_type, tool_call_id, etc.) + chunk_type: The semantic type of the chunk being streamed + tool_call: Structured data for tool_call chunks + tool_result: Structured data for tool_result chunks """ # Check if this is a special selector that doesn't correspond to a node if selector and selector[0] not in self._graph.nodes and self._active_session: @@ -347,7 +357,9 @@ class ResponseStreamCoordinator: selector=selector, chunk=chunk, is_final=is_final, - **extra_fields, + chunk_type=chunk_type, + tool_call=tool_call, + tool_result=tool_result, ) # Standard case: selector refers to an actual node @@ -359,7 +371,9 @@ class ResponseStreamCoordinator: selector=selector, chunk=chunk, is_final=is_final, - **extra_fields, + chunk_type=chunk_type, + tool_call=tool_call, + tool_result=tool_result, ) def _process_variable_segment(self, segment: VariableSegment) -> tuple[Sequence[NodeRunStreamChunkEvent], bool]: @@ -436,11 +450,8 @@ class ResponseStreamCoordinator: chunk=event.chunk, is_final=event.is_final, chunk_type=event.chunk_type, - tool_call_id=event.tool_call_id, - tool_name=event.tool_name, - tool_arguments=event.tool_arguments, - tool_files=event.tool_files, - tool_error=event.tool_error, + tool_call=event.tool_call, + tool_result=event.tool_result, ) events.append(updated_event) else: diff --git a/api/core/workflow/graph_events/__init__.py b/api/core/workflow/graph_events/__init__.py index 6c37fa1bc6..4ee0ec94d2 100644 --- a/api/core/workflow/graph_events/__init__.py +++ b/api/core/workflow/graph_events/__init__.py @@ -45,6 +45,8 @@ from .node import ( NodeRunStartedEvent, NodeRunStreamChunkEvent, NodeRunSucceededEvent, + ToolCall, + ToolResult, ) __all__ = [ @@ -75,4 +77,6 @@ __all__ = [ "NodeRunStartedEvent", "NodeRunStreamChunkEvent", "NodeRunSucceededEvent", + "ToolCall", + "ToolResult", ] diff --git a/api/core/workflow/graph_events/node.py b/api/core/workflow/graph_events/node.py index 3351f028b1..01bc27d3e4 100644 --- a/api/core/workflow/graph_events/node.py +++ b/api/core/workflow/graph_events/node.py @@ -5,7 +5,7 @@ from enum import StrEnum from pydantic import Field from core.rag.entities.citation_metadata import RetrievalSourceMetadata -from core.workflow.entities import AgentNodeStrategyInit +from core.workflow.entities import AgentNodeStrategyInit, ToolCall, ToolResult from core.workflow.entities.pause_reason import PauseReason from .base import GraphNodeEventBase @@ -43,13 +43,16 @@ class NodeRunStreamChunkEvent(GraphNodeEventBase): chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk") # Tool call fields (when chunk_type == TOOL_CALL) - tool_call_id: str | None = Field(default=None, 
description="unique identifier for this tool call") - tool_name: str | None = Field(default=None, description="name of the tool being called") - tool_arguments: str | None = Field(default=None, description="accumulated tool arguments JSON") + tool_call: ToolCall | None = Field( + default=None, + description="structured payload for tool_call chunks", + ) # Tool result fields (when chunk_type == TOOL_RESULT) - tool_files: list[str] = Field(default_factory=list, description="file IDs produced by tool") - tool_error: str | None = Field(default=None, description="error message if tool failed") + tool_result: ToolResult | None = Field( + default=None, + description="structured payload for tool_result chunks", + ) class NodeRunRetrieverResourceEvent(GraphNodeEventBase): diff --git a/api/core/workflow/node_events/node.py b/api/core/workflow/node_events/node.py index 43e00ae7d1..39f09d02a5 100644 --- a/api/core/workflow/node_events/node.py +++ b/api/core/workflow/node_events/node.py @@ -7,6 +7,7 @@ from pydantic import Field from core.file import File from core.model_runtime.entities.llm_entities import LLMUsage from core.rag.entities.citation_metadata import RetrievalSourceMetadata +from core.workflow.entities import ToolCall, ToolResult from core.workflow.entities.pause_reason import PauseReason from core.workflow.node_events import NodeRunResult @@ -51,25 +52,22 @@ class StreamChunkEvent(NodeEventBase): chunk: str = Field(..., description="the actual chunk content") is_final: bool = Field(default=False, description="indicates if this is the last chunk") chunk_type: ChunkType = Field(default=ChunkType.TEXT, description="type of the chunk") + tool_call: ToolCall | None = Field(default=None, description="structured payload for tool_call chunks") + tool_result: ToolResult | None = Field(default=None, description="structured payload for tool_result chunks") class ToolCallChunkEvent(StreamChunkEvent): """Tool call streaming event - tool call arguments streaming output.""" chunk_type: ChunkType = Field(default=ChunkType.TOOL_CALL, frozen=True) - tool_call_id: str = Field(..., description="unique identifier for this tool call") - tool_name: str = Field(..., description="name of the tool being called") - tool_arguments: str = Field(default="", description="accumulated tool arguments JSON") + tool_call: ToolCall | None = Field(default=None, description="structured tool call payload") class ToolResultChunkEvent(StreamChunkEvent): """Tool result event - tool execution result.""" chunk_type: ChunkType = Field(default=ChunkType.TOOL_RESULT, frozen=True) - tool_call_id: str = Field(..., description="identifier of the tool call this result belongs to") - tool_name: str = Field(..., description="name of the tool") - tool_files: list[str] = Field(default_factory=list, description="file IDs produced by tool") - tool_error: str | None = Field(default=None, description="error message if tool failed") + tool_result: ToolResult | None = Field(default=None, description="structured tool result payload") class ThoughtChunkEvent(StreamChunkEvent): diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index e71335d3b4..40feda8b57 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -556,6 +556,8 @@ class Node(Generic[NodeDataT]): chunk=event.chunk, is_final=event.is_final, chunk_type=ChunkType(event.chunk_type.value), + tool_call=event.tool_call, + tool_result=event.tool_result, ) @_dispatch.register @@ -570,14 +572,18 @@ class 
Node(Generic[NodeDataT]): chunk=event.chunk, is_final=event.is_final, chunk_type=ChunkType.TOOL_CALL, - tool_call_id=event.tool_call_id, - tool_name=event.tool_name, - tool_arguments=event.tool_arguments, + tool_call=event.tool_call, ) @_dispatch.register def _(self, event: ToolResultChunkEvent) -> NodeRunStreamChunkEvent: - from core.workflow.graph_events import ChunkType + from core.workflow.entities import ToolResult + from core.workflow.graph_events import ChunkType, ToolResultStatus + + tool_result = event.tool_result + status: ToolResultStatus = ( + tool_result.status if tool_result and tool_result.status is not None else ToolResultStatus.SUCCESS + ) return NodeRunStreamChunkEvent( id=self._node_execution_id, @@ -587,10 +593,13 @@ class Node(Generic[NodeDataT]): chunk=event.chunk, is_final=event.is_final, chunk_type=ChunkType.TOOL_RESULT, - tool_call_id=event.tool_call_id, - tool_name=event.tool_name, - tool_files=event.tool_files, - tool_error=event.tool_error, + tool_result=ToolResult( + id=tool_result.id if tool_result else None, + name=tool_result.name if tool_result else None, + output=tool_result.output if tool_result else None, + files=tool_result.files if tool_result else [], + status=status, + ), ) @_dispatch.register diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index 82f146414b..f57e251cdf 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -8,6 +8,7 @@ from core.model_runtime.entities import ImagePromptMessageContent, LLMMode from core.model_runtime.entities.llm_entities import LLMUsage from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig from core.tools.entities.tool_entities import ToolProviderType +from core.workflow.entities import ToolCallResult from core.workflow.nodes.base import BaseNodeData from core.workflow.nodes.base.entities import VariableSelector @@ -33,12 +34,10 @@ class LLMTraceSegment(BaseModel): text: str | None = Field(None, description="Text chunk for thought/content") # Tool call fields (combined start + result) - tool_call_id: str | None = None - tool_name: str | None = None - tool_arguments: str | None = None - tool_output: str | None = None - tool_error: str | None = None - files: list[str] = Field(default_factory=list, description="File IDs from tool result if any") + tool_call: ToolCallResult | None = Field( + default=None, + description="Combined tool call arguments and result for this segment", + ) class LLMGenerationData(BaseModel): @@ -51,7 +50,7 @@ class LLMGenerationData(BaseModel): text: str = Field(..., description="Accumulated text content from all turns") reasoning_contents: list[str] = Field(default_factory=list, description="Reasoning content per turn") - tool_calls: list[dict[str, Any]] = Field(default_factory=list, description="Tool calls with results") + tool_calls: list[ToolCallResult] = Field(default_factory=list, description="Tool calls with results") sequence: list[dict[str, Any]] = Field(default_factory=list, description="Ordered segments for rendering") usage: LLMUsage = Field(..., description="LLM usage statistics") finish_reason: str | None = Field(None, description="Finish reason from LLM") diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 3b82ec4a05..94b616bd34 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -60,7 +60,8 @@ from core.variables import ( StringSegment, ) 
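Assuming this patch is applied, the loose tool_* fields on stream chunk events are replaced by the structured payloads from core.workflow.entities; a small usage sketch mirroring the updated unit tests:

from core.workflow.entities import ToolCall, ToolResult, ToolResultStatus
from core.workflow.node_events import ToolCallChunkEvent, ToolResultChunkEvent

# Tool arguments stream as a tool_call payload...
call_event = ToolCallChunkEvent(
    selector=["llm_node", "generation", "tool_calls"],
    chunk='{"city": "Beijing"}',
    tool_call=ToolCall(id="call_1", name="weather", arguments='{"city": "Beijing"}'),
)

# ...and the execution outcome streams as a tool_result payload.
result_event = ToolResultChunkEvent(
    selector=["llm_node", "generation", "tool_results"],
    chunk="Sunny, 25°C",
    tool_result=ToolResult(
        id="call_1",
        name="weather",
        output="Sunny, 25°C",
        status=ToolResultStatus.SUCCESS,
    ),
)

assert call_event.chunk_type.value == "tool_call"
assert result_event.tool_result is not None and result_event.tool_result.files == []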
from core.workflow.constants import SYSTEM_VARIABLE_NODE_ID -from core.workflow.entities import GraphInitParams +from core.workflow.entities import GraphInitParams, ToolCall, ToolResult, ToolResultStatus +from core.workflow.entities.tool_entities import ToolCallResult from core.workflow.enums import ( NodeType, SystemVariableKey, @@ -1671,9 +1672,11 @@ class LLMNode(Node[LLMNodeData]): tool_call_segment = LLMTraceSegment( type="tool_call", text=None, - tool_call_id=tool_call_id, - tool_name=tool_name, - tool_arguments=tool_arguments, + tool_call=ToolCallResult( + id=tool_call_id, + name=tool_name, + arguments=tool_arguments, + ), ) trace_segments.append(tool_call_segment) if tool_call_id: @@ -1682,9 +1685,11 @@ class LLMNode(Node[LLMNodeData]): yield ToolCallChunkEvent( selector=[self._node_id, "generation", "tool_calls"], chunk=tool_arguments, - tool_call_id=tool_call_id, - tool_name=tool_name, - tool_arguments=tool_arguments, + tool_call=ToolCall( + id=tool_call_id, + name=tool_name, + arguments=tool_arguments, + ), is_final=False, ) @@ -1724,27 +1729,50 @@ class LLMNode(Node[LLMNodeData]): tool_call_segment = existing_tool_segment or LLMTraceSegment( type="tool_call", text=None, - tool_call_id=tool_call_id, - tool_name=tool_name, - tool_arguments=None, + tool_call=ToolCallResult( + id=tool_call_id, + name=tool_name, + arguments=None, + ), ) if existing_tool_segment is None: trace_segments.append(tool_call_segment) if tool_call_id: tool_trace_map[tool_call_id] = tool_call_segment - tool_call_segment.tool_output = str(tool_output) if tool_output is not None else None - tool_call_segment.tool_error = str(tool_error) if tool_error is not None else None - tool_call_segment.files = [str(f) for f in tool_files] if tool_files else [] + if tool_call_segment.tool_call is None: + tool_call_segment.tool_call = ToolCallResult( + id=tool_call_id, + name=tool_name, + arguments=None, + ) + tool_call_segment.tool_call.output = ( + str(tool_output) + if tool_output is not None + else str(tool_error) + if tool_error is not None + else None + ) + tool_call_segment.tool_call.files = [] + tool_call_segment.tool_call.status = ( + ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS + ) current_turn += 1 + result_output = ( + str(tool_output) if tool_output is not None else str(tool_error) if tool_error else None + ) + yield ToolResultChunkEvent( selector=[self._node_id, "generation", "tool_results"], - chunk=str(tool_output) if tool_output else "", - tool_call_id=tool_call_id, - tool_name=tool_name, - tool_files=tool_files, - tool_error=tool_error, + chunk=result_output or "", + tool_result=ToolResult( + id=tool_call_id, + name=tool_name, + output=result_output, + files=tool_files, + status=ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS, + ), is_final=False, ) @@ -1865,7 +1893,7 @@ class LLMNode(Node[LLMNodeData]): sequence.append({"type": "content", "start": start, "end": end}) content_position = end elif trace_segment.type == "tool_call": - tool_id = trace_segment.tool_call_id or "" + tool_id = trace_segment.tool_call.id if trace_segment.tool_call and trace_segment.tool_call.id else "" if tool_id not in tool_call_seen_index: tool_call_seen_index[tool_id] = len(tool_call_seen_index) sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]}) @@ -1893,9 +1921,11 @@ class LLMNode(Node[LLMNodeData]): yield ToolCallChunkEvent( selector=[self._node_id, "generation", "tool_calls"], chunk="", - tool_call_id="", - tool_name="", - tool_arguments="", + 
tool_call=ToolCall( + id="", + name="", + arguments="", + ), is_final=True, ) @@ -1903,33 +1933,40 @@ class LLMNode(Node[LLMNodeData]): yield ToolResultChunkEvent( selector=[self._node_id, "generation", "tool_results"], chunk="", - tool_call_id="", - tool_name="", - tool_files=[], - tool_error=None, + tool_result=ToolResult( + id="", + name="", + output="", + files=[], + status=ToolResultStatus.SUCCESS, + ), is_final=True, ) # Build tool_calls from agent_logs (with results) - tool_calls_for_generation = [] + tool_calls_for_generation: list[ToolCallResult] = [] for log in agent_logs: tool_call_id = log.data.get("tool_call_id") if not tool_call_id or log.status == AgentLog.LogStatus.START.value: continue tool_args = log.data.get("tool_args") or {} + log_error = log.data.get("error") + log_output = log.data.get("output") + result_text = log_output or log_error or "" + status = ToolResultStatus.ERROR if log_error else ToolResultStatus.SUCCESS tool_calls_for_generation.append( - { - "id": tool_call_id, - "name": log.data.get("tool_name", ""), - "arguments": json.dumps(tool_args) if tool_args else "", - # Prefer output, fall back to error text if present - "result": log.data.get("output") or log.data.get("error") or "", - } + ToolCallResult( + id=tool_call_id, + name=log.data.get("tool_name", ""), + arguments=json.dumps(tool_args) if tool_args else "", + output=result_text, + status=status, + ) ) tool_calls_for_generation.sort( - key=lambda item: tool_call_index_map.get(item.get("id", ""), len(tool_call_index_map)) + key=lambda item: tool_call_index_map.get(item.id or "", len(tool_call_index_map)) ) # Return generation data for caller diff --git a/api/models/workflow.py b/api/models/workflow.py index 89ec0352df..bc229fb4e4 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -683,10 +683,6 @@ class WorkflowRun(Base): def outputs_dict(self) -> Mapping[str, Any]: return json.loads(self.outputs) if self.outputs else {} - @property - def outputs_as_generation(self) -> bool: - return is_generation_outputs(self.outputs_dict) - @property def message(self): from .model import Message @@ -712,6 +708,7 @@ class WorkflowRun(Base): "inputs": self.inputs_dict, "status": self.status, "outputs": self.outputs_dict, + "outputs_as_generation": is_generation_outputs(self.outputs_dict), "error": self.error, "elapsed_time": self.elapsed_time, "total_tokens": self.total_tokens, diff --git a/api/services/workflow_run_service.py b/api/services/workflow_run_service.py index 1bc821c43d..b903d8df5f 100644 --- a/api/services/workflow_run_service.py +++ b/api/services/workflow_run_service.py @@ -102,14 +102,12 @@ class WorkflowRunService: :param app_model: app model :param run_id: workflow run id """ - workflow_run = self._workflow_run_repo.get_workflow_run_by_id( + return self._workflow_run_repo.get_workflow_run_by_id( tenant_id=app_model.tenant_id, app_id=app_model.id, run_id=run_id, ) - return workflow_run - def get_workflow_runs_count( self, app_model: App, diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py index 388496ce1d..8e0eba71cc 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -2,10 +2,16 @@ from unittest.mock import MagicMock +from core.workflow.entities import ToolResultStatus from core.workflow.enums import NodeType from core.workflow.graph import Graph 
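A short sketch of the aggregation above, assuming the patch is applied: each finished tool call becomes one ToolCallResult, and the list is ordered by the index at which the call first appeared in the stream (the first_seen_index mapping here is illustrative).

from core.workflow.entities import ToolCallResult, ToolResultStatus

first_seen_index = {"call_1": 0, "call_2": 1}  # tool_call_id -> order of first appearance

finished_calls = [
    ToolCallResult(id="call_2", name="search", arguments='{"q": "dify"}',
                   output="3 results", status=ToolResultStatus.SUCCESS),
    ToolCallResult(id="call_1", name="weather", arguments='{"city": "Beijing"}',
                   output="upstream timeout", status=ToolResultStatus.ERROR),
]
finished_calls.sort(key=lambda item: first_seen_index.get(item.id or "", len(first_seen_index)))

assert [call.id for call in finished_calls] == ["call_1", "call_2"]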
from core.workflow.graph_engine.response_coordinator.coordinator import ResponseStreamCoordinator -from core.workflow.graph_events import ChunkType, NodeRunStreamChunkEvent +from core.workflow.graph_events import ( + ChunkType, + NodeRunStreamChunkEvent, + ToolCall, + ToolResult, +) from core.workflow.nodes.base.entities import BaseNodeData from core.workflow.runtime import VariablePool @@ -80,9 +86,11 @@ class TestResponseCoordinatorObjectStreaming: chunk='{"query": "test"}', is_final=True, chunk_type=ChunkType.TOOL_CALL, - tool_call_id="call_123", - tool_name="search", - tool_arguments='{"query": "test"}', + tool_call=ToolCall( + id="call_123", + name="search", + arguments='{"query": "test"}', + ), ) # 3. Tool result stream @@ -94,10 +102,13 @@ class TestResponseCoordinatorObjectStreaming: chunk="Found 10 results", is_final=True, chunk_type=ChunkType.TOOL_RESULT, - tool_call_id="call_123", - tool_name="search", - tool_files=[], - tool_error=None, + tool_result=ToolResult( + id="call_123", + name="search", + output="Found 10 results", + files=[], + status=ToolResultStatus.SUCCESS, + ), ) # Intercept these events @@ -111,6 +122,14 @@ class TestResponseCoordinatorObjectStreaming: assert ("llm_node", "generation", "tool_calls") in coordinator._stream_buffers assert ("llm_node", "generation", "tool_results") in coordinator._stream_buffers + # Verify payloads are preserved in buffered events + buffered_call = coordinator._stream_buffers[("llm_node", "generation", "tool_calls")][0] + assert buffered_call.tool_call is not None + assert buffered_call.tool_call.id == "call_123" + buffered_result = coordinator._stream_buffers[("llm_node", "generation", "tool_results")][0] + assert buffered_result.tool_result is not None + assert buffered_result.tool_result.status == "success" + # Verify we can find child streams child_streams = coordinator._find_child_streams(["llm_node", "generation"]) assert len(child_streams) == 3 diff --git a/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py index f6e0834b1e..951149e933 100644 --- a/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py +++ b/api/tests/unit_tests/core/workflow/node_events/test_stream_chunk_events.py @@ -1,5 +1,6 @@ """Tests for StreamChunkEvent and its subclasses.""" +from core.workflow.entities import ToolCall, ToolResult, ToolResultStatus from core.workflow.node_events import ( ChunkType, StreamChunkEvent, @@ -87,14 +88,13 @@ class TestToolCallChunkEvent: event = ToolCallChunkEvent( selector=["node1", "tool_calls"], chunk='{"city": "Beijing"}', - tool_call_id="call_123", - tool_name="weather", + tool_call=ToolCall(id="call_123", name="weather", arguments=None), ) assert event.selector == ["node1", "tool_calls"] assert event.chunk == '{"city": "Beijing"}' - assert event.tool_call_id == "call_123" - assert event.tool_name == "weather" + assert event.tool_call.id == "call_123" + assert event.tool_call.name == "weather" assert event.chunk_type == ChunkType.TOOL_CALL def test_chunk_type_is_tool_call(self): @@ -102,8 +102,7 @@ class TestToolCallChunkEvent: event = ToolCallChunkEvent( selector=["node1", "tool_calls"], chunk="", - tool_call_id="call_123", - tool_name="test_tool", + tool_call=ToolCall(id="call_123", name="test_tool", arguments=None), ) assert event.chunk_type == ChunkType.TOOL_CALL @@ -113,30 +112,34 @@ class TestToolCallChunkEvent: event = ToolCallChunkEvent( selector=["node1", "tool_calls"], chunk='{"param": 
"value"}', - tool_call_id="call_123", - tool_name="test_tool", - tool_arguments='{"param": "value"}', + tool_call=ToolCall( + id="call_123", + name="test_tool", + arguments='{"param": "value"}', + ), ) - assert event.tool_arguments == '{"param": "value"}' + assert event.tool_call.arguments == '{"param": "value"}' def test_serialization(self): """Test that event can be serialized to dict.""" event = ToolCallChunkEvent( selector=["node1", "tool_calls"], chunk='{"city": "Beijing"}', - tool_call_id="call_123", - tool_name="weather", - tool_arguments='{"city": "Beijing"}', + tool_call=ToolCall( + id="call_123", + name="weather", + arguments='{"city": "Beijing"}', + ), is_final=True, ) data = event.model_dump() assert data["chunk_type"] == "tool_call" - assert data["tool_call_id"] == "call_123" - assert data["tool_name"] == "weather" - assert data["tool_arguments"] == '{"city": "Beijing"}' + assert data["tool_call"]["id"] == "call_123" + assert data["tool_call"]["name"] == "weather" + assert data["tool_call"]["arguments"] == '{"city": "Beijing"}' assert data["is_final"] is True @@ -148,14 +151,13 @@ class TestToolResultChunkEvent: event = ToolResultChunkEvent( selector=["node1", "tool_results"], chunk="Weather: Sunny, 25°C", - tool_call_id="call_123", - tool_name="weather", + tool_result=ToolResult(id="call_123", name="weather", output="Weather: Sunny, 25°C"), ) assert event.selector == ["node1", "tool_results"] assert event.chunk == "Weather: Sunny, 25°C" - assert event.tool_call_id == "call_123" - assert event.tool_name == "weather" + assert event.tool_result.id == "call_123" + assert event.tool_result.name == "weather" assert event.chunk_type == ChunkType.TOOL_RESULT def test_chunk_type_is_tool_result(self): @@ -163,8 +165,7 @@ class TestToolResultChunkEvent: event = ToolResultChunkEvent( selector=["node1", "tool_results"], chunk="result", - tool_call_id="call_123", - tool_name="test_tool", + tool_result=ToolResult(id="call_123", name="test_tool"), ) assert event.chunk_type == ChunkType.TOOL_RESULT @@ -174,55 +175,62 @@ class TestToolResultChunkEvent: event = ToolResultChunkEvent( selector=["node1", "tool_results"], chunk="result", - tool_call_id="call_123", - tool_name="test_tool", + tool_result=ToolResult(id="call_123", name="test_tool"), ) - assert event.tool_files == [] + assert event.tool_result.files == [] def test_tool_files_with_values(self): """Test tool_files with file IDs.""" event = ToolResultChunkEvent( selector=["node1", "tool_results"], chunk="result", - tool_call_id="call_123", - tool_name="test_tool", - tool_files=["file_1", "file_2"], + tool_result=ToolResult( + id="call_123", + name="test_tool", + files=["file_1", "file_2"], + ), ) - assert event.tool_files == ["file_1", "file_2"] + assert event.tool_result.files == ["file_1", "file_2"] - def test_tool_error_field(self): - """Test tool_error field.""" + def test_tool_error_output(self): + """Test error output captured in tool_result.""" event = ToolResultChunkEvent( selector=["node1", "tool_results"], chunk="", - tool_call_id="call_123", - tool_name="test_tool", - tool_error="Tool execution failed", + tool_result=ToolResult( + id="call_123", + name="test_tool", + output="Tool execution failed", + status=ToolResultStatus.ERROR, + ), ) - assert event.tool_error == "Tool execution failed" + assert event.tool_result.output == "Tool execution failed" + assert event.tool_result.status == ToolResultStatus.ERROR def test_serialization(self): """Test that event can be serialized to dict.""" event = ToolResultChunkEvent( 
selector=["node1", "tool_results"], chunk="Weather: Sunny", - tool_call_id="call_123", - tool_name="weather", - tool_files=["file_1"], - tool_error=None, + tool_result=ToolResult( + id="call_123", + name="weather", + output="Weather: Sunny", + files=["file_1"], + status=ToolResultStatus.SUCCESS, + ), is_final=True, ) data = event.model_dump() assert data["chunk_type"] == "tool_result" - assert data["tool_call_id"] == "call_123" - assert data["tool_name"] == "weather" - assert data["tool_files"] == ["file_1"] - assert data["tool_error"] is None + assert data["tool_result"]["id"] == "call_123" + assert data["tool_result"]["name"] == "weather" + assert data["tool_result"]["files"] == ["file_1"] assert data["is_final"] is True @@ -272,8 +280,7 @@ class TestEventInheritance: event = ToolCallChunkEvent( selector=["node1", "tool_calls"], chunk="", - tool_call_id="call_123", - tool_name="test", + tool_call=ToolCall(id="call_123", name="test", arguments=None), ) assert isinstance(event, StreamChunkEvent) @@ -283,8 +290,7 @@ class TestEventInheritance: event = ToolResultChunkEvent( selector=["node1", "tool_results"], chunk="result", - tool_call_id="call_123", - tool_name="test", + tool_result=ToolResult(id="call_123", name="test"), ) assert isinstance(event, StreamChunkEvent) @@ -302,8 +308,16 @@ class TestEventInheritance: """Test that all events have common StreamChunkEvent fields.""" events = [ StreamChunkEvent(selector=["n", "t"], chunk="a"), - ToolCallChunkEvent(selector=["n", "t"], chunk="b", tool_call_id="1", tool_name="t"), - ToolResultChunkEvent(selector=["n", "t"], chunk="c", tool_call_id="1", tool_name="t"), + ToolCallChunkEvent( + selector=["n", "t"], + chunk="b", + tool_call=ToolCall(id="1", name="t", arguments=None), + ), + ToolResultChunkEvent( + selector=["n", "t"], + chunk="c", + tool_result=ToolResult(id="1", name="t"), + ), ThoughtChunkEvent(selector=["n", "t"], chunk="d"), ] From 92fa7271edc41057e727dca62646c1a5fb78f270 Mon Sep 17 00:00:00 2001 From: Novice Date: Wed, 17 Dec 2025 15:42:23 +0800 Subject: [PATCH 12/18] refactor(llm node): remove unused args --- api/core/workflow/nodes/llm/entities.py | 248 +++++++-- api/core/workflow/nodes/llm/llm_utils.py | 92 ---- api/core/workflow/nodes/llm/node.py | 616 +++++++++++------------ 3 files changed, 502 insertions(+), 454 deletions(-) diff --git a/api/core/workflow/nodes/llm/entities.py b/api/core/workflow/nodes/llm/entities.py index f57e251cdf..c1938fb5e3 100644 --- a/api/core/workflow/nodes/llm/entities.py +++ b/api/core/workflow/nodes/llm/entities.py @@ -1,14 +1,17 @@ +import re from collections.abc import Mapping, Sequence from typing import Any, Literal -from pydantic import BaseModel, Field, field_validator +from pydantic import BaseModel, ConfigDict, Field, field_validator +from core.agent.entities import AgentLog, AgentResult from core.file import File from core.model_runtime.entities import ImagePromptMessageContent, LLMMode from core.model_runtime.entities.llm_entities import LLMUsage from core.prompt.entities.advanced_prompt_entities import ChatModelMessage, CompletionModelPromptTemplate, MemoryConfig from core.tools.entities.tool_entities import ToolProviderType from core.workflow.entities import ToolCallResult +from core.workflow.node_events import AgentLogEvent from core.workflow.nodes.base import BaseNodeData from core.workflow.nodes.base.entities import VariableSelector @@ -20,44 +23,6 @@ class ModelConfig(BaseModel): completion_params: dict[str, Any] = Field(default_factory=dict) -class LLMTraceSegment(BaseModel): - 
""" - Streaming trace segment for LLM tool-enabled runs. - - Order is preserved for replay. Tool calls are single entries containing both - arguments and results. - """ - - type: Literal["thought", "content", "tool_call"] - - # Common optional fields - text: str | None = Field(None, description="Text chunk for thought/content") - - # Tool call fields (combined start + result) - tool_call: ToolCallResult | None = Field( - default=None, - description="Combined tool call arguments and result for this segment", - ) - - -class LLMGenerationData(BaseModel): - """Generation data from LLM invocation with tools. - - For multi-turn tool calls like: thought1 -> text1 -> tool_call1 -> thought2 -> text2 -> tool_call2 - - reasoning_contents: [thought1, thought2, ...] - one element per turn - - tool_calls: [{id, name, arguments, result}, ...] - all tool calls with results - """ - - text: str = Field(..., description="Accumulated text content from all turns") - reasoning_contents: list[str] = Field(default_factory=list, description="Reasoning content per turn") - tool_calls: list[ToolCallResult] = Field(default_factory=list, description="Tool calls with results") - sequence: list[dict[str, Any]] = Field(default_factory=list, description="Ordered segments for rendering") - usage: LLMUsage = Field(..., description="LLM usage statistics") - finish_reason: str | None = Field(None, description="Finish reason from LLM") - files: list[File] = Field(default_factory=list, description="Generated files") - trace: list[LLMTraceSegment] = Field(default_factory=list, description="Streaming trace in emitted order") - - class ContextConfig(BaseModel): enabled: bool variable_selector: list[str] | None = None @@ -124,6 +89,211 @@ class ToolMetadata(BaseModel): extra: dict[str, Any] = Field(default_factory=dict, description="Extra tool configuration like custom description") +class LLMTraceSegment(BaseModel): + """ + Streaming trace segment for LLM tool-enabled runs. + + Order is preserved for replay. Tool calls are single entries containing both + arguments and results. + """ + + type: Literal["thought", "content", "tool_call"] + + # Common optional fields + text: str | None = Field(None, description="Text chunk for thought/content") + + # Tool call fields (combined start + result) + tool_call: ToolCallResult | None = Field( + default=None, + description="Combined tool call arguments and result for this segment", + ) + + +class LLMGenerationData(BaseModel): + """Generation data from LLM invocation with tools. + + For multi-turn tool calls like: thought1 -> text1 -> tool_call1 -> thought2 -> text2 -> tool_call2 + - reasoning_contents: [thought1, thought2, ...] - one element per turn + - tool_calls: [{id, name, arguments, result}, ...] 
- all tool calls with results + """ + + text: str = Field(..., description="Accumulated text content from all turns") + reasoning_contents: list[str] = Field(default_factory=list, description="Reasoning content per turn") + tool_calls: list[ToolCallResult] = Field(default_factory=list, description="Tool calls with results") + sequence: list[dict[str, Any]] = Field(default_factory=list, description="Ordered segments for rendering") + usage: LLMUsage = Field(..., description="LLM usage statistics") + finish_reason: str | None = Field(None, description="Finish reason from LLM") + files: list[File] = Field(default_factory=list, description="Generated files") + trace: list[LLMTraceSegment] = Field(default_factory=list, description="Streaming trace in emitted order") + + +class ThinkTagStreamParser: + """Lightweight state machine to split streaming chunks by tags.""" + + _START_PATTERN = re.compile(r"]*)?>", re.IGNORECASE) + _END_PATTERN = re.compile(r"", re.IGNORECASE) + _START_PREFIX = " int: + """Return length of the longest suffix of `text` that is a prefix of `prefix`.""" + max_len = min(len(text), len(prefix) - 1) + for i in range(max_len, 0, -1): + if text[-i:].lower() == prefix[:i].lower(): + return i + return 0 + + def process(self, chunk: str) -> list[tuple[str, str]]: + """ + Split incoming chunk into ('thought' | 'text', content) tuples. + Content excludes the tags themselves and handles split tags across chunks. + """ + parts: list[tuple[str, str]] = [] + self._buffer += chunk + + while self._buffer: + if self._in_think: + end_match = self._END_PATTERN.search(self._buffer) + if end_match: + thought_text = self._buffer[: end_match.start()] + if thought_text: + parts.append(("thought", thought_text)) + self._buffer = self._buffer[end_match.end() :] + self._in_think = False + continue + + hold_len = self._suffix_prefix_len(self._buffer, self._END_PREFIX) + emit = self._buffer[: len(self._buffer) - hold_len] + if emit: + parts.append(("thought", emit)) + self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" + break + + start_match = self._START_PATTERN.search(self._buffer) + if start_match: + prefix = self._buffer[: start_match.start()] + if prefix: + parts.append(("text", prefix)) + self._buffer = self._buffer[start_match.end() :] + self._in_think = True + continue + + hold_len = self._suffix_prefix_len(self._buffer, self._START_PREFIX) + emit = self._buffer[: len(self._buffer) - hold_len] + if emit: + parts.append(("text", emit)) + self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" + break + + cleaned_parts: list[tuple[str, str]] = [] + for kind, content in parts: + # Extra safeguard: strip any stray tags that slipped through. + content = self._START_PATTERN.sub("", content) + content = self._END_PATTERN.sub("", content) + if content: + cleaned_parts.append((kind, content)) + + return cleaned_parts + + def flush(self) -> list[tuple[str, str]]: + """Flush remaining buffer when the stream ends.""" + if not self._buffer: + return [] + kind = "thought" if self._in_think else "text" + content = self._buffer + # Drop dangling partial tags instead of emitting them + if content.lower().startswith(self._START_PREFIX) or content.lower().startswith(self._END_PREFIX): + content = "" + self._buffer = "" + if not content: + return [] + # Strip any complete tags that might still be present. 
+ content = self._START_PATTERN.sub("", content) + content = self._END_PATTERN.sub("", content) + return [(kind, content)] if content else [] + + +class StreamBuffers(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + think_parser: ThinkTagStreamParser = Field(default_factory=ThinkTagStreamParser) + pending_thought: list[str] = Field(default_factory=list) + pending_content: list[str] = Field(default_factory=list) + current_turn_reasoning: list[str] = Field(default_factory=list) + reasoning_per_turn: list[str] = Field(default_factory=list) + + +class TraceState(BaseModel): + trace_segments: list[LLMTraceSegment] = Field(default_factory=list) + tool_trace_map: dict[str, LLMTraceSegment] = Field(default_factory=dict) + tool_call_index_map: dict[str, int] = Field(default_factory=dict) + + +class AggregatedResult(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + text: str = "" + files: list[File] = Field(default_factory=list) + usage: LLMUsage = Field(default_factory=LLMUsage.empty_usage) + finish_reason: str | None = None + + +class AgentContext(BaseModel): + agent_logs: list[AgentLogEvent] = Field(default_factory=list) + agent_result: AgentResult | None = None + + +class ToolOutputState(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + stream: StreamBuffers = Field(default_factory=StreamBuffers) + trace: TraceState = Field(default_factory=TraceState) + aggregate: AggregatedResult = Field(default_factory=AggregatedResult) + agent: AgentContext = Field(default_factory=AgentContext) + + +class ToolLogPayload(BaseModel): + model_config = ConfigDict(arbitrary_types_allowed=True) + + tool_name: str = "" + tool_call_id: str = "" + tool_args: dict[str, Any] = Field(default_factory=dict) + tool_output: Any = None + tool_error: Any = None + files: list[Any] = Field(default_factory=list) + meta: dict[str, Any] = Field(default_factory=dict) + + @classmethod + def from_log(cls, log: AgentLog) -> "ToolLogPayload": + data = log.data or {} + return cls( + tool_name=data.get("tool_name", ""), + tool_call_id=data.get("tool_call_id", ""), + tool_args=data.get("tool_args") or {}, + tool_output=data.get("output"), + tool_error=data.get("error"), + files=data.get("files") or [], + meta=data.get("meta") or {}, + ) + + @classmethod + def from_mapping(cls, data: Mapping[str, Any]) -> "ToolLogPayload": + return cls( + tool_name=data.get("tool_name", ""), + tool_call_id=data.get("tool_call_id", ""), + tool_args=data.get("tool_args") or {}, + tool_output=data.get("output"), + tool_error=data.get("error"), + files=data.get("files") or [], + meta=data.get("meta") or {}, + ) + + class LLMNodeData(BaseNodeData): model: ModelConfig prompt_template: Sequence[LLMNodeChatModelMessage] | LLMNodeCompletionModelPromptTemplate diff --git a/api/core/workflow/nodes/llm/llm_utils.py b/api/core/workflow/nodes/llm/llm_utils.py index e9c363851f..0c545469bc 100644 --- a/api/core/workflow/nodes/llm/llm_utils.py +++ b/api/core/workflow/nodes/llm/llm_utils.py @@ -1,4 +1,3 @@ -import re from collections.abc import Sequence from typing import cast @@ -155,94 +154,3 @@ def deduct_llm_quota(tenant_id: str, model_instance: ModelInstance, usage: LLMUs ) session.execute(stmt) session.commit() - - -class ThinkTagStreamParser: - """Lightweight state machine to split streaming chunks by tags.""" - - _START_PATTERN = re.compile(r"]*)?>", re.IGNORECASE) - _END_PATTERN = re.compile(r"", re.IGNORECASE) - _START_PREFIX = " int: - """Return length of the longest suffix of `text` that is 
a prefix of `prefix`.""" - max_len = min(len(text), len(prefix) - 1) - for i in range(max_len, 0, -1): - if text[-i:].lower() == prefix[:i].lower(): - return i - return 0 - - def process(self, chunk: str) -> list[tuple[str, str]]: - """ - Split incoming chunk into ('thought' | 'text', content) tuples. - Content excludes the tags themselves and handles split tags across chunks. - """ - parts: list[tuple[str, str]] = [] - self._buffer += chunk - - while self._buffer: - if self._in_think: - end_match = self._END_PATTERN.search(self._buffer) - if end_match: - thought_text = self._buffer[: end_match.start()] - if thought_text: - parts.append(("thought", thought_text)) - self._buffer = self._buffer[end_match.end() :] - self._in_think = False - continue - - hold_len = self._suffix_prefix_len(self._buffer, self._END_PREFIX) - emit = self._buffer[: len(self._buffer) - hold_len] - if emit: - parts.append(("thought", emit)) - self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" - break - - start_match = self._START_PATTERN.search(self._buffer) - if start_match: - prefix = self._buffer[: start_match.start()] - if prefix: - parts.append(("text", prefix)) - self._buffer = self._buffer[start_match.end() :] - self._in_think = True - continue - - hold_len = self._suffix_prefix_len(self._buffer, self._START_PREFIX) - emit = self._buffer[: len(self._buffer) - hold_len] - if emit: - parts.append(("text", emit)) - self._buffer = self._buffer[-hold_len:] if hold_len > 0 else "" - break - - cleaned_parts: list[tuple[str, str]] = [] - for kind, content in parts: - # Extra safeguard: strip any stray tags that slipped through. - content = self._START_PATTERN.sub("", content) - content = self._END_PATTERN.sub("", content) - if content: - cleaned_parts.append((kind, content)) - - return cleaned_parts - - def flush(self) -> list[tuple[str, str]]: - """Flush remaining buffer when the stream ends.""" - if not self._buffer: - return [] - kind = "thought" if self._in_think else "text" - content = self._buffer - # Drop dangling partial tags instead of emitting them - if content.lower().startswith(self._START_PREFIX) or content.lower().startswith(self._END_PREFIX): - content = "" - self._buffer = "" - if not content: - return [] - # Strip any complete tags that might still be present. - content = self._START_PATTERN.sub("", content) - content = self._END_PATTERN.sub("", content) - return [(kind, content)] if content else [] diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 94b616bd34..408363d226 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -90,12 +90,19 @@ from models.model import UploadFile from . 
import llm_utils from .entities import ( + AgentContext, + AggregatedResult, LLMGenerationData, LLMNodeChatModelMessage, LLMNodeCompletionModelPromptTemplate, LLMNodeData, LLMTraceSegment, ModelConfig, + StreamBuffers, + ThinkTagStreamParser, + ToolLogPayload, + ToolOutputState, + TraceState, ) from .exc import ( InvalidContextStructureError, @@ -582,7 +589,7 @@ class LLMNode(Node[LLMNodeData]): usage = LLMUsage.empty_usage() finish_reason = None full_text_buffer = io.StringIO() - think_parser = llm_utils.ThinkTagStreamParser() + think_parser = ThinkTagStreamParser() reasoning_chunks: list[str] = [] # Initialize streaming metrics tracking @@ -1495,7 +1502,7 @@ class LLMNode(Node[LLMNodeData]): ) # Process outputs and return generation result - result = yield from self._process_tool_outputs(outputs, strategy, node_inputs, process_data) + result = yield from self._process_tool_outputs(outputs) return result def _get_model_features(self, model_instance: ModelInstance) -> list[ModelFeature]: @@ -1587,278 +1594,213 @@ class LLMNode(Node[LLMNodeData]): return files - def _process_tool_outputs( - self, - outputs: Generator[LLMResultChunk | AgentLog, None, AgentResult], - strategy: Any, - node_inputs: dict[str, Any], - process_data: dict[str, Any], - ) -> Generator[NodeEventBase, None, LLMGenerationData]: - """Process strategy outputs and convert to node events. + def _flush_thought_segment(self, buffers: StreamBuffers, trace_state: TraceState) -> None: + if not buffers.pending_thought: + return + trace_state.trace_segments.append(LLMTraceSegment(type="thought", text="".join(buffers.pending_thought))) + buffers.pending_thought.clear() - Returns LLMGenerationData with text, reasoning_contents, tool_calls, usage, finish_reason, files - """ - text = "" - files: list[File] = [] - usage = LLMUsage.empty_usage() - agent_logs: list[AgentLogEvent] = [] - finish_reason = None - agent_result: AgentResult | None = None + def _flush_content_segment(self, buffers: StreamBuffers, trace_state: TraceState) -> None: + if not buffers.pending_content: + return + trace_state.trace_segments.append(LLMTraceSegment(type="content", text="".join(buffers.pending_content))) + buffers.pending_content.clear() - think_parser = llm_utils.ThinkTagStreamParser() - # Track reasoning per turn: each tool_call completion marks end of a turn - current_turn_reasoning: list[str] = [] # Buffer for current turn's thought chunks - reasoning_per_turn: list[str] = [] # Final list: one element per turn - tool_call_index_map: dict[str, int] = {} # tool_call_id -> index - trace_segments: list[LLMTraceSegment] = [] # Ordered trace for replay - tool_trace_map: dict[str, LLMTraceSegment] = {} - current_turn = 0 - pending_thought: list[str] = [] - pending_content: list[str] = [] + def _handle_agent_log_output( + self, output: AgentLog, buffers: StreamBuffers, trace_state: TraceState, agent_context: AgentContext + ) -> Generator[NodeEventBase, None, None]: + payload = ToolLogPayload.from_log(output) + agent_log_event = AgentLogEvent( + message_id=output.id, + label=output.label, + node_execution_id=self.id, + parent_id=output.parent_id, + error=output.error, + status=output.status.value, + data=output.data, + metadata={k.value: v for k, v in output.metadata.items()}, + node_id=self._node_id, + ) + for log in agent_context.agent_logs: + if log.message_id == agent_log_event.message_id: + log.data = agent_log_event.data + log.status = agent_log_event.status + log.error = agent_log_event.error + log.label = agent_log_event.label + log.metadata = 
agent_log_event.metadata + break + else: + agent_context.agent_logs.append(agent_log_event) - def _flush_thought() -> None: - if not pending_thought: - return - trace_segments.append(LLMTraceSegment(type="thought", text="".join(pending_thought))) - pending_thought.clear() + if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.START: + tool_name = payload.tool_name + tool_call_id = payload.tool_call_id + tool_arguments = json.dumps(payload.tool_args) if payload.tool_args else "" - def _flush_content() -> None: - if not pending_content: - return - trace_segments.append(LLMTraceSegment(type="content", text="".join(pending_content))) - pending_content.clear() + if tool_call_id and tool_call_id not in trace_state.tool_call_index_map: + trace_state.tool_call_index_map[tool_call_id] = len(trace_state.tool_call_index_map) - # Process each output from strategy - try: - for output in outputs: - if isinstance(output, AgentLog): - # Store agent log event for metadata (no longer yielded, StreamChunkEvent contains the info) - agent_log_event = AgentLogEvent( - message_id=output.id, - label=output.label, - node_execution_id=self.id, - parent_id=output.parent_id, - error=output.error, - status=output.status.value, - data=output.data, - metadata={k.value: v for k, v in output.metadata.items()}, - node_id=self._node_id, + self._flush_thought_segment(buffers, trace_state) + self._flush_content_segment(buffers, trace_state) + + tool_call_segment = LLMTraceSegment( + type="tool_call", + text=None, + tool_call=ToolCallResult( + id=tool_call_id, + name=tool_name, + arguments=tool_arguments, + ), + ) + trace_state.trace_segments.append(tool_call_segment) + if tool_call_id: + trace_state.tool_trace_map[tool_call_id] = tool_call_segment + + yield ToolCallChunkEvent( + selector=[self._node_id, "generation", "tool_calls"], + chunk=tool_arguments, + tool_call=ToolCall( + id=tool_call_id, + name=tool_name, + arguments=tool_arguments, + ), + is_final=False, + ) + + if output.log_type == AgentLog.LogType.TOOL_CALL and output.status != AgentLog.LogStatus.START: + tool_name = payload.tool_name + tool_output = payload.tool_output + tool_call_id = payload.tool_call_id + tool_files = payload.files if isinstance(payload.files, list) else [] + tool_error = payload.tool_error + + if tool_call_id and tool_call_id not in trace_state.tool_call_index_map: + trace_state.tool_call_index_map[tool_call_id] = len(trace_state.tool_call_index_map) + + self._flush_thought_segment(buffers, trace_state) + self._flush_content_segment(buffers, trace_state) + + if output.status == AgentLog.LogStatus.ERROR: + tool_error = output.error or payload.tool_error + if not tool_error and payload.meta: + tool_error = payload.meta.get("error") + else: + if payload.meta: + meta_error = payload.meta.get("error") + if meta_error: + tool_error = meta_error + + existing_tool_segment = trace_state.tool_trace_map.get(tool_call_id) + tool_call_segment = existing_tool_segment or LLMTraceSegment( + type="tool_call", + text=None, + tool_call=ToolCallResult( + id=tool_call_id, + name=tool_name, + arguments=None, + ), + ) + if existing_tool_segment is None: + trace_state.trace_segments.append(tool_call_segment) + if tool_call_id: + trace_state.tool_trace_map[tool_call_id] = tool_call_segment + + if tool_call_segment.tool_call is None: + tool_call_segment.tool_call = ToolCallResult( + id=tool_call_id, + name=tool_name, + arguments=None, + ) + tool_call_segment.tool_call.output = ( + str(tool_output) if tool_output is not None else 
str(tool_error) if tool_error is not None else None + ) + tool_call_segment.tool_call.files = [] + tool_call_segment.tool_call.status = ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS + + result_output = str(tool_output) if tool_output is not None else str(tool_error) if tool_error else None + + yield ToolResultChunkEvent( + selector=[self._node_id, "generation", "tool_results"], + chunk=result_output or "", + tool_result=ToolResult( + id=tool_call_id, + name=tool_name, + output=result_output, + files=tool_files, + status=ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS, + ), + is_final=False, + ) + + if buffers.current_turn_reasoning: + buffers.reasoning_per_turn.append("".join(buffers.current_turn_reasoning)) + buffers.current_turn_reasoning.clear() + + def _handle_llm_chunk_output( + self, output: LLMResultChunk, buffers: StreamBuffers, trace_state: TraceState, aggregate: AggregatedResult + ) -> Generator[NodeEventBase, None, None]: + message = output.delta.message + + if message and message.content: + chunk_text = message.content + if isinstance(chunk_text, list): + chunk_text = "".join(getattr(content, "data", str(content)) for content in chunk_text) + else: + chunk_text = str(chunk_text) + + for kind, segment in buffers.think_parser.process(chunk_text): + if not segment: + continue + + if kind == "thought": + self._flush_content_segment(buffers, trace_state) + buffers.current_turn_reasoning.append(segment) + buffers.pending_thought.append(segment) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk=segment, + is_final=False, + ) + else: + self._flush_thought_segment(buffers, trace_state) + aggregate.text += segment + buffers.pending_content.append(segment) + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk=segment, + is_final=False, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk=segment, + is_final=False, ) - for log in agent_logs: - if log.message_id == agent_log_event.message_id: - # update the log - log.data = agent_log_event.data - log.status = agent_log_event.status - log.error = agent_log_event.error - log.label = agent_log_event.label - log.metadata = agent_log_event.metadata - break - else: - agent_logs.append(agent_log_event) - # Emit tool call events when tool call starts - if output.log_type == AgentLog.LogType.TOOL_CALL and output.status == AgentLog.LogStatus.START: - tool_name = output.data.get("tool_name", "") - tool_call_id = output.data.get("tool_call_id", "") - tool_args = output.data.get("tool_args", {}) - tool_arguments = json.dumps(tool_args) if tool_args else "" + if output.delta.usage: + self._accumulate_usage(aggregate.usage, output.delta.usage) - if tool_call_id and tool_call_id not in tool_call_index_map: - tool_call_index_map[tool_call_id] = len(tool_call_index_map) + if output.delta.finish_reason: + aggregate.finish_reason = output.delta.finish_reason - _flush_thought() - _flush_content() - - tool_call_segment = LLMTraceSegment( - type="tool_call", - text=None, - tool_call=ToolCallResult( - id=tool_call_id, - name=tool_name, - arguments=tool_arguments, - ), - ) - trace_segments.append(tool_call_segment) - if tool_call_id: - tool_trace_map[tool_call_id] = tool_call_segment - - yield ToolCallChunkEvent( - selector=[self._node_id, "generation", "tool_calls"], - chunk=tool_arguments, - tool_call=ToolCall( - id=tool_call_id, - name=tool_name, - arguments=tool_arguments, - ), - is_final=False, - ) - - # Emit tool result events 
when tool call completes (both success and error) - if output.log_type == AgentLog.LogType.TOOL_CALL and output.status != AgentLog.LogStatus.START: - tool_name = output.data.get("tool_name", "") - tool_output = output.data.get("output", "") - tool_call_id = output.data.get("tool_call_id", "") - tool_files = [] - tool_error = None - - if tool_call_id and tool_call_id not in tool_call_index_map: - tool_call_index_map[tool_call_id] = len(tool_call_index_map) - - _flush_thought() - _flush_content() - - # Extract file IDs if present (only for success case) - files_data = output.data.get("files") - if files_data and isinstance(files_data, list): - tool_files = files_data - - # Check for error from multiple sources - if output.status == AgentLog.LogStatus.ERROR: - # Priority: output.error > data.error > meta.error - tool_error = output.error or output.data.get("error") - meta = output.data.get("meta") - if not tool_error and meta and isinstance(meta, dict): - tool_error = meta.get("error") - else: - # For success case, check meta for potential errors - meta = output.data.get("meta") - if meta and isinstance(meta, dict) and meta.get("error"): - tool_error = meta.get("error") - - existing_tool_segment = tool_trace_map.get(tool_call_id) - tool_call_segment = existing_tool_segment or LLMTraceSegment( - type="tool_call", - text=None, - tool_call=ToolCallResult( - id=tool_call_id, - name=tool_name, - arguments=None, - ), - ) - if existing_tool_segment is None: - trace_segments.append(tool_call_segment) - if tool_call_id: - tool_trace_map[tool_call_id] = tool_call_segment - - if tool_call_segment.tool_call is None: - tool_call_segment.tool_call = ToolCallResult( - id=tool_call_id, - name=tool_name, - arguments=None, - ) - tool_call_segment.tool_call.output = ( - str(tool_output) - if tool_output is not None - else str(tool_error) - if tool_error is not None - else None - ) - tool_call_segment.tool_call.files = [] - tool_call_segment.tool_call.status = ( - ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS - ) - current_turn += 1 - - result_output = ( - str(tool_output) if tool_output is not None else str(tool_error) if tool_error else None - ) - - yield ToolResultChunkEvent( - selector=[self._node_id, "generation", "tool_results"], - chunk=result_output or "", - tool_result=ToolResult( - id=tool_call_id, - name=tool_name, - output=result_output, - files=tool_files, - status=ToolResultStatus.ERROR if tool_error else ToolResultStatus.SUCCESS, - ), - is_final=False, - ) - - # End of current turn: save accumulated thought as one element - if current_turn_reasoning: - reasoning_per_turn.append("".join(current_turn_reasoning)) - current_turn_reasoning.clear() - - elif isinstance(output, LLMResultChunk): - # Handle LLM result chunks - only process text content - message = output.delta.message - - # Handle text content - if message and message.content: - chunk_text = message.content - if isinstance(chunk_text, list): - # Extract text from content list - chunk_text = "".join(getattr(c, "data", str(c)) for c in chunk_text) - else: - chunk_text = str(chunk_text) - for kind, segment in think_parser.process(chunk_text): - if not segment: - continue - - if kind == "thought": - _flush_content() - current_turn_reasoning.append(segment) - pending_thought.append(segment) - yield ThoughtChunkEvent( - selector=[self._node_id, "generation", "thought"], - chunk=segment, - is_final=False, - ) - else: - _flush_thought() - text += segment - pending_content.append(segment) - yield StreamChunkEvent( - 
selector=[self._node_id, "text"], - chunk=segment, - is_final=False, - ) - yield StreamChunkEvent( - selector=[self._node_id, "generation", "content"], - chunk=segment, - is_final=False, - ) - - if output.delta.usage: - self._accumulate_usage(usage, output.delta.usage) - - # Capture finish reason - if output.delta.finish_reason: - finish_reason = output.delta.finish_reason - - except StopIteration as e: - # Get the return value from generator - if isinstance(getattr(e, "value", None), AgentResult): - agent_result = e.value - - # Use result from generator if available - if agent_result: - text = agent_result.text or text - files = agent_result.files - if agent_result.usage: - usage = agent_result.usage - if agent_result.finish_reason: - finish_reason = agent_result.finish_reason - - # Flush any remaining buffered content after streaming ends - for kind, segment in think_parser.flush(): + def _flush_remaining_stream( + self, buffers: StreamBuffers, trace_state: TraceState, aggregate: AggregatedResult + ) -> Generator[NodeEventBase, None, None]: + for kind, segment in buffers.think_parser.flush(): if not segment: continue if kind == "thought": - _flush_content() - current_turn_reasoning.append(segment) - pending_thought.append(segment) + self._flush_content_segment(buffers, trace_state) + buffers.current_turn_reasoning.append(segment) + buffers.pending_thought.append(segment) yield ThoughtChunkEvent( selector=[self._node_id, "generation", "thought"], chunk=segment, is_final=False, ) else: - _flush_thought() - text += segment - pending_content.append(segment) + self._flush_thought_segment(buffers, trace_state) + aggregate.text += segment + buffers.pending_content.append(segment) yield StreamChunkEvent( selector=[self._node_id, "text"], chunk=segment, @@ -1870,19 +1812,63 @@ class LLMNode(Node[LLMNodeData]): is_final=False, ) - # Save the last turn's thought if any - if current_turn_reasoning: - reasoning_per_turn.append("".join(current_turn_reasoning)) + if buffers.current_turn_reasoning: + buffers.reasoning_per_turn.append("".join(buffers.current_turn_reasoning)) - _flush_thought() - _flush_content() + self._flush_thought_segment(buffers, trace_state) + self._flush_content_segment(buffers, trace_state) - # Build sequence from trace_segments for rendering + def _close_streams(self) -> Generator[NodeEventBase, None, None]: + yield StreamChunkEvent( + selector=[self._node_id, "text"], + chunk="", + is_final=True, + ) + yield StreamChunkEvent( + selector=[self._node_id, "generation", "content"], + chunk="", + is_final=True, + ) + yield ThoughtChunkEvent( + selector=[self._node_id, "generation", "thought"], + chunk="", + is_final=True, + ) + yield ToolCallChunkEvent( + selector=[self._node_id, "generation", "tool_calls"], + chunk="", + tool_call=ToolCall( + id="", + name="", + arguments="", + ), + is_final=True, + ) + yield ToolResultChunkEvent( + selector=[self._node_id, "generation", "tool_results"], + chunk="", + tool_result=ToolResult( + id="", + name="", + output="", + files=[], + status=ToolResultStatus.SUCCESS, + ), + is_final=True, + ) + + def _build_generation_data( + self, + trace_state: TraceState, + agent_context: AgentContext, + aggregate: AggregatedResult, + buffers: StreamBuffers, + ) -> LLMGenerationData: sequence: list[dict[str, Any]] = [] reasoning_index = 0 content_position = 0 tool_call_seen_index: dict[str, int] = {} - for trace_segment in trace_segments: + for trace_segment in trace_state.trace_segments: if trace_segment.type == "thought": sequence.append({"type": 
"reasoning", "index": reasoning_index}) reasoning_index += 1 @@ -1898,67 +1884,22 @@ class LLMNode(Node[LLMNodeData]): tool_call_seen_index[tool_id] = len(tool_call_seen_index) sequence.append({"type": "tool_call", "index": tool_call_seen_index[tool_id]}) - # Send final events for all streams - yield StreamChunkEvent( - selector=[self._node_id, "text"], - chunk="", - is_final=True, - ) - - # Close generation sub-field streams - yield StreamChunkEvent( - selector=[self._node_id, "generation", "content"], - chunk="", - is_final=True, - ) - yield ThoughtChunkEvent( - selector=[self._node_id, "generation", "thought"], - chunk="", - is_final=True, - ) - - # Close tool_calls stream (already sent via ToolCallChunkEvent) - yield ToolCallChunkEvent( - selector=[self._node_id, "generation", "tool_calls"], - chunk="", - tool_call=ToolCall( - id="", - name="", - arguments="", - ), - is_final=True, - ) - - # Close tool_results stream (already sent via ToolResultChunkEvent) - yield ToolResultChunkEvent( - selector=[self._node_id, "generation", "tool_results"], - chunk="", - tool_result=ToolResult( - id="", - name="", - output="", - files=[], - status=ToolResultStatus.SUCCESS, - ), - is_final=True, - ) - - # Build tool_calls from agent_logs (with results) tool_calls_for_generation: list[ToolCallResult] = [] - for log in agent_logs: - tool_call_id = log.data.get("tool_call_id") + for log in agent_context.agent_logs: + payload = ToolLogPayload.from_mapping(log.data or {}) + tool_call_id = payload.tool_call_id if not tool_call_id or log.status == AgentLog.LogStatus.START.value: continue - tool_args = log.data.get("tool_args") or {} - log_error = log.data.get("error") - log_output = log.data.get("output") + tool_args = payload.tool_args + log_error = payload.tool_error + log_output = payload.tool_output result_text = log_output or log_error or "" status = ToolResultStatus.ERROR if log_error else ToolResultStatus.SUCCESS tool_calls_for_generation.append( ToolCallResult( id=tool_call_id, - name=log.data.get("tool_name", ""), + name=payload.tool_name, arguments=json.dumps(tool_args) if tool_args else "", output=result_text, status=status, @@ -1966,21 +1907,50 @@ class LLMNode(Node[LLMNodeData]): ) tool_calls_for_generation.sort( - key=lambda item: tool_call_index_map.get(item.id or "", len(tool_call_index_map)) + key=lambda item: trace_state.tool_call_index_map.get(item.id or "", len(trace_state.tool_call_index_map)) ) - # Return generation data for caller return LLMGenerationData( - text=text, - reasoning_contents=reasoning_per_turn, + text=aggregate.text, + reasoning_contents=buffers.reasoning_per_turn, tool_calls=tool_calls_for_generation, sequence=sequence, - usage=usage, - finish_reason=finish_reason, - files=files, - trace=trace_segments, + usage=aggregate.usage, + finish_reason=aggregate.finish_reason, + files=aggregate.files, + trace=trace_state.trace_segments, ) + def _process_tool_outputs( + self, + outputs: Generator[LLMResultChunk | AgentLog, None, AgentResult], + ) -> Generator[NodeEventBase, None, LLMGenerationData]: + """Process strategy outputs and convert to node events.""" + state = ToolOutputState() + + try: + for output in outputs: + if isinstance(output, AgentLog): + yield from self._handle_agent_log_output(output, state.stream, state.trace, state.agent) + else: + yield from self._handle_llm_chunk_output(output, state.stream, state.trace, state.aggregate) + except StopIteration as exception: + if isinstance(getattr(exception, "value", None), AgentResult): + state.agent.agent_result = 
exception.value + + if state.agent.agent_result: + state.aggregate.text = state.agent.agent_result.text or state.aggregate.text + state.aggregate.files = state.agent.agent_result.files + if state.agent.agent_result.usage: + state.aggregate.usage = state.agent.agent_result.usage + if state.agent.agent_result.finish_reason: + state.aggregate.finish_reason = state.agent.agent_result.finish_reason + + yield from self._flush_remaining_stream(state.stream, state.trace, state.aggregate) + yield from self._close_streams() + + return self._build_generation_data(state.trace, state.agent, state.aggregate, state.stream) + def _accumulate_usage(self, total_usage: LLMUsage, delta_usage: LLMUsage) -> None: """Accumulate LLM usage statistics.""" total_usage.prompt_tokens += delta_usage.prompt_tokens From cb99b8f04d27758e762cedad5724b75b28c69534 Mon Sep 17 00:00:00 2001 From: Novice Date: Wed, 17 Dec 2025 15:59:09 +0800 Subject: [PATCH 13/18] chore: handle migrations --- api/core/workflow/nodes/base/node.py | 4 ++-- ...2_17_1617-85c8b4a64f53_add_llm_generation_detail_table.py} | 2 +- .../core/workflow/graph_engine/test_response_coordinator.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) rename api/migrations/versions/{2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py => 2025_12_17_1617-85c8b4a64f53_add_llm_generation_detail_table.py} (98%) diff --git a/api/core/workflow/nodes/base/node.py b/api/core/workflow/nodes/base/node.py index 40feda8b57..302d77d625 100644 --- a/api/core/workflow/nodes/base/node.py +++ b/api/core/workflow/nodes/base/node.py @@ -577,8 +577,8 @@ class Node(Generic[NodeDataT]): @_dispatch.register def _(self, event: ToolResultChunkEvent) -> NodeRunStreamChunkEvent: - from core.workflow.entities import ToolResult - from core.workflow.graph_events import ChunkType, ToolResultStatus + from core.workflow.entities import ToolResult, ToolResultStatus + from core.workflow.graph_events import ChunkType tool_result = event.tool_result status: ToolResultStatus = ( diff --git a/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py b/api/migrations/versions/2025_12_17_1617-85c8b4a64f53_add_llm_generation_detail_table.py similarity index 98% rename from api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py rename to api/migrations/versions/2025_12_17_1617-85c8b4a64f53_add_llm_generation_detail_table.py index 700f9ea80b..60786a720c 100644 --- a/api/migrations/versions/2025_12_10_1617-85c8b4a64f53_add_llm_generation_detail_table.py +++ b/api/migrations/versions/2025_12_17_1617-85c8b4a64f53_add_llm_generation_detail_table.py @@ -12,7 +12,7 @@ from sqlalchemy.dialects import postgresql # revision identifiers, used by Alembic. 
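Side note on the `_process_tool_outputs` refactor above: a generator's `return` value travels in `StopIteration.value`, so it is only observable when the caller drives the generator with `next()` (as the `_drain` helper in the new unit tests does) or with `yield from`; a plain `for` loop consumes `StopIteration` internally and never exposes the value. A minimal, generic sketch of that Python pattern — the names here are illustrative, not Dify code:

```python
from collections.abc import Generator


def strategy_outputs() -> Generator[str, None, dict]:
    yield "chunk-1"
    yield "chunk-2"
    return {"finish_reason": "stop"}  # travels to the caller as StopIteration.value


def drain(gen: Generator[str, None, dict]) -> tuple[list[str], dict | None]:
    chunks: list[str] = []
    while True:
        try:
            chunks.append(next(gen))
        except StopIteration as exc:
            return chunks, exc.value  # the generator's return value


chunks, result = drain(strategy_outputs())
assert chunks == ["chunk-1", "chunk-2"]
assert result == {"finish_reason": "stop"}
```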
revision = '85c8b4a64f53' -down_revision = 'd57accd375ae' +down_revision = '03ea244985ce' branch_labels = None depends_on = None diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py index 8e0eba71cc..c6b3797ce2 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -2,9 +2,9 @@ from unittest.mock import MagicMock -from core.workflow.entities import ToolResultStatus +from core.workflow.entities.tool_entities import ToolResultStatus from core.workflow.enums import NodeType -from core.workflow.graph import Graph +from core.workflow.graph.graph import Graph from core.workflow.graph_engine.response_coordinator.coordinator import ResponseStreamCoordinator from core.workflow.graph_events import ( ChunkType, From f54b9b12b0646391c12d2222f74e21c77a7cbf81 Mon Sep 17 00:00:00 2001 From: Novice Date: Wed, 17 Dec 2025 17:34:02 +0800 Subject: [PATCH 14/18] feat: add process data --- api/core/workflow/nodes/llm/node.py | 233 +++++++++++++++++----------- 1 file changed, 145 insertions(+), 88 deletions(-) diff --git a/api/core/workflow/nodes/llm/node.py b/api/core/workflow/nodes/llm/node.py index 408363d226..6be59b6ead 100644 --- a/api/core/workflow/nodes/llm/node.py +++ b/api/core/workflow/nodes/llm/node.py @@ -164,81 +164,6 @@ class LLMNode(Node[LLMNodeData]): def version(cls) -> str: return "1" - def _stream_llm_events( - self, - generator: Generator[NodeEventBase | LLMStructuredOutput, None, LLMGenerationData | None], - *, - model_instance: ModelInstance, - ) -> Generator[ - NodeEventBase, - None, - tuple[ - str, - str, - LLMUsage, - str | None, - LLMStructuredOutput | None, - LLMGenerationData | None, - ], - ]: - """ - Stream events and capture generator return value in one place. - Uses generator delegation so _run stays concise while still emitting events. 
- """ - clean_text = "" - reasoning_content = "" - usage = LLMUsage.empty_usage() - finish_reason: str | None = None - structured_output: LLMStructuredOutput | None = None - generation_data: LLMGenerationData | None = None - completed = False - - while True: - try: - event = next(generator) - except StopIteration as exc: - if isinstance(exc.value, LLMGenerationData): - generation_data = exc.value - break - - if completed: - # After completion we still drain to reach StopIteration.value - continue - - match event: - case StreamChunkEvent() | ThoughtChunkEvent(): - yield event - - case ModelInvokeCompletedEvent( - text=text, - usage=usage_event, - finish_reason=finish_reason_event, - reasoning_content=reasoning_event, - structured_output=structured_raw, - ): - clean_text = text - usage = usage_event - finish_reason = finish_reason_event - reasoning_content = reasoning_event or "" - - if self.node_data.reasoning_format != "tagged": - clean_text, _ = LLMNode._split_reasoning(clean_text, self.node_data.reasoning_format) - - structured_output = ( - LLMStructuredOutput(structured_output=structured_raw) if structured_raw else None - ) - - llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) - completed = True - - case LLMStructuredOutput(): - structured_output = event - - case _: - continue - - return clean_text, reasoning_content, usage, finish_reason, structured_output, generation_data - def _run(self) -> Generator: node_inputs: dict[str, Any] = {} process_data: dict[str, Any] = {} @@ -262,15 +187,6 @@ class LLMNode(Node[LLMNodeData]): # merge inputs inputs.update(jinja_inputs) - # Add all inputs to node_inputs for logging - node_inputs.update(inputs) - - # Add tools to inputs if configured - if self.tool_call_enabled: - node_inputs["tools"] = [ - {"provider_id": tool.provider_name, "tool_name": tool.tool_name} for tool in self._node_data.tools - ] - # fetch files files = ( llm_utils.fetch_files( @@ -372,6 +288,8 @@ class LLMNode(Node[LLMNodeData]): ( clean_text, reasoning_content, + generation_reasoning_content, + generation_clean_content, usage, finish_reason, structured_output, @@ -396,6 +314,16 @@ class LLMNode(Node[LLMNodeData]): "model_provider": model_config.provider, "model_name": model_config.model, } + if self.tool_call_enabled and self._node_data.tools: + process_data["tools"] = [ + { + "type": tool.type.value if hasattr(tool.type, "value") else tool.type, + "provider_name": tool.provider_name, + "tool_name": tool.tool_name, + } + for tool in self._node_data.tools + if tool.enabled + ] # Unified outputs building outputs = { @@ -411,17 +339,25 @@ class LLMNode(Node[LLMNodeData]): generation = { "content": generation_data.text, "reasoning_content": generation_data.reasoning_contents, # [thought1, thought2, ...] 
- "tool_calls": generation_data.tool_calls, + "tool_calls": [self._serialize_tool_call(item) for item in generation_data.tool_calls], "sequence": generation_data.sequence, } files_to_output = generation_data.files else: # Traditional LLM invocation + generation_reasoning = generation_reasoning_content or reasoning_content + generation_content = generation_clean_content or clean_text + sequence: list[dict[str, Any]] = [] + if generation_reasoning: + sequence = [ + {"type": "reasoning", "index": 0}, + {"type": "content", "start": 0, "end": len(generation_content)}, + ] generation = { - "content": clean_text, - "reasoning_content": [reasoning_content] if reasoning_content else [], + "content": generation_content, + "reasoning_content": [generation_reasoning] if generation_reasoning else [], "tool_calls": [], - "sequence": [], + "sequence": sequence, } files_to_output = self._file_outputs @@ -1460,6 +1396,104 @@ class LLMNode(Node[LLMNodeData]): and all(tool.enabled for tool in self.node_data.tools) ) + def _stream_llm_events( + self, + generator: Generator[NodeEventBase | LLMStructuredOutput, None, LLMGenerationData | None], + *, + model_instance: ModelInstance, + ) -> Generator[ + NodeEventBase, + None, + tuple[ + str, + str, + str, + str, + LLMUsage, + str | None, + LLMStructuredOutput | None, + LLMGenerationData | None, + ], + ]: + """ + Stream events and capture generator return value in one place. + Uses generator delegation so _run stays concise while still emitting events. + """ + clean_text = "" + reasoning_content = "" + generation_reasoning_content = "" + generation_clean_content = "" + usage = LLMUsage.empty_usage() + finish_reason: str | None = None + structured_output: LLMStructuredOutput | None = None + generation_data: LLMGenerationData | None = None + completed = False + + while True: + try: + event = next(generator) + except StopIteration as exc: + if isinstance(exc.value, LLMGenerationData): + generation_data = exc.value + break + + if completed: + # After completion we still drain to reach StopIteration.value + continue + + match event: + case StreamChunkEvent() | ThoughtChunkEvent(): + yield event + + case ModelInvokeCompletedEvent( + text=text, + usage=usage_event, + finish_reason=finish_reason_event, + reasoning_content=reasoning_event, + structured_output=structured_raw, + ): + clean_text = text + usage = usage_event + finish_reason = finish_reason_event + reasoning_content = reasoning_event or "" + generation_reasoning_content = reasoning_content + generation_clean_content = clean_text + + if self.node_data.reasoning_format == "tagged": + # Keep tagged text for output; also extract reasoning for generation field + generation_clean_content, generation_reasoning_content = LLMNode._split_reasoning( + clean_text, reasoning_format="separated" + ) + else: + clean_text, generation_reasoning_content = LLMNode._split_reasoning( + clean_text, self.node_data.reasoning_format + ) + generation_clean_content = clean_text + + structured_output = ( + LLMStructuredOutput(structured_output=structured_raw) if structured_raw else None + ) + + llm_utils.deduct_llm_quota(tenant_id=self.tenant_id, model_instance=model_instance, usage=usage) + completed = True + + case LLMStructuredOutput(): + structured_output = event + + case _: + continue + + return ( + clean_text, + reasoning_content, + generation_reasoning_content, + generation_clean_content, + usage, + finish_reason, + structured_output, + generation_data, + ) + def _invoke_llm_with_tools( self, model_instance: ModelInstance, @@ -1594,6 
+1628,29 @@ class LLMNode(Node[LLMNodeData]): return files + @staticmethod + def _serialize_tool_call(tool_call: ToolCallResult) -> dict[str, Any]: + """Convert ToolCallResult into JSON-friendly dict.""" + + def _file_to_ref(file: File) -> str | None: + # Align with streamed tool result events which carry file IDs + return file.id or file.related_id + + files = [] + for file in tool_call.files or []: + ref = _file_to_ref(file) + if ref: + files.append(ref) + + return { + "id": tool_call.id, + "name": tool_call.name, + "arguments": tool_call.arguments, + "output": tool_call.output, + "files": files, + "status": tool_call.status.value if hasattr(tool_call.status, "value") else tool_call.status, + } + def _flush_thought_segment(self, buffers: StreamBuffers, trace_state: TraceState) -> None: if not buffers.pending_thought: return From 047ea8c143efaaec61a5bc2b6fc419d85e1a9bbe Mon Sep 17 00:00:00 2001 From: Novice Date: Thu, 18 Dec 2025 10:09:31 +0800 Subject: [PATCH 15/18] chore: improve type checking --- api/core/app/entities/queue_entities.py | 3 +- .../index_tool_callback_handler.py | 3 +- .../workflow/nodes/test_llm_node_streaming.py | 148 ++++++++++++++++++ 3 files changed, 151 insertions(+), 3 deletions(-) create mode 100644 api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py diff --git a/api/core/app/entities/queue_entities.py b/api/core/app/entities/queue_entities.py index e07efbc38c..31b95ad165 100644 --- a/api/core/app/entities/queue_entities.py +++ b/api/core/app/entities/queue_entities.py @@ -7,9 +7,8 @@ from pydantic import BaseModel, ConfigDict, Field from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk from core.rag.entities.citation_metadata import RetrievalSourceMetadata -from core.workflow.entities import AgentNodeStrategyInit +from core.workflow.entities import AgentNodeStrategyInit, ToolCall, ToolResult from core.workflow.enums import WorkflowNodeExecutionMetadataKey -from core.workflow.graph_events import ToolCall, ToolResult from core.workflow.nodes import NodeType diff --git a/api/core/callback_handler/index_tool_callback_handler.py b/api/core/callback_handler/index_tool_callback_handler.py index d0279349ca..5249fea8cd 100644 --- a/api/core/callback_handler/index_tool_callback_handler.py +++ b/api/core/callback_handler/index_tool_callback_handler.py @@ -5,7 +5,6 @@ from sqlalchemy import select from core.app.apps.base_app_queue_manager import AppQueueManager, PublishFrom from core.app.entities.app_invoke_entities import InvokeFrom -from core.app.entities.queue_entities import QueueRetrieverResourcesEvent from core.rag.entities.citation_metadata import RetrievalSourceMetadata from core.rag.index_processor.constant.index_type import IndexStructureType from core.rag.models.document import Document @@ -90,6 +89,8 @@ class DatasetIndexToolCallbackHandler: # TODO(-LAN-): Improve type check def return_retriever_resource_info(self, resource: Sequence[RetrievalSourceMetadata]): """Handle return_retriever_resource_info.""" + from core.app.entities.queue_entities import QueueRetrieverResourcesEvent + self._queue_manager.publish( QueueRetrieverResourcesEvent(retriever_resources=resource), PublishFrom.APPLICATION_MANAGER ) diff --git a/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py b/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py new file mode 100644 index 0000000000..9d793f804f --- /dev/null +++ b/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py @@ -0,0 +1,148 @@ +import types +from 
collections.abc import Generator +from typing import Any + +import pytest + +from core.model_runtime.entities.llm_entities import LLMUsage +from core.workflow.entities import ToolCallResult +from core.workflow.entities.tool_entities import ToolResultStatus +from core.workflow.node_events import ModelInvokeCompletedEvent, NodeEventBase +from core.workflow.nodes.llm.node import LLMNode + + +class _StubModelInstance: + """Minimal stub to satisfy _stream_llm_events signature.""" + + provider_model_bundle = None + + +def _drain(generator: Generator[NodeEventBase, None, Any]): + events: list = [] + try: + while True: + events.append(next(generator)) + except StopIteration as exc: + return events, exc.value + + +@pytest.fixture(autouse=True) +def patch_deduct_llm_quota(monkeypatch): + # Avoid touching real quota logic during unit tests + monkeypatch.setattr("core.workflow.nodes.llm.node.llm_utils.deduct_llm_quota", lambda **_: None) + + +def _make_llm_node(reasoning_format: str) -> LLMNode: + node = LLMNode.__new__(LLMNode) + object.__setattr__(node, "_node_data", types.SimpleNamespace(reasoning_format=reasoning_format, tools=[])) + object.__setattr__(node, "tenant_id", "tenant") + return node + + +def test_stream_llm_events_extracts_reasoning_for_tagged(): + node = _make_llm_node(reasoning_format="tagged") + tagged_text = "ThoughtAnswer" + usage = LLMUsage.empty_usage() + + def generator(): + yield ModelInvokeCompletedEvent( + text=tagged_text, + usage=usage, + finish_reason="stop", + reasoning_content="", + structured_output=None, + ) + + events, returned = _drain( + node._stream_llm_events(generator(), model_instance=types.SimpleNamespace(provider_model_bundle=None)) + ) + + assert events == [] + clean_text, reasoning_content, gen_reasoning, gen_clean, ret_usage, finish_reason, structured, gen_data = returned + assert clean_text == tagged_text # original preserved for output + assert reasoning_content == "" # tagged mode keeps reasoning separate + assert gen_clean == "Answer" # stripped content for generation + assert gen_reasoning == "Thought" # reasoning extracted from tag + assert ret_usage == usage + assert finish_reason == "stop" + assert structured is None + assert gen_data is None + + # generation building should include reasoning and sequence + generation_content = gen_clean or clean_text + sequence = [ + {"type": "reasoning", "index": 0}, + {"type": "content", "start": 0, "end": len(generation_content)}, + ] + assert sequence == [ + {"type": "reasoning", "index": 0}, + {"type": "content", "start": 0, "end": len("Answer")}, + ] + + +def test_stream_llm_events_no_reasoning_results_in_empty_sequence(): + node = _make_llm_node(reasoning_format="tagged") + plain_text = "Hello world" + usage = LLMUsage.empty_usage() + + def generator(): + yield ModelInvokeCompletedEvent( + text=plain_text, + usage=usage, + finish_reason=None, + reasoning_content="", + structured_output=None, + ) + + events, returned = _drain( + node._stream_llm_events(generator(), model_instance=types.SimpleNamespace(provider_model_bundle=None)) + ) + + assert events == [] + _, _, gen_reasoning, gen_clean, *_ = returned + generation_content = gen_clean or plain_text + assert gen_reasoning == "" + assert generation_content == plain_text + # Empty reasoning should imply empty sequence in generation construction + sequence = [] + assert sequence == [] + + +def test_serialize_tool_call_strips_files_to_ids(): + file_cls = pytest.importorskip("core.file").File + file_type = pytest.importorskip("core.file.enums").FileType + 
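For context on the tagged-mode expectations exercised by the streaming test above, here is a standalone illustration — not the node's `_split_reasoning` implementation — of separating reasoning from content, under the assumption that reasoning is delimited by `<think>...</think>` tags (matching the asserted "Thought" reasoning and "Answer" content):

```python
import re

# Assumed delimiter format; the pattern and helper below are illustrative only.
THINK_PATTERN = re.compile(r"<think[^>]*>(.*?)</think>", re.IGNORECASE | re.DOTALL)


def split_reasoning(text: str) -> tuple[str, str]:
    """Return (content with tags removed, concatenated reasoning)."""
    reasoning = "\n".join(match.strip() for match in THINK_PATTERN.findall(text))
    content = THINK_PATTERN.sub("", text).strip()
    return content, reasoning


content, reasoning = split_reasoning("<think>Thought</think>Answer")
assert content == "Answer"
assert reasoning == "Thought"
```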
transfer_method = pytest.importorskip("core.file.enums").FileTransferMethod + + file_with_id = file_cls( + id="f1", + tenant_id="t", + type=file_type.IMAGE, + transfer_method=transfer_method.REMOTE_URL, + remote_url="http://example.com/f1", + storage_key="k1", + ) + file_with_related = file_cls( + id=None, + tenant_id="t", + type=file_type.IMAGE, + transfer_method=transfer_method.REMOTE_URL, + related_id="rel2", + remote_url="http://example.com/f2", + storage_key="k2", + ) + tool_call = ToolCallResult( + id="tc", + name="do", + arguments='{"a":1}', + output="ok", + files=[file_with_id, file_with_related], + status=ToolResultStatus.SUCCESS, + ) + + serialized = LLMNode._serialize_tool_call(tool_call) + + assert serialized["files"] == ["f1", "rel2"] + assert serialized["id"] == "tc" + assert serialized["name"] == "do" + assert serialized["arguments"] == '{"a":1}' + assert serialized["output"] == "ok" From 7fc25cafb2d30bd6aedc31702d792a833d41bca7 Mon Sep 17 00:00:00 2001 From: Novice Date: Thu, 25 Dec 2025 10:28:21 +0800 Subject: [PATCH 16/18] feat: basic app add thought field --- api/core/agent/agent_app_runner.py | 38 +++-- api/core/agent/patterns/README.md | 82 +++++------ api/core/app/apps/workflow_app_runner.py | 3 + .../easy_ui_based_generate_task_pipeline.py | 32 ++++- ...hemy_workflow_node_execution_repository.py | 133 ++++++++++-------- .../response_coordinator/coordinator.py | 105 ++++++-------- api/models/model.py | 1 + api/models/workflow.py | 6 +- api/tests/unit_tests/core/agent/__init__.py | 1 + .../test_workflow_app_runner_stream_chunk.py | 48 +++++++ .../graph_engine/test_response_coordinator.py | 43 ++++++ .../workflow/nodes/test_llm_node_streaming.py | 1 + 12 files changed, 314 insertions(+), 179 deletions(-) create mode 100644 api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py diff --git a/api/core/agent/agent_app_runner.py b/api/core/agent/agent_app_runner.py index e15ede15d2..2ee0a23aab 100644 --- a/api/core/agent/agent_app_runner.py +++ b/api/core/agent/agent_app_runner.py @@ -183,7 +183,24 @@ class AgentAppRunner(BaseAgentRunner): elif output.status == AgentLog.LogStatus.SUCCESS: if output.log_type == AgentLog.LogType.THOUGHT: - pass + if current_agent_thought_id is None: + continue + + thought_text = output.data.get("thought") + self.save_agent_thought( + agent_thought_id=current_agent_thought_id, + tool_name=None, + tool_input=None, + thought=thought_text, + observation=None, + tool_invoke_meta=None, + answer=None, + messages_ids=[], + ) + self.queue_manager.publish( + QueueAgentThoughtEvent(agent_thought_id=current_agent_thought_id), + PublishFrom.APPLICATION_MANAGER, + ) elif output.log_type == AgentLog.LogType.TOOL_CALL: if current_agent_thought_id is None: @@ -269,15 +286,20 @@ class AgentAppRunner(BaseAgentRunner): """ Initialize system message """ - if not prompt_messages and prompt_template: - return [ - SystemPromptMessage(content=prompt_template), - ] + if not prompt_template: + return prompt_messages or [] - if prompt_messages and not isinstance(prompt_messages[0], SystemPromptMessage) and prompt_template: - prompt_messages.insert(0, SystemPromptMessage(content=prompt_template)) + prompt_messages = prompt_messages or [] - return prompt_messages or [] + if prompt_messages and isinstance(prompt_messages[0], SystemPromptMessage): + prompt_messages[0] = SystemPromptMessage(content=prompt_template) + return prompt_messages + + if not prompt_messages: + return [SystemPromptMessage(content=prompt_template)] + + prompt_messages.insert(0, 
SystemPromptMessage(content=prompt_template)) + return prompt_messages def _organize_user_query(self, query: str, prompt_messages: list[PromptMessage]) -> list[PromptMessage]: """ diff --git a/api/core/agent/patterns/README.md b/api/core/agent/patterns/README.md index f6437ba05a..95b1bf87fa 100644 --- a/api/core/agent/patterns/README.md +++ b/api/core/agent/patterns/README.md @@ -1,67 +1,55 @@ # Agent Patterns -A unified agent pattern module that provides common agent execution strategies for both Agent V2 nodes and Agent Applications in Dify. +A unified agent pattern module that powers both Agent V2 workflow nodes and agent applications. Strategies share a common execution contract while adapting to model capabilities and tool availability. ## Overview -This module implements a strategy pattern for agent execution, automatically selecting the appropriate strategy based on model capabilities. It serves as the core engine for agent-based interactions across different components of the Dify platform. +The module applies a strategy pattern around LLM/tool orchestration. `StrategyFactory` auto-selects the best implementation based on model features or an explicit agent strategy, and each strategy streams logs and usage consistently. ## Key Features -### 1. Multiple Agent Strategies - -- **Function Call Strategy**: Leverages native function/tool calling capabilities of advanced LLMs (e.g., GPT-4, Claude) -- **ReAct Strategy**: Implements the ReAct (Reasoning + Acting) approach for models without native function calling support - -### 2. Automatic Strategy Selection - -The `StrategyFactory` intelligently selects the optimal strategy based on model features: - -- Models with `TOOL_CALL`, `MULTI_TOOL_CALL`, or `STREAM_TOOL_CALL` capabilities → Function Call Strategy -- Other models → ReAct Strategy - -### 3. Unified Interface - -- Common base class (`AgentPattern`) ensures consistent behavior across strategies -- Seamless integration with both workflow nodes and standalone agent applications -- Standardized input/output formats for easy consumption - -### 4. Advanced Capabilities - -- **Streaming Support**: Real-time response streaming for better user experience -- **File Handling**: Built-in support for processing and managing files during agent execution -- **Iteration Control**: Configurable maximum iterations with safety limits (capped at 99) -- **Tool Management**: Flexible tool integration supporting various tool types -- **Context Propagation**: Execution context for tracing, auditing, and debugging +- **Dual strategies** + - `FunctionCallStrategy`: uses native LLM function/tool calling when the model exposes `TOOL_CALL`, `MULTI_TOOL_CALL`, or `STREAM_TOOL_CALL`. + - `ReActStrategy`: ReAct (reasoning + acting) flow driven by `CotAgentOutputParser`, used when function calling is unavailable or explicitly requested. +- **Explicit or auto selection** + - `StrategyFactory.create_strategy` prefers an explicit `AgentEntity.Strategy` (FUNCTION_CALLING or CHAIN_OF_THOUGHT). + - Otherwise it falls back to function calling when tool-call features exist, or ReAct when they do not. +- **Unified execution contract** + - `AgentPattern.run` yields streaming `AgentLog` entries and `LLMResultChunk` data, returning an `AgentResult` with text, files, usage, and `finish_reason`. + - Iterations are configurable and hard-capped at 99 rounds; the last round forces a final answer by withholding tools. +- **Tool handling and hooks** + - Tools convert to `PromptMessageTool` objects before invocation. 
+ - Optional `tool_invoke_hook` lets callers override tool execution (e.g., agent apps) while workflow runs use `ToolEngine.generic_invoke`. + - Tool outputs support text, links, JSON, variables, blobs, retriever resources, and file attachments; `target=="self"` files are reloaded into model context, others are returned as outputs. +- **File-aware arguments** + - Tool args accept `[File: ]` or `[Files: ]` placeholders that resolve to `File` objects before invocation, enabling models to reference uploaded files safely. +- **ReAct prompt shaping** + - System prompts replace `{{instruction}}`, `{{tools}}`, and `{{tool_names}}` placeholders. + - Adds `Observation` to stop sequences and appends scratchpad text so the model sees prior Thought/Action/Observation history. +- **Observability and accounting** + - Standardized `AgentLog` entries for rounds, model thoughts, and tool calls, including usage aggregation (`LLMUsage`) across streaming and non-streaming paths. ## Architecture ``` agent/patterns/ -├── base.py # Abstract base class defining the agent pattern interface -├── function_call.py # Implementation using native LLM function calling -├── react.py # Implementation using ReAct prompting approach -└── strategy_factory.py # Factory for automatic strategy selection +├── base.py # Shared utilities: logging, usage, tool invocation, file handling +├── function_call.py # Native function-calling loop with tool execution +├── react.py # ReAct loop with CoT parsing and scratchpad wiring +└── strategy_factory.py # Strategy selection by model features or explicit override ``` ## Usage -The module is designed to be used by: - -1. **Agent V2 Nodes**: In workflow orchestration for complex agent tasks -1. **Agent Applications**: For standalone conversational agents -1. **Custom Implementations**: As a foundation for building specialized agent behaviors +- For auto-selection: + - Call `StrategyFactory.create_strategy(model_features, model_instance, context, tools, files, ...)` and run the returned strategy with prompt messages and model params. +- For explicit behavior: + - Pass `agent_strategy=AgentEntity.Strategy.FUNCTION_CALLING` to force native calls (falls back to ReAct if unsupported), or `CHAIN_OF_THOUGHT` to force ReAct. +- Both strategies stream chunks and logs; collect the generator output until it returns an `AgentResult`. ## Integration Points -- **Model Runtime**: Interfaces with Dify's model runtime for LLM interactions -- **Tool System**: Integrates with the tool framework for external capabilities -- **Memory Management**: Compatible with conversation memory systems -- **File Management**: Handles file inputs/outputs during agent execution - -## Benefits - -1. **Consistency**: Unified implementation reduces code duplication and maintenance overhead -1. **Flexibility**: Easy to extend with new strategies or customize existing ones -1. **Performance**: Optimized for each model's capabilities to ensure best performance -1. **Reliability**: Built-in safety mechanisms and error handling +- **Model runtime**: delegates to `ModelInstance.invoke_llm` for both streaming and non-streaming calls. +- **Tool system**: defaults to `ToolEngine.generic_invoke`, with `tool_invoke_hook` for custom callers. +- **Files**: flows through `File` objects for tool inputs/outputs and model-context attachments. +- **Execution context**: `ExecutionContext` fields (user/app/conversation/message) propagate to tool invocations and logging. 
diff --git a/api/core/app/apps/workflow_app_runner.py b/api/core/app/apps/workflow_app_runner.py index 6ce33c98ee..3b02683764 100644 --- a/api/core/app/apps/workflow_app_runner.py +++ b/api/core/app/apps/workflow_app_runner.py @@ -457,6 +457,9 @@ class WorkflowBasedAppRunner: elif isinstance(event, NodeRunStreamChunkEvent): from core.app.entities.queue_entities import ChunkType as QueueChunkType + if event.is_final and not event.chunk: + return + self._publish_event( QueueTextChunkEvent( text=event.chunk, diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 2a308c6ecd..6cbd48e27b 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -1,4 +1,5 @@ import logging +import re import time from collections.abc import Generator from threading import Thread @@ -68,6 +69,8 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): EasyUIBasedGenerateTaskPipeline is a class that generate stream output and state management for Application. """ + _THINK_PATTERN = re.compile(r"]*>(.*?)", re.IGNORECASE | re.DOTALL) + _task_state: EasyUITaskState _application_generate_entity: Union[ChatAppGenerateEntity, CompletionAppGenerateEntity, AgentChatAppGenerateEntity] @@ -441,7 +444,13 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): for thought in agent_thoughts: # Add thought/reasoning if thought.thought: - reasoning_list.append(thought.thought) + reasoning_text = thought.thought + if " blocks and clean the final answer + clean_answer, reasoning_content = self._split_reasoning_from_answer(answer) + if reasoning_content: + answer = clean_answer + llm_result.message.content = clean_answer + llm_result.reasoning_content = reasoning_content + message.answer = clean_answer if reasoning_content: reasoning_list = [reasoning_content] # Content comes first, then reasoning @@ -493,6 +510,19 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): ) session.add(generation_detail) + @classmethod + def _split_reasoning_from_answer(cls, text: str) -> tuple[str, str]: + """ + Extract reasoning segments from blocks and return (clean_text, reasoning). + """ + matches = cls._THINK_PATTERN.findall(text) + reasoning_content = "\n".join(match.strip() for match in matches) if matches else "" + + clean_text = cls._THINK_PATTERN.sub("", text) + clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip() + + return clean_text, reasoning_content or "" + def _handle_stop(self, event: QueueStopEvent): """ Handle stop. 
diff --git a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py index 0a3189f398..a45d1d1046 100644 --- a/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py +++ b/api/core/repositories/sqlalchemy_workflow_node_execution_repository.py @@ -474,57 +474,67 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) outputs = execution.outputs or {} metadata = execution.metadata or {} - # Extract reasoning_content from outputs - reasoning_content = outputs.get("reasoning_content") - reasoning_list: list[str] = [] - if reasoning_content: - # reasoning_content could be a string or already a list - if isinstance(reasoning_content, str): - reasoning_list = [reasoning_content] if reasoning_content.strip() else [] - elif isinstance(reasoning_content, list): - # Filter out empty or whitespace-only strings - reasoning_list = [r.strip() for r in reasoning_content if isinstance(r, str) and r.strip()] + reasoning_list = self._extract_reasoning(outputs) + tool_calls_list = self._extract_tool_calls(metadata.get(WorkflowNodeExecutionMetadataKey.AGENT_LOG)) - # Extract tool_calls from metadata.agent_log - tool_calls_list: list[dict] = [] - agent_log = metadata.get(WorkflowNodeExecutionMetadataKey.AGENT_LOG) - if agent_log and isinstance(agent_log, list): - for log in agent_log: - # Each log entry has label, data, status, etc. - log_data = log.data if hasattr(log, "data") else log.get("data", {}) - tool_name = log_data.get("tool_name") - # Only include tool calls with valid tool_name - if tool_name and str(tool_name).strip(): - tool_calls_list.append( - { - "id": log_data.get("tool_call_id", ""), - "name": tool_name, - "arguments": json.dumps(log_data.get("tool_args", {})), - "result": str(log_data.get("output", "")), - } - ) - - # Only save if there's meaningful generation detail (reasoning or tool calls) - has_valid_reasoning = bool(reasoning_list) - has_valid_tool_calls = bool(tool_calls_list) - - if not has_valid_reasoning and not has_valid_tool_calls: + if not reasoning_list and not tool_calls_list: return - # Build sequence based on content, reasoning, and tool_calls - sequence: list[dict] = [] - text = outputs.get("text", "") + sequence = self._build_generation_sequence(outputs.get("text", ""), reasoning_list, tool_calls_list) + self._upsert_generation_detail(session, execution, reasoning_list, tool_calls_list, sequence) - # For now, use a simple sequence: content -> reasoning -> tool_calls - # This can be enhanced later to track actual streaming order + def _extract_reasoning(self, outputs: Mapping[str, Any]) -> list[str]: + """Extract reasoning_content as a clean list of non-empty strings.""" + reasoning_content = outputs.get("reasoning_content") + if isinstance(reasoning_content, str): + trimmed = reasoning_content.strip() + return [trimmed] if trimmed else [] + if isinstance(reasoning_content, list): + return [item.strip() for item in reasoning_content if isinstance(item, str) and item.strip()] + return [] + + def _extract_tool_calls(self, agent_log: Any) -> list[dict[str, str]]: + """Extract tool call records from agent logs.""" + if not agent_log or not isinstance(agent_log, list): + return [] + + tool_calls: list[dict[str, str]] = [] + for log in agent_log: + log_data = log.data if hasattr(log, "data") else (log.get("data", {}) if isinstance(log, dict) else {}) + tool_name = log_data.get("tool_name") + if tool_name and str(tool_name).strip(): + 
tool_calls.append( + { + "id": log_data.get("tool_call_id", ""), + "name": tool_name, + "arguments": json.dumps(log_data.get("tool_args", {})), + "result": str(log_data.get("output", "")), + } + ) + return tool_calls + + def _build_generation_sequence( + self, text: str, reasoning_list: list[str], tool_calls_list: list[dict[str, str]] + ) -> list[dict[str, Any]]: + """Build a simple content/reasoning/tool_call sequence.""" + sequence: list[dict[str, Any]] = [] if text: sequence.append({"type": "content", "start": 0, "end": len(text)}) - for i, _ in enumerate(reasoning_list): - sequence.append({"type": "reasoning", "index": i}) - for i in range(len(tool_calls_list)): - sequence.append({"type": "tool_call", "index": i}) + for index in range(len(reasoning_list)): + sequence.append({"type": "reasoning", "index": index}) + for index in range(len(tool_calls_list)): + sequence.append({"type": "tool_call", "index": index}) + return sequence - # Check if generation detail already exists for this node execution + def _upsert_generation_detail( + self, + session, + execution: WorkflowNodeExecution, + reasoning_list: list[str], + tool_calls_list: list[dict[str, str]], + sequence: list[dict[str, Any]], + ) -> None: + """Insert or update LLMGenerationDetail with serialized fields.""" existing = ( session.query(LLMGenerationDetail) .filter_by( @@ -534,23 +544,26 @@ class SQLAlchemyWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository) .first() ) + reasoning_json = json.dumps(reasoning_list) if reasoning_list else None + tool_calls_json = json.dumps(tool_calls_list) if tool_calls_list else None + sequence_json = json.dumps(sequence) if sequence else None + if existing: - # Update existing record - existing.reasoning_content = json.dumps(reasoning_list) if reasoning_list else None - existing.tool_calls = json.dumps(tool_calls_list) if tool_calls_list else None - existing.sequence = json.dumps(sequence) if sequence else None - else: - # Create new record - generation_detail = LLMGenerationDetail( - tenant_id=self._tenant_id, - app_id=self._app_id, - workflow_run_id=execution.workflow_execution_id, - node_id=execution.node_id, - reasoning_content=json.dumps(reasoning_list) if reasoning_list else None, - tool_calls=json.dumps(tool_calls_list) if tool_calls_list else None, - sequence=json.dumps(sequence) if sequence else None, - ) - session.add(generation_detail) + existing.reasoning_content = reasoning_json + existing.tool_calls = tool_calls_json + existing.sequence = sequence_json + return + + generation_detail = LLMGenerationDetail( + tenant_id=self._tenant_id, + app_id=self._app_id, + workflow_run_id=execution.workflow_execution_id, + node_id=execution.node_id, + reasoning_content=reasoning_json, + tool_calls=tool_calls_json, + sequence=sequence_json, + ) + session.add(generation_detail) def get_db_models_by_workflow_run( self, diff --git a/api/core/workflow/graph_engine/response_coordinator/coordinator.py b/api/core/workflow/graph_engine/response_coordinator/coordinator.py index 631440c6c1..c5ea94ba80 100644 --- a/api/core/workflow/graph_engine/response_coordinator/coordinator.py +++ b/api/core/workflow/graph_engine/response_coordinator/coordinator.py @@ -391,12 +391,9 @@ class ResponseStreamCoordinator: # Determine which node to attribute the output to # For special selectors (sys, env, conversation), use the active response node # For regular selectors, use the source node - if self._active_session and source_selector_prefix not in self._graph.nodes: - # Special selector - use active response 
node - output_node_id = self._active_session.node_id - else: - # Regular node selector - output_node_id = source_selector_prefix + active_session = self._active_session + special_selector = bool(active_session and source_selector_prefix not in self._graph.nodes) + output_node_id = active_session.node_id if special_selector and active_session else source_selector_prefix execution_id = self._get_or_create_execution_id(output_node_id) # Check if there's a direct stream for this selector @@ -404,65 +401,27 @@ class ResponseStreamCoordinator: tuple(segment.selector) in self._stream_buffers or tuple(segment.selector) in self._closed_streams ) - if has_direct_stream: - # Stream all available chunks for direct stream - while self._has_unread_stream(segment.selector): - if event := self._pop_stream_chunk(segment.selector): - # For special selectors, update the event to use active response node's information - if self._active_session and source_selector_prefix not in self._graph.nodes: - response_node = self._graph.nodes[self._active_session.node_id] - updated_event = NodeRunStreamChunkEvent( - id=execution_id, - node_id=response_node.id, - node_type=response_node.node_type, - selector=event.selector, - chunk=event.chunk, - is_final=event.is_final, + stream_targets = [segment.selector] if has_direct_stream else sorted(self._find_child_streams(segment.selector)) + + if stream_targets: + all_complete = True + + for target_selector in stream_targets: + while self._has_unread_stream(target_selector): + if event := self._pop_stream_chunk(target_selector): + events.append( + self._rewrite_stream_event( + event=event, + output_node_id=output_node_id, + execution_id=execution_id, + special_selector=bool(special_selector), + ) ) - events.append(updated_event) - else: - events.append(event) - # Check if stream is closed - if self._is_stream_closed(segment.selector): - is_complete = True + if not self._is_stream_closed(target_selector): + all_complete = False - else: - # No direct stream - check for child field streams (for object types) - child_streams = self._find_child_streams(segment.selector) - - if child_streams: - # Process all child streams - all_children_complete = True - - for child_selector in sorted(child_streams): - # Stream all available chunks from this child - while self._has_unread_stream(child_selector): - if event := self._pop_stream_chunk(child_selector): - # Forward child stream event - if self._active_session and source_selector_prefix not in self._graph.nodes: - response_node = self._graph.nodes[self._active_session.node_id] - updated_event = NodeRunStreamChunkEvent( - id=execution_id, - node_id=response_node.id, - node_type=response_node.node_type, - selector=event.selector, - chunk=event.chunk, - is_final=event.is_final, - chunk_type=event.chunk_type, - tool_call=event.tool_call, - tool_result=event.tool_result, - ) - events.append(updated_event) - else: - events.append(event) - - # Check if this child stream is complete - if not self._is_stream_closed(child_selector): - all_children_complete = False - - # Object segment is complete only when all children are complete - is_complete = all_children_complete + is_complete = all_complete # Fallback: check if scalar value exists in variable pool if not is_complete and not has_direct_stream: @@ -485,6 +444,28 @@ class ResponseStreamCoordinator: return events, is_complete + def _rewrite_stream_event( + self, + event: NodeRunStreamChunkEvent, + output_node_id: str, + execution_id: str, + special_selector: bool, + ) -> NodeRunStreamChunkEvent: 
+ """Rewrite event to attribute to active response node when selector is special.""" + if not special_selector: + return event + + return self._create_stream_chunk_event( + node_id=output_node_id, + execution_id=execution_id, + selector=event.selector, + chunk=event.chunk, + is_final=event.is_final, + chunk_type=event.chunk_type, + tool_call=event.tool_call, + tool_result=event.tool_result, + ) + def _process_text_segment(self, segment: TextSegment) -> Sequence[NodeRunStreamChunkEvent]: """Process a text segment. Returns (events, is_complete).""" assert self._active_session is not None diff --git a/api/models/model.py b/api/models/model.py index ba075e2474..32be20e60a 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -1203,6 +1203,7 @@ class Message(Base): .all() ) + # FIXME (Novice) -- It's easy to cause N+1 query problem here. @property def generation_detail(self) -> dict[str, Any] | None: """ diff --git a/api/models/workflow.py b/api/models/workflow.py index bc229fb4e4..5131177836 100644 --- a/api/models/workflow.py +++ b/api/models/workflow.py @@ -695,6 +695,10 @@ class WorkflowRun(Base): def workflow(self): return db.session.query(Workflow).where(Workflow.id == self.workflow_id).first() + @property + def outputs_as_generation(self): + return is_generation_outputs(self.outputs_dict) + def to_dict(self): return { "id": self.id, @@ -708,7 +712,7 @@ class WorkflowRun(Base): "inputs": self.inputs_dict, "status": self.status, "outputs": self.outputs_dict, - "outputs_as_generation": is_generation_outputs(self.outputs_dict), + "outputs_as_generation": self.outputs_as_generation, "error": self.error, "elapsed_time": self.elapsed_time, "total_tokens": self.total_tokens, diff --git a/api/tests/unit_tests/core/agent/__init__.py b/api/tests/unit_tests/core/agent/__init__.py index a9ccd45f4b..e7c478bf83 100644 --- a/api/tests/unit_tests/core/agent/__init__.py +++ b/api/tests/unit_tests/core/agent/__init__.py @@ -1,3 +1,4 @@ """ Mark agent test modules as a package to avoid import name collisions. 
""" + diff --git a/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py new file mode 100644 index 0000000000..6a8a691a25 --- /dev/null +++ b/api/tests/unit_tests/core/app/apps/test_workflow_app_runner_stream_chunk.py @@ -0,0 +1,48 @@ +from unittest.mock import MagicMock + +from core.app.apps.base_app_queue_manager import PublishFrom +from core.app.apps.workflow_app_runner import WorkflowBasedAppRunner +from core.workflow.graph_events import NodeRunStreamChunkEvent +from core.workflow.nodes import NodeType + + +class DummyQueueManager: + def __init__(self) -> None: + self.published = [] + + def publish(self, event, publish_from: PublishFrom) -> None: + self.published.append((event, publish_from)) + + +def test_skip_empty_final_chunk() -> None: + queue_manager = DummyQueueManager() + runner = WorkflowBasedAppRunner(queue_manager=queue_manager, app_id="app") + + empty_final_event = NodeRunStreamChunkEvent( + id="exec", + node_id="node", + node_type=NodeType.LLM, + selector=["node", "text"], + chunk="", + is_final=True, + ) + + runner._handle_event(workflow_entry=MagicMock(), event=empty_final_event) + assert queue_manager.published == [] + + normal_event = NodeRunStreamChunkEvent( + id="exec", + node_id="node", + node_type=NodeType.LLM, + selector=["node", "text"], + chunk="hi", + is_final=False, + ) + + runner._handle_event(workflow_entry=MagicMock(), event=normal_event) + + assert len(queue_manager.published) == 1 + published_event, publish_from = queue_manager.published[0] + assert publish_from == PublishFrom.APPLICATION_MANAGER + assert published_event.text == "hi" + diff --git a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py index c6b3797ce2..822b6a808f 100644 --- a/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py +++ b/api/tests/unit_tests/core/workflow/graph_engine/test_response_coordinator.py @@ -6,6 +6,7 @@ from core.workflow.entities.tool_entities import ToolResultStatus from core.workflow.enums import NodeType from core.workflow.graph.graph import Graph from core.workflow.graph_engine.response_coordinator.coordinator import ResponseStreamCoordinator +from core.workflow.graph_engine.response_coordinator.session import ResponseSession from core.workflow.graph_events import ( ChunkType, NodeRunStreamChunkEvent, @@ -13,6 +14,7 @@ from core.workflow.graph_events import ( ToolResult, ) from core.workflow.nodes.base.entities import BaseNodeData +from core.workflow.nodes.base.template import Template, VariableSegment from core.workflow.runtime import VariablePool @@ -186,3 +188,44 @@ class TestResponseCoordinatorObjectStreaming: assert ("node1", "generation", "content") in children assert ("node1", "generation", "tool_calls") in children assert ("node1", "generation", "thought") in children + + def test_special_selector_rewrites_to_active_response_node(self): + """Ensure special selectors attribute streams to the active response node.""" + graph = MagicMock(spec=Graph) + variable_pool = MagicMock(spec=VariablePool) + + response_node = MagicMock() + response_node.id = "response_node" + response_node.node_type = NodeType.ANSWER + graph.nodes = {"response_node": response_node} + graph.root_node = response_node + + coordinator = ResponseStreamCoordinator(variable_pool, graph) + coordinator.track_node_execution("response_node", "exec_resp") + + 
coordinator._active_session = ResponseSession( + node_id="response_node", + template=Template(segments=[VariableSegment(selector=["sys", "foo"])]), + ) + + event = NodeRunStreamChunkEvent( + id="stream_1", + node_id="llm_node", + node_type=NodeType.LLM, + selector=["sys", "foo"], + chunk="hi", + is_final=True, + chunk_type=ChunkType.TEXT, + ) + + coordinator._stream_buffers[("sys", "foo")] = [event] + coordinator._stream_positions[("sys", "foo")] = 0 + coordinator._closed_streams.add(("sys", "foo")) + + events, is_complete = coordinator._process_variable_segment(VariableSegment(selector=["sys", "foo"])) + + assert is_complete + assert len(events) == 1 + rewritten = events[0] + assert rewritten.node_id == "response_node" + assert rewritten.id == "exec_resp" diff --git a/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py b/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py index 9d793f804f..55f6525bcc 100644 --- a/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py +++ b/api/tests/unit_tests/core/workflow/nodes/test_llm_node_streaming.py @@ -146,3 +146,4 @@ def test_serialize_tool_call_strips_files_to_ids(): assert serialized["name"] == "do" assert serialized["arguments"] == '{"a":1}' assert serialized["output"] == "ok" + From f55faae31b0e9e4b308056c6ad956737f3abd7a0 Mon Sep 17 00:00:00 2001 From: Novice Date: Thu, 25 Dec 2025 13:59:38 +0800 Subject: [PATCH 17/18] chore: strip reasoning from chatflow answers and persist generation details --- .../advanced_chat/generate_task_pipeline.py | 18 +++++++++++++++++- .../easy_ui_based_generate_task_pipeline.py | 25 +++++++++++++++++++++---- 2 files changed, 38 insertions(+), 5 deletions(-) diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 0d5e1e5dfd..53fa27cca7 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -529,7 +529,9 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): match chunk_type: case ChunkType.TEXT: self._stream_buffer.record_text_chunk(delta_text) + self._task_state.answer += delta_text case ChunkType.THOUGHT: + # Reasoning should not be part of final answer text self._stream_buffer.record_thought_chunk(delta_text) case ChunkType.TOOL_CALL: self._stream_buffer.record_tool_call( @@ -542,8 +544,8 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): tool_call_id=tool_call_id, result=delta_text, ) + self._task_state.answer += delta_text - self._task_state.answer += delta_text yield self._message_cycle_manager.message_to_stream_response( answer=delta_text, message_id=self._message_id, @@ -920,6 +922,7 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): # If there are assistant files, remove markdown image links from answer answer_text = self._task_state.answer + answer_text = self._strip_think_blocks(answer_text) if self._recorded_files: # Remove markdown image links since we're storing files separately answer_text = re.sub(r"!\[.*?\]\(.*?\)", "", answer_text).strip() @@ -971,6 +974,19 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): ] session.add_all(message_files) + # Save generation detail (reasoning/tool calls/sequence) from stream buffer + self._save_generation_detail(session=session, message=message) + + @staticmethod + def _strip_think_blocks(text: str) -> str: + """Remove <think>...</think>
blocks (including their content) from text.""" + if not text or "<think" not in text: + return text + clean_text = re.sub(r"<think[^>]*>.*?</think>", "", text, flags=re.IGNORECASE | re.DOTALL) + clean_text = re.sub(r"\n\s*\n", "\n\n", clean_text).strip() + return clean_text + def _save_generation_detail(self, *, session: Session, message: Message) -> None: """ Save LLM generation detail for Chatflow using stream event buffer. diff --git a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py index 6cbd48e27b..c4ea428270 100644 --- a/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py +++ b/api/core/app/task_pipeline/easy_ui_based_generate_task_pipeline.py @@ -441,6 +441,7 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): if agent_thoughts: # Agent-Chat mode: merge MessageAgentThought records content_pos = 0 + cleaned_answer_parts: list[str] = [] for thought in agent_thoughts: # Add thought/reasoning if thought.thought: @@ -466,10 +467,26 @@ class EasyUIBasedGenerateTaskPipeline(BasedGenerateTaskPipeline): # Add answer content if present if thought.answer: - start = content_pos - end = content_pos + len(thought.answer) - sequence.append({"type": "content", "start": start, "end": end}) - content_pos = end + content_text = thought.answer + if "<think" in content_text: From: Novice Date: Tue, 30 Dec 2025 10:19:40 +0800 Subject: [PATCH 18/18] chore: remove frontend changes --- web/.env.example | 3 + web/.gitignore | 10 + web/.oxlintrc.json | 144 - web/.storybook/preview.tsx | 2 +- web/.storybook/utils/form-story-wrapper.tsx | 9 +- web/.vscode/extensions.json | 1 - web/.vscode/launch.json | 28 +- web/.vscode/settings.example.json | 66 +- web/README.md | 4 +- web/__mocks__/mime.js | 0 web/__mocks__/provider-context.ts | 40 +- web/__mocks__/react-i18next.ts | 40 - web/__tests__/check-i18n.test.ts | 16 +- web/__tests__/description-validation.test.tsx | 12 +- .../document-detail-navigation-fix.test.tsx | 64 +- web/__tests__/document-list-sorting.test.tsx | 12 +- web/__tests__/embedded-user-id-auth.test.tsx | 55 +- web/__tests__/embedded-user-id-store.test.tsx | 67 +- .../goto-anything/command-selector.test.tsx | 23 +- .../goto-anything/match-action.test.ts | 35 +- .../goto-anything/scope-command-tags.test.tsx | 18 +- .../search-error-handling.test.ts | 27 +- .../slash-command-modes.test.tsx | 49 +- web/__tests__/i18n-upload-features.test.ts | 59 +- web/__tests__/navigation-utils.test.ts | 68 +- web/__tests__/real-browser-flicker.test.tsx | 85 +- web/__tests__/unified-tags-logic.test.ts | 12 +- .../workflow-onboarding-integration.test.tsx | 73 +- .../workflow-parallel-limit.test.tsx | 191 +- web/__tests__/xss-prevention.test.tsx | 9 +- .../[appId]/annotations/page.tsx | 2 +- .../[appId]/configuration/page.tsx | 2 +- .../[appId]/develop/page.tsx | 4 +- .../(appDetailLayout)/[appId]/layout-main.tsx | 62 +- .../(appDetailLayout)/[appId]/logs/page.tsx | 2 +- .../[appId]/overview/card-view.tsx | 62 +- .../[appId]/overview/chart-view.tsx | 72 +- .../overview/long-time-range-picker.tsx | 24 +- .../[appId]/overview/page.tsx | 4 +- .../time-range-picker/date-picker.tsx | 23 +- .../overview/time-range-picker/index.tsx | 31 +- .../time-range-picker/range-selector.tsx | 30 +- .../svg-attribute-error-reproduction.spec.tsx | 12 +- .../overview/tracing/config-button.tsx | 13 +- .../[appId]/overview/tracing/config-popup.tsx | 113 +- .../[appId]/overview/tracing/field.tsx | 15 +- .../[appId]/overview/tracing/panel.tsx | 67 +- .../tracing/provider-config-modal.tsx | 857 ++-- .../overview/tracing/provider-panel.tsx 
| 35 +- .../[appId]/overview/tracing/tracing-icon.tsx | 6 +- .../[appId]/workflow/page.tsx | 2 +- .../app/(appDetailLayout)/layout.tsx | 5 +- .../[datasetId]/api/page.tsx | 2 +- .../documents/[documentId]/page.tsx | 4 +- .../documents/[documentId]/settings/page.tsx | 4 +- .../documents/create-from-pipeline/page.tsx | 2 +- .../[datasetId]/documents/create/page.tsx | 2 +- .../[datasetId]/documents/page.tsx | 2 +- .../[datasetId]/hitTesting/page.tsx | 2 +- .../[datasetId]/layout-main.tsx | 42 +- .../[datasetId]/pipeline/page.tsx | 2 +- .../[datasetId]/settings/page.tsx | 15 +- .../datasets/(datasetDetailLayout)/layout.tsx | 2 +- .../(commonLayout)/datasets/connect/page.tsx | 2 +- .../datasets/create-from-pipeline/page.tsx | 2 +- .../(commonLayout)/datasets/create/page.tsx | 2 +- web/app/(commonLayout)/datasets/layout.tsx | 6 +- .../(commonLayout)/education-apply/page.tsx | 8 +- web/app/(commonLayout)/explore/apps/page.tsx | 2 +- .../explore/installed/[appId]/page.tsx | 2 +- web/app/(commonLayout)/explore/layout.tsx | 4 +- web/app/(commonLayout)/layout.tsx | 26 +- web/app/(commonLayout)/plugins/page.tsx | 4 +- web/app/(commonLayout)/tools/page.tsx | 6 +- web/app/(shareLayout)/chat/[token]/page.tsx | 2 +- .../(shareLayout)/chatbot/[token]/page.tsx | 2 +- .../(shareLayout)/completion/[token]/page.tsx | 2 +- .../components/authenticated-layout.tsx | 57 +- web/app/(shareLayout)/components/splash.tsx | 35 +- .../webapp-reset-password/check-code/page.tsx | 76 +- .../webapp-reset-password/layout.tsx | 47 +- .../webapp-reset-password/page.tsx | 80 +- .../set-password/page.tsx | 83 +- .../webapp-signin/check-code/page.tsx | 97 +- .../components/external-member-sso-auth.tsx | 21 +- .../components/mail-and-code-auth.tsx | 37 +- .../components/mail-and-password-auth.tsx | 146 +- .../webapp-signin/components/sso-auth.tsx | 13 +- .../(shareLayout)/webapp-signin/layout.tsx | 40 +- .../webapp-signin/normalForm.tsx | 248 +- web/app/(shareLayout)/webapp-signin/page.tsx | 47 +- .../(shareLayout)/workflow/[token]/page.tsx | 2 +- .../account-page/AvatarWithEdit.tsx | 73 +- .../account-page/email-change-modal.tsx | 153 +- .../(commonLayout)/account-page/index.tsx | 179 +- web/app/account/(commonLayout)/avatar.tsx | 38 +- .../delete-account/components/check-email.tsx | 49 +- .../delete-account/components/feed-back.tsx | 49 +- .../components/verify-email.tsx | 53 +- .../(commonLayout)/delete-account/index.tsx | 39 +- web/app/account/(commonLayout)/header.tsx | 38 +- web/app/account/(commonLayout)/layout.tsx | 18 +- web/app/account/(commonLayout)/page.tsx | 12 +- web/app/account/oauth/authorize/layout.tsx | 53 +- web/app/account/oauth/authorize/page.tsx | 95 +- web/app/activate/activateForm.tsx | 47 +- web/app/activate/page.tsx | 18 +- ...wr-initializer.tsx => app-initializer.tsx} | 30 +- web/app/components/app-sidebar/app-info.tsx | 183 +- .../components/app-sidebar/app-operations.tsx | 47 +- .../app-sidebar/app-sidebar-dropdown.tsx | 55 +- web/app/components/app-sidebar/basic.tsx | 125 +- .../app-sidebar/dataset-info/dropdown.tsx | 49 +- .../app-sidebar/dataset-info/index.spec.tsx | 379 ++ .../app-sidebar/dataset-info/index.tsx | 37 +- .../app-sidebar/dataset-info/menu-item.tsx | 8 +- .../app-sidebar/dataset-info/menu.tsx | 22 +- .../app-sidebar/dataset-sidebar-dropdown.tsx | 69 +- web/app/components/app-sidebar/index.tsx | 33 +- .../components/app-sidebar/navLink.spec.tsx | 19 +- web/app/components/app-sidebar/navLink.tsx | 47 +- .../sidebar-animation-issues.spec.tsx | 32 +- 
.../text-squeeze-fix-verification.spec.tsx | 16 +- .../components/app-sidebar/toggle-button.tsx | 26 +- .../edit-item/index.spec.tsx | 20 +- .../add-annotation-modal/edit-item/index.tsx | 18 +- .../add-annotation-modal/index.spec.tsx | 53 +- .../annotation/add-annotation-modal/index.tsx | 38 +- .../app/annotation/batch-action.spec.tsx | 42 + .../app/annotation/batch-action.tsx | 41 +- .../csv-downloader.spec.tsx | 72 + .../csv-downloader.tsx | 50 +- .../csv-uploader.spec.tsx | 19 +- .../csv-uploader.tsx | 41 +- .../batch-add-annotation-modal/index.spec.tsx | 165 + .../batch-add-annotation-modal/index.tsx | 41 +- .../index.spec.tsx | 43 +- .../index.tsx | 6 +- .../edit-item/index.spec.tsx | 83 +- .../edit-annotation-modal/edit-item/index.tsx | 143 +- .../edit-annotation-modal/index.spec.tsx | 323 +- .../edit-annotation-modal/index.tsx | 103 +- .../app/annotation/empty-element.spec.tsx | 13 + .../app/annotation/empty-element.tsx | 23 +- .../components/app/annotation/filter.spec.tsx | 332 ++ web/app/components/app/annotation/filter.tsx | 16 +- .../app/annotation/header-opts/index.spec.tsx | 461 ++ .../app/annotation/header-opts/index.tsx | 85 +- .../components/app/annotation/index.spec.tsx | 243 + web/app/components/app/annotation/index.tsx | 138 +- .../components/app/annotation/list.spec.tsx | 116 + web/app/components/app/annotation/list.tsx | 63 +- .../index.spec.tsx | 43 +- .../remove-annotation-confirm-modal/index.tsx | 4 +- web/app/components/app/annotation/type.ts | 6 + .../hit-history-no-data.tsx | 10 +- .../view-annotation-modal/index.spec.tsx | 162 + .../view-annotation-modal/index.tsx | 175 +- .../access-control-dialog.tsx | 8 +- .../access-control-item.tsx | 26 +- .../access-control.spec.tsx | 389 ++ .../add-member-or-group-pop.tsx | 211 +- .../app/app-access-control/index.tsx | 96 +- .../specific-groups-or-members.tsx | 134 +- .../app/app-publisher/features-wrapper.tsx | 22 +- .../components/app/app-publisher/index.tsx | 451 +- .../publish-with-multiple-model.tsx | 43 +- .../app/app-publisher/suggested-action.tsx | 18 +- .../app/app-publisher/version-info-modal.tsx | 92 +- .../base/feature-panel/index.spec.tsx | 71 + .../base/feature-panel/index.tsx | 18 +- .../base/group-name/index.spec.tsx | 2 +- .../configuration/base/group-name/index.tsx | 12 +- .../base/operation-btn/index.spec.tsx | 20 +- .../base/operation-btn/index.tsx | 19 +- .../base/var-highlight/index.spec.tsx | 10 +- .../base/var-highlight/index.tsx | 6 +- .../cannot-query-dataset.spec.tsx | 10 +- .../warning-mask/cannot-query-dataset.tsx | 18 +- .../warning-mask/formatting-changed.spec.tsx | 14 +- .../base/warning-mask/formatting-changed.tsx | 20 +- .../warning-mask/has-not-set-api.spec.tsx | 14 +- .../base/warning-mask/has-not-set-api.tsx | 17 +- .../base/warning-mask/index.spec.tsx | 4 +- .../configuration/base/warning-mask/index.tsx | 13 +- .../config-prompt/advanced-prompt-input.tsx | 138 +- .../confirm-add-var/index.spec.tsx | 20 +- .../config-prompt/confirm-add-var/index.tsx | 33 +- .../conversation-history/edit-modal.spec.tsx | 20 +- .../conversation-history/edit-modal.tsx | 28 +- .../history-panel.spec.tsx | 16 +- .../conversation-history/history-panel.tsx | 49 +- .../config-prompt/index.spec.tsx | 36 +- .../app/configuration/config-prompt/index.tsx | 82 +- .../message-type-selector.spec.tsx | 12 +- .../config-prompt/message-type-selector.tsx | 30 +- .../prompt-editor-height-resize-wrap.spec.tsx | 18 +- .../prompt-editor-height-resize-wrap.tsx | 17 +- .../config-prompt/simple-prompt-input.tsx | 67 +- 
.../config-var/config-modal/config.ts | 4 +- .../config-var/config-modal/field.tsx | 14 +- .../config-var/config-modal/index.tsx | 185 +- .../config-var/config-modal/type-select.tsx | 32 +- .../config-var/config-select/index.spec.tsx | 14 +- .../config-var/config-select/index.tsx | 28 +- .../config-var/config-string/index.spec.tsx | 11 +- .../config-var/config-string/index.tsx | 3 +- .../configuration/config-var/index.spec.tsx | 394 ++ .../app/configuration/config-var/index.tsx | 193 +- .../config-var/input-type-icon.tsx | 2 +- .../configuration/config-var/modal-foot.tsx | 8 +- .../select-type-item/index.spec.tsx | 12 +- .../config-var/select-type-item/index.tsx | 22 +- .../config-var/select-var-type.tsx | 35 +- .../app/configuration/config-var/var-item.tsx | 40 +- .../config-vision/index.spec.tsx | 228 + .../app/configuration/config-vision/index.tsx | 41 +- .../config-vision/param-config-content.tsx | 61 +- .../config-vision/param-config.tsx | 16 +- .../config/agent-setting-button.spec.tsx | 103 + .../config/agent-setting-button.tsx | 17 +- .../config/agent/agent-setting/index.spec.tsx | 108 + .../config/agent/agent-setting/index.tsx | 83 +- .../agent/agent-setting/item-panel.spec.tsx | 21 + .../config/agent/agent-setting/item-panel.tsx | 15 +- .../config/agent/agent-tools/index.spec.tsx | 469 ++ .../config/agent/agent-tools/index.tsx | 178 +- .../setting-built-in-tool.spec.tsx | 249 + .../agent-tools/setting-built-in-tool.tsx | 129 +- .../config/agent/prompt-editor.tsx | 41 +- .../assistant-type-picker/index.spec.tsx | 865 ++++ .../config/assistant-type-picker/index.tsx | 71 +- .../config/automatic/automatic-btn.tsx | 10 +- .../config/automatic/get-automatic-res.tsx | 178 +- .../config/automatic/idea-output.tsx | 26 +- .../instruction-editor-in-workflow.tsx | 9 +- .../config/automatic/instruction-editor.tsx | 62 +- .../automatic/prompt-res-in-workflow.tsx | 10 +- .../config/automatic/prompt-res.tsx | 9 +- .../config/automatic/prompt-toast.tsx | 16 +- .../config/automatic/res-placeholder.tsx | 12 +- .../configuration/config/automatic/result.tsx | 91 +- .../config/automatic/version-selector.tsx | 41 +- .../code-generator/get-code-generator-res.tsx | 103 +- .../config/config-audio.spec.tsx | 127 + .../app/configuration/config/config-audio.tsx | 39 +- .../config/config-document.spec.tsx | 120 + .../configuration/config/config-document.tsx | 39 +- .../app/configuration/config/index.spec.tsx | 255 + .../app/configuration/config/index.tsx | 22 +- .../ctrl-btn-group/index.spec.tsx | 10 +- .../configuration/ctrl-btn-group/index.tsx | 8 +- .../dataset-config/card-item/index.spec.tsx | 31 +- .../dataset-config/card-item/index.tsx | 66 +- .../dataset-config/context-var/index.spec.tsx | 21 +- .../dataset-config/context-var/index.tsx | 24 +- .../context-var/var-picker.spec.tsx | 24 +- .../dataset-config/context-var/var-picker.tsx | 81 +- .../dataset-config/index.spec.tsx | 1050 ++++ .../configuration/dataset-config/index.tsx | 111 +- .../params-config/config-content.spec.tsx | 391 ++ .../params-config/config-content.tsx | 147 +- .../params-config/index.spec.tsx | 266 + .../dataset-config/params-config/index.tsx | 52 +- .../params-config/weighted-score.spec.tsx | 81 + .../params-config/weighted-score.tsx | 24 +- .../select-dataset/index.spec.tsx | 141 + .../dataset-config/select-dataset/index.tsx | 62 +- .../settings-modal/index.spec.tsx | 539 +++ .../dataset-config/settings-modal/index.tsx | 238 +- .../settings-modal/retrieval-section.spec.tsx | 281 ++ .../settings-modal/retrieval-section.tsx | 218 + 
.../configuration/debug/chat-user-input.tsx | 29 +- .../debug-with-multiple-model/chat-item.tsx | 36 +- .../debug-with-multiple-model/context.tsx | 7 +- .../debug-with-multiple-model/debug-item.tsx | 55 +- .../debug-with-multiple-model/index.spec.tsx | 102 +- .../debug/debug-with-multiple-model/index.tsx | 26 +- .../model-parameter-trigger.tsx | 39 +- .../text-generation-item.tsx | 18 +- .../debug-with-single-model/index.spec.tsx | 1015 ++++ .../debug/debug-with-single-model/index.tsx | 38 +- .../app/configuration/debug/hooks.tsx | 28 +- .../app/configuration/debug/index.tsx | 139 +- .../hooks/use-advanced-prompt-config.ts | 18 +- .../components/app/configuration/index.tsx | 261 +- .../prompt-value-panel/index.spec.tsx | 125 + .../prompt-value-panel/index.tsx | 84 +- .../prompt-value-panel/utils.spec.ts | 29 + .../tools/external-data-tool-modal.tsx | 131 +- .../app/configuration/tools/index.tsx | 89 +- .../create-app-dialog/app-card/index.spec.tsx | 26 +- .../app/create-app-dialog/app-card/index.tsx | 39 +- .../create-app-dialog/app-list/index.spec.tsx | 136 + .../app/create-app-dialog/app-list/index.tsx | 179 +- .../app-list/sidebar.spec.tsx | 38 + .../create-app-dialog/app-list/sidebar.tsx | 57 +- .../app/create-app-dialog/index.spec.tsx | 163 +- .../app/create-app-dialog/index.tsx | 15 +- .../app/create-app-modal/index.spec.tsx | 162 + .../components/app/create-app-modal/index.tsx | 460 +- .../dsl-confirm-modal.tsx | 30 +- .../app/create-from-dsl-modal/index.tsx | 120 +- .../app/create-from-dsl-modal/uploader.tsx | 43 +- .../app/duplicate-modal/index.spec.tsx | 167 + .../components/app/duplicate-modal/index.tsx | 73 +- .../app/log-annotation/index.spec.tsx | 176 + .../components/app/log-annotation/index.tsx | 29 +- web/app/components/app/log/empty-element.tsx | 47 +- web/app/components/app/log/filter.tsx | 53 +- web/app/components/app/log/index.tsx | 82 +- web/app/components/app/log/list.tsx | 412 +- web/app/components/app/log/model-info.tsx | 43 +- web/app/components/app/log/var-panel.tsx | 39 +- .../overview/__tests__/toggle-logic.test.ts | 11 +- .../apikey-info-panel.test-utils.tsx | 37 +- .../overview/apikey-info-panel/cloud.spec.tsx | 4 +- .../overview/apikey-info-panel/index.spec.tsx | 4 +- .../app/overview/apikey-info-panel/index.tsx | 55 +- web/app/components/app/overview/app-card.tsx | 308 +- web/app/components/app/overview/app-chart.tsx | 258 +- .../app/overview/customize/index.spec.tsx | 14 +- .../app/overview/customize/index.tsx | 154 +- .../app/overview/embedded/index.spec.tsx | 121 + .../app/overview/embedded/index.tsx | 46 +- .../app/overview/settings/index.spec.tsx | 216 + .../app/overview/settings/index.tsx | 206 +- .../components/app/overview/trigger-card.tsx | 36 +- web/app/components/app/store.ts | 4 +- .../app/switch-app-modal/index.spec.tsx | 299 ++ .../components/app/switch-app-modal/index.tsx | 116 +- .../app/text-generate/item/index.tsx | 175 +- .../app/text-generate/item/result-tab.tsx | 14 +- .../text-generate/saved-items/index.spec.tsx | 67 + .../app/text-generate/saved-items/index.tsx | 85 +- .../saved-items/no-data/index.spec.tsx | 22 + .../saved-items/no-data/index.tsx | 27 +- .../app/type-selector/index.spec.tsx | 142 + .../components/app/type-selector/index.tsx | 170 +- .../app/workflow-log/detail.spec.tsx | 28 +- .../components/app/workflow-log/detail.tsx | 33 +- .../app/workflow-log/filter.spec.tsx | 56 +- .../components/app/workflow-log/filter.tsx | 41 +- .../app/workflow-log/index.spec.tsx | 542 ++- web/app/components/app/workflow-log/index.tsx | 71 
+- .../components/app/workflow-log/list.spec.tsx | 43 +- web/app/components/app/workflow-log/list.tsx | 148 +- .../workflow-log/trigger-by-display.spec.tsx | 14 +- .../app/workflow-log/trigger-by-display.tsx | 40 +- web/app/components/apps/app-card.spec.tsx | 590 ++- web/app/components/apps/app-card.tsx | 232 +- web/app/components/apps/empty.spec.tsx | 4 +- web/app/components/apps/empty.tsx | 10 +- web/app/components/apps/footer.spec.tsx | 4 +- web/app/components/apps/footer.tsx | 30 +- .../apps/hooks/use-apps-query-state.spec.ts | 363 -- .../apps/hooks/use-apps-query-state.spec.tsx | 248 + .../apps/hooks/use-apps-query-state.ts | 87 +- .../apps/hooks/use-dsl-drag-drop.spec.ts | 17 +- web/app/components/apps/index.spec.tsx | 17 +- web/app/components/apps/index.tsx | 12 +- web/app/components/apps/list.spec.tsx | 616 ++- web/app/components/apps/list.tsx | 113 +- web/app/components/apps/new-app-card.spec.tsx | 61 +- web/app/components/apps/new-app-card.tsx | 32 +- .../base/action-button/index.spec.tsx | 30 +- .../components/base/action-button/index.tsx | 14 +- .../base/agent-log-modal/detail.tsx | 35 +- .../base/agent-log-modal/index.stories.tsx | 8 +- .../components/base/agent-log-modal/index.tsx | 18 +- .../base/agent-log-modal/iteration.tsx | 16 +- .../base/agent-log-modal/result.tsx | 74 +- .../base/agent-log-modal/tool-call.tsx | 29 +- .../base/agent-log-modal/tracing.tsx | 4 +- .../base/amplitude/AmplitudeProvider.tsx | 3 +- web/app/components/base/answer-icon/index.tsx | 37 +- .../base/app-icon-picker/ImageInput.tsx | 52 +- .../base/app-icon-picker/index.stories.tsx | 3 +- .../components/base/app-icon-picker/index.tsx | 110 +- .../components/base/app-icon-picker/utils.ts | 2 +- .../components/base/app-icon/index.spec.tsx | 57 +- web/app/components/base/app-icon/index.tsx | 30 +- web/app/components/base/app-unavailable.tsx | 16 +- .../base/audio-btn/audio.player.manager.ts | 1 + .../base/audio-btn/index.stories.tsx | 2 +- web/app/components/base/audio-btn/index.tsx | 37 +- .../base/audio-gallery/AudioPlayer.tsx | 29 +- .../components/base/audio-gallery/index.tsx | 5 +- .../base/auto-height-textarea/index.tsx | 51 +- web/app/components/base/avatar/index.spec.tsx | 308 ++ .../components/base/avatar/index.stories.tsx | 5 +- web/app/components/base/avatar/index.tsx | 4 +- web/app/components/base/badge.tsx | 4 +- web/app/components/base/badge/index.spec.tsx | 360 ++ web/app/components/base/badge/index.tsx | 22 +- web/app/components/base/block-input/index.tsx | 62 +- web/app/components/base/button/add-button.tsx | 6 +- web/app/components/base/button/index.spec.tsx | 58 +- web/app/components/base/button/index.tsx | 16 +- .../components/base/button/sync-button.tsx | 10 +- .../__snapshots__/utils.spec.ts.snap | 12 +- .../base/chat/__tests__/utils.spec.ts | 6 +- .../chat/chat-with-history/chat-wrapper.tsx | 95 +- .../base/chat/chat-with-history/context.tsx | 14 +- .../chat-with-history/header-in-mobile.tsx | 56 +- .../chat/chat-with-history/header/index.tsx | 52 +- .../header/mobile-operation-dropdown.tsx | 22 +- .../chat-with-history/header/operation.tsx | 21 +- .../chat/chat-with-history/hooks.spec.tsx | 270 ++ .../base/chat/chat-with-history/hooks.tsx | 140 +- .../base/chat/chat-with-history/index.tsx | 27 +- .../chat-with-history/inputs-form/content.tsx | 33 +- .../chat-with-history/inputs-form/index.tsx | 40 +- .../inputs-form/view-form-dropdown.tsx | 24 +- .../chat/chat-with-history/sidebar/index.tsx | 77 +- .../chat/chat-with-history/sidebar/item.tsx | 10 +- 
.../chat/chat-with-history/sidebar/list.tsx | 6 +- .../chat-with-history/sidebar/operation.tsx | 29 +- .../sidebar/rename-modal.tsx | 20 +- .../base/chat/chat/answer/agent-content.tsx | 6 +- .../base/chat/chat/answer/basic-content.tsx | 4 +- .../base/chat/chat/answer/index.stories.tsx | 36 +- .../base/chat/chat/answer/index.tsx | 46 +- .../components/base/chat/chat/answer/more.tsx | 28 +- .../base/chat/chat/answer/operation.tsx | 241 +- .../chat/chat/answer/suggested-questions.tsx | 9 +- .../base/chat/chat/answer/tool-detail.tsx | 38 +- .../chat/chat/answer/workflow-process.tsx | 39 +- .../base/chat/chat/chat-input-area/index.tsx | 55 +- .../chat/chat/chat-input-area/operation.tsx | 38 +- .../base/chat/chat/check-input-forms-hooks.ts | 6 +- .../base/chat/chat/citation/index.tsx | 24 +- .../base/chat/chat/citation/popup.tsx | 87 +- .../chat/chat/citation/progress-tooltip.tsx | 16 +- .../base/chat/chat/citation/tooltip.tsx | 13 +- .../base/chat/chat/content-switch.tsx | 11 +- web/app/components/base/chat/chat/context.tsx | 8 +- web/app/components/base/chat/chat/hooks.ts | 66 +- web/app/components/base/chat/chat/index.tsx | 70 +- .../base/chat/chat/loading-anim/index.tsx | 4 +- .../components/base/chat/chat/log/index.tsx | 6 +- .../base/chat/chat/question.stories.tsx | 10 +- .../components/base/chat/chat/question.tsx | 106 +- .../base/chat/chat/thought/index.tsx | 4 +- .../components/base/chat/chat/try-to-ask.tsx | 16 +- web/app/components/base/chat/chat/type.ts | 7 +- web/app/components/base/chat/chat/utils.ts | 4 +- .../chat/embedded-chatbot/chat-wrapper.tsx | 85 +- .../base/chat/embedded-chatbot/context.tsx | 10 +- .../chat/embedded-chatbot/header/index.tsx | 74 +- .../base/chat/embedded-chatbot/hooks.spec.tsx | 257 + .../base/chat/embedded-chatbot/hooks.tsx | 105 +- .../base/chat/embedded-chatbot/index.tsx | 114 +- .../embedded-chatbot/inputs-form/content.tsx | 35 +- .../embedded-chatbot/inputs-form/index.tsx | 40 +- .../inputs-form/view-form-dropdown.tsx | 24 +- web/app/components/base/chat/types.ts | 12 +- web/app/components/base/chat/utils.ts | 16 +- .../components/base/checkbox-list/index.tsx | 149 +- .../checkbox/assets/indeterminate-icon.tsx | 4 +- .../components/base/checkbox/index.spec.tsx | 4 +- .../base/checkbox/index.stories.tsx | 9 +- web/app/components/base/checkbox/index.tsx | 4 +- web/app/components/base/chip/index.spec.tsx | 394 ++ .../components/base/chip/index.stories.tsx | 15 +- web/app/components/base/chip/index.tsx | 42 +- web/app/components/base/confirm/index.tsx | 34 +- .../components/base/content-dialog/index.tsx | 21 +- .../components/base/copy-feedback/index.tsx | 28 +- web/app/components/base/copy-icon/index.tsx | 24 +- .../components/base/corner-label/index.tsx | 6 +- .../calendar/days-of-week.tsx | 6 +- .../date-and-time-picker/calendar/index.tsx | 30 +- .../date-and-time-picker/calendar/item.tsx | 10 +- .../common/option-list-item.tsx | 6 +- .../date-picker/footer.tsx | 39 +- .../date-picker/header.tsx | 30 +- .../date-picker/index.tsx | 197 +- .../base/date-and-time-picker/hooks.ts | 42 +- .../date-and-time-picker/index.stories.tsx | 16 +- .../time-picker/footer.tsx | 23 +- .../time-picker/header.tsx | 8 +- .../time-picker/index.spec.tsx | 107 +- .../time-picker/index.tsx | 110 +- .../time-picker/options.tsx | 13 +- .../base/date-and-time-picker/types.ts | 2 +- .../date-and-time-picker/utils/dayjs.spec.ts | 31 +- .../base/date-and-time-picker/utils/dayjs.ts | 20 +- .../year-and-month-picker/footer.tsx | 14 +- .../year-and-month-picker/header.tsx | 14 +- 
.../year-and-month-picker/options.tsx | 11 +- web/app/components/base/dialog/index.tsx | 27 +- .../components/base/divider/index.spec.tsx | 1 - web/app/components/base/divider/index.tsx | 37 +- .../base/drawer-plus/index.stories.tsx | 6 +- web/app/components/base/drawer-plus/index.tsx | 21 +- web/app/components/base/drawer/index.spec.tsx | 42 +- .../components/base/drawer/index.stories.tsx | 6 +- web/app/components/base/drawer/index.tsx | 76 +- .../base/dropdown/index.stories.tsx | 9 +- web/app/components/base/dropdown/index.tsx | 38 +- web/app/components/base/effect/index.tsx | 4 +- .../base/emoji-picker/Inner.stories.tsx | 2 +- .../components/base/emoji-picker/Inner.tsx | 219 +- .../base/emoji-picker/index.stories.tsx | 2 +- .../components/base/emoji-picker/index.tsx | 74 +- .../base/encrypted-bottom/index.tsx | 27 +- .../components/base/error-boundary/index.tsx | 59 +- web/app/components/base/features/context.tsx | 8 +- web/app/components/base/features/hooks.ts | 2 +- .../base/features/index.stories.tsx | 2 +- .../annotation-ctrl-button.tsx | 24 +- .../annotation-reply/config-param-modal.tsx | 73 +- .../annotation-reply/config-param.tsx | 10 +- .../annotation-reply/index.tsx | 68 +- .../score-slider/base-slider/index.tsx | 40 +- .../annotation-reply/score-slider/index.tsx | 14 +- .../annotation-reply/use-annotation-config.ts | 11 +- .../features/new-feature-panel/citation.tsx | 25 +- .../conversation-opener/index.tsx | 39 +- .../conversation-opener/modal.tsx | 94 +- .../new-feature-panel/dialog-wrapper.tsx | 12 +- .../new-feature-panel/feature-bar.tsx | 87 +- .../new-feature-panel/feature-card.tsx | 14 +- .../new-feature-panel/file-upload/index.tsx | 49 +- .../file-upload/setting-content.tsx | 33 +- .../file-upload/setting-modal.tsx | 13 +- .../features/new-feature-panel/follow-up.tsx | 25 +- .../new-feature-panel/image-upload/index.tsx | 57 +- .../base/features/new-feature-panel/index.tsx | 65 +- .../moderation/form-generation.tsx | 18 +- .../new-feature-panel/moderation/index.tsx | 71 +- .../moderation/moderation-content.tsx | 36 +- .../moderation/moderation-setting-modal.tsx | 172 +- .../new-feature-panel/more-like-this.tsx | 27 +- .../new-feature-panel/speech-to-text.tsx | 25 +- .../text-to-speech/index.tsx | 61 +- .../text-to-speech/param-config-content.tsx | 123 +- .../text-to-speech/voice-settings.tsx | 8 +- web/app/components/base/features/store.ts | 2 +- web/app/components/base/features/types.ts | 2 +- .../base/file-icon/index.stories.tsx | 5 +- web/app/components/base/file-icon/index.tsx | 2 +- .../base/file-thumb/image-render.tsx | 6 +- web/app/components/base/file-thumb/index.tsx | 37 +- .../base/file-uploader/audio-preview.tsx | 12 +- .../file-from-link-or-local/index.tsx | 55 +- .../base/file-uploader/file-image-render.tsx | 2 +- .../base/file-uploader/file-input.tsx | 8 +- .../base/file-uploader/file-list-in-log.tsx | 35 +- .../base/file-uploader/file-list.stories.tsx | 4 +- .../base/file-uploader/file-type-icon.tsx | 6 +- .../file-uploader-in-attachment/file-item.tsx | 82 +- .../index.stories.tsx | 8 +- .../file-uploader-in-attachment/index.tsx | 36 +- .../file-image-item.tsx | 38 +- .../file-uploader-in-chat-input/file-item.tsx | 48 +- .../file-uploader-in-chat-input/file-list.tsx | 10 +- .../index.stories.tsx | 8 +- .../file-uploader-in-chat-input/index.tsx | 16 +- .../components/base/file-uploader/hooks.ts | 58 +- .../components/base/file-uploader/index.ts | 4 +- .../base/file-uploader/pdf-preview.tsx | 60 +- .../components/base/file-uploader/store.tsx | 20 +- 
.../base/file-uploader/utils.spec.ts | 123 +- .../components/base/file-uploader/utils.ts | 16 +- .../base/file-uploader/video-preview.tsx | 12 +- .../float-right-container/index.stories.tsx | 2 +- .../base/float-right-container/index.tsx | 2 +- .../base/form/components/base/base-field.tsx | 64 +- .../base/form/components/base/base-form.tsx | 43 +- .../base/form/components/base/index.tsx | 2 +- .../base/form/components/field/checkbox.tsx | 10 +- .../form/components/field/custom-select.tsx | 6 +- .../base/form/components/field/file-types.tsx | 10 +- .../form/components/field/file-uploader.tsx | 12 +- .../field/input-type-select/hooks.tsx | 20 +- .../field/input-type-select/index.tsx | 12 +- .../field/input-type-select/option.tsx | 6 +- .../field/input-type-select/trigger.tsx | 34 +- .../field/mixed-variable-text-input/index.tsx | 4 +- .../mixed-variable-text-input/placeholder.tsx | 19 +- .../form/components/field/number-input.tsx | 10 +- .../form/components/field/number-slider.tsx | 8 +- .../base/form/components/field/options.tsx | 10 +- .../base/form/components/field/select.tsx | 6 +- .../base/form/components/field/text-area.tsx | 10 +- .../base/form/components/field/text.tsx | 9 +- .../form/components/field/upload-method.tsx | 16 +- .../field/variable-or-constant-input.tsx | 20 +- .../components/field/variable-selector.tsx | 12 +- .../base/form/components/form/actions.tsx | 8 +- .../components/base/form/components/label.tsx | 16 +- .../base/form/form-scenarios/auth/index.tsx | 6 +- .../base/form/form-scenarios/base/field.tsx | 10 +- .../base/form/form-scenarios/base/index.tsx | 9 +- .../base/form/form-scenarios/base/types.ts | 6 +- .../base/form/form-scenarios/base/utils.ts | 3 +- .../form-scenarios/demo/contact-fields.tsx | 18 +- .../base/form/form-scenarios/demo/index.tsx | 14 +- .../form/form-scenarios/input-field/field.tsx | 7 +- .../form/form-scenarios/input-field/types.ts | 4 +- .../form/form-scenarios/input-field/utils.ts | 3 +- .../form/form-scenarios/node-panel/field.tsx | 7 +- .../base/form/hooks/use-check-validated.ts | 4 +- .../base/form/hooks/use-get-form-values.ts | 4 +- .../base/form/hooks/use-get-validators.ts | 10 +- .../components/base/form/index.stories.tsx | 15 +- web/app/components/base/form/index.tsx | 20 +- web/app/components/base/form/types.ts | 8 +- .../base/fullscreen-modal/index.tsx | 43 +- web/app/components/base/ga/index.tsx | 12 +- web/app/components/base/grid-mask/index.tsx | 10 +- .../components/base/icons/IconBase.spec.tsx | 13 +- web/app/components/base/icons/IconBase.tsx | 4 +- .../base/icons/icon-gallery.stories.tsx | 19 +- web/app/components/base/icons/script.mjs | 10 +- .../icons/src/image/llm/BaichuanTextCn.tsx | 4 +- .../base/icons/src/image/llm/Minimax.tsx | 4 +- .../base/icons/src/image/llm/MinimaxText.tsx | 4 +- .../base/icons/src/image/llm/Tongyi.tsx | 4 +- .../base/icons/src/image/llm/TongyiText.tsx | 4 +- .../base/icons/src/image/llm/TongyiTextCn.tsx | 4 +- .../base/icons/src/image/llm/Wxyy.tsx | 4 +- .../base/icons/src/image/llm/WxyyText.tsx | 4 +- .../base/icons/src/image/llm/WxyyTextCn.tsx | 4 +- .../base/icons/src/image/llm/index.ts | 10 +- .../base/icons/src/public/avatar/Robot.json | 180 +- .../base/icons/src/public/avatar/Robot.tsx | 8 +- .../base/icons/src/public/avatar/User.json | 174 +- .../base/icons/src/public/avatar/User.tsx | 8 +- .../icons/src/public/billing/ArCube1.json | 54 +- .../base/icons/src/public/billing/ArCube1.tsx | 8 +- .../icons/src/public/billing/Asterisk.json | 72 +- .../icons/src/public/billing/Asterisk.tsx | 8 +- 
.../public/billing/AwsMarketplaceDark.json | 354 +- .../src/public/billing/AwsMarketplaceDark.tsx | 8 +- .../public/billing/AwsMarketplaceLight.json | 354 +- .../public/billing/AwsMarketplaceLight.tsx | 8 +- .../base/icons/src/public/billing/Azure.json | 382 +- .../base/icons/src/public/billing/Azure.tsx | 8 +- .../icons/src/public/billing/Buildings.json | 74 +- .../icons/src/public/billing/Buildings.tsx | 8 +- .../icons/src/public/billing/Diamond.json | 74 +- .../base/icons/src/public/billing/Diamond.tsx | 8 +- .../icons/src/public/billing/GoogleCloud.json | 128 +- .../icons/src/public/billing/GoogleCloud.tsx | 8 +- .../base/icons/src/public/billing/Group2.json | 54 +- .../base/icons/src/public/billing/Group2.tsx | 8 +- .../icons/src/public/billing/Keyframe.json | 52 +- .../icons/src/public/billing/Keyframe.tsx | 8 +- .../icons/src/public/billing/Sparkles.json | 186 +- .../icons/src/public/billing/Sparkles.tsx | 8 +- .../src/public/billing/SparklesSoft.json | 68 +- .../icons/src/public/billing/SparklesSoft.tsx | 8 +- .../base/icons/src/public/billing/index.ts | 2 +- .../base/icons/src/public/common/D.json | 246 +- .../base/icons/src/public/common/D.tsx | 8 +- .../public/common/DiagonalDividingLine.json | 52 +- .../public/common/DiagonalDividingLine.tsx | 8 +- .../base/icons/src/public/common/Dify.json | 120 +- .../base/icons/src/public/common/Dify.tsx | 8 +- .../base/icons/src/public/common/Gdpr.json | 676 +-- .../base/icons/src/public/common/Gdpr.tsx | 8 +- .../base/icons/src/public/common/Github.json | 68 +- .../base/icons/src/public/common/Github.tsx | 8 +- .../icons/src/public/common/Highlight.json | 130 +- .../icons/src/public/common/Highlight.tsx | 8 +- .../base/icons/src/public/common/Iso.json | 238 +- .../base/icons/src/public/common/Iso.tsx | 8 +- .../base/icons/src/public/common/Line3.json | 52 +- .../base/icons/src/public/common/Line3.tsx | 8 +- .../base/icons/src/public/common/Lock.json | 72 +- .../base/icons/src/public/common/Lock.tsx | 8 +- .../src/public/common/MessageChatSquare.json | 70 +- .../src/public/common/MessageChatSquare.tsx | 8 +- .../src/public/common/MultiPathRetrieval.json | 302 +- .../src/public/common/MultiPathRetrieval.tsx | 8 +- .../src/public/common/NTo1Retrieval.json | 288 +- .../icons/src/public/common/NTo1Retrieval.tsx | 8 +- .../base/icons/src/public/common/Notion.json | 162 +- .../base/icons/src/public/common/Notion.tsx | 8 +- .../base/icons/src/public/common/Soc2.json | 1872 +++---- .../base/icons/src/public/common/Soc2.tsx | 8 +- .../icons/src/public/common/SparklesSoft.json | 90 +- .../icons/src/public/common/SparklesSoft.tsx | 8 +- .../src/public/common/SparklesSoftAccent.json | 68 +- .../src/public/common/SparklesSoftAccent.tsx | 8 +- .../base/icons/src/public/common/index.ts | 4 +- .../icons/src/public/education/Triangle.json | 50 +- .../icons/src/public/education/Triangle.tsx | 8 +- .../base/icons/src/public/files/Csv.json | 358 +- .../base/icons/src/public/files/Csv.tsx | 8 +- .../base/icons/src/public/files/Doc.json | 334 +- .../base/icons/src/public/files/Doc.tsx | 8 +- .../base/icons/src/public/files/Docx.json | 352 +- .../base/icons/src/public/files/Docx.tsx | 8 +- .../base/icons/src/public/files/Html.json | 352 +- .../base/icons/src/public/files/Html.tsx | 8 +- .../base/icons/src/public/files/Json.json | 352 +- .../base/icons/src/public/files/Json.tsx | 8 +- .../base/icons/src/public/files/Md.json | 284 +- .../base/icons/src/public/files/Md.tsx | 8 +- .../base/icons/src/public/files/Pdf.json | 334 +- 
.../base/icons/src/public/files/Pdf.tsx | 8 +- .../base/icons/src/public/files/Txt.json | 356 +- .../base/icons/src/public/files/Txt.tsx | 8 +- .../base/icons/src/public/files/Unknown.json | 394 +- .../base/icons/src/public/files/Unknown.tsx | 8 +- .../base/icons/src/public/files/Xlsx.json | 286 +- .../base/icons/src/public/files/Xlsx.tsx | 8 +- .../base/icons/src/public/files/Yaml.json | 358 +- .../base/icons/src/public/files/Yaml.tsx | 8 +- .../base/icons/src/public/knowledge/File.json | 70 +- .../base/icons/src/public/knowledge/File.tsx | 8 +- .../knowledge/OptionCardEffectBlue.json | 176 +- .../public/knowledge/OptionCardEffectBlue.tsx | 8 +- .../knowledge/OptionCardEffectBlueLight.json | 176 +- .../knowledge/OptionCardEffectBlueLight.tsx | 8 +- .../knowledge/OptionCardEffectOrange.json | 176 +- .../knowledge/OptionCardEffectOrange.tsx | 8 +- .../knowledge/OptionCardEffectPurple.json | 176 +- .../knowledge/OptionCardEffectPurple.tsx | 8 +- .../knowledge/OptionCardEffectTeal.json | 174 +- .../public/knowledge/OptionCardEffectTeal.tsx | 8 +- .../src/public/knowledge/SelectionMod.json | 228 +- .../src/public/knowledge/SelectionMod.tsx | 8 +- .../src/public/knowledge/Watercrawl.json | 372 +- .../icons/src/public/knowledge/Watercrawl.tsx | 8 +- .../dataset-card/ExternalKnowledgeBase.json | 470 +- .../dataset-card/ExternalKnowledgeBase.tsx | 8 +- .../knowledge/dataset-card/General.json | 910 ++-- .../public/knowledge/dataset-card/General.tsx | 8 +- .../public/knowledge/dataset-card/Graph.json | 2112 ++++---- .../public/knowledge/dataset-card/Graph.tsx | 8 +- .../knowledge/dataset-card/ParentChild.json | 692 +-- .../knowledge/dataset-card/ParentChild.tsx | 8 +- .../src/public/knowledge/dataset-card/Qa.json | 470 +- .../src/public/knowledge/dataset-card/Qa.tsx | 8 +- .../base/icons/src/public/knowledge/index.ts | 2 +- .../knowledge/online-drive/BucketsBlue.json | 1178 ++--- .../knowledge/online-drive/BucketsBlue.tsx | 8 +- .../knowledge/online-drive/BucketsGray.json | 1178 ++--- .../knowledge/online-drive/BucketsGray.tsx | 8 +- .../public/knowledge/online-drive/Folder.json | 2176 ++++----- .../public/knowledge/online-drive/Folder.tsx | 8 +- .../base/icons/src/public/llm/Anthropic.json | 70 +- .../base/icons/src/public/llm/Anthropic.tsx | 8 +- .../icons/src/public/llm/AnthropicDark.json | 2088 ++++---- .../icons/src/public/llm/AnthropicDark.tsx | 8 +- .../icons/src/public/llm/AnthropicLight.json | 2088 ++++---- .../icons/src/public/llm/AnthropicLight.tsx | 8 +- .../icons/src/public/llm/AnthropicText.json | 1074 ++-- .../icons/src/public/llm/AnthropicText.tsx | 8 +- .../src/public/llm/AzureOpenaiService.json | 144 +- .../src/public/llm/AzureOpenaiService.tsx | 8 +- .../public/llm/AzureOpenaiServiceText.json | 468 +- .../src/public/llm/AzureOpenaiServiceText.tsx | 8 +- .../base/icons/src/public/llm/Azureai.json | 356 +- .../base/icons/src/public/llm/Azureai.tsx | 8 +- .../icons/src/public/llm/AzureaiText.json | 482 +- .../base/icons/src/public/llm/AzureaiText.tsx | 8 +- .../base/icons/src/public/llm/Baichuan.json | 148 +- .../base/icons/src/public/llm/Baichuan.tsx | 8 +- .../icons/src/public/llm/BaichuanText.json | 308 +- .../icons/src/public/llm/BaichuanText.tsx | 8 +- .../base/icons/src/public/llm/Chatglm.json | 140 +- .../base/icons/src/public/llm/Chatglm.tsx | 8 +- .../icons/src/public/llm/ChatglmText.json | 266 +- .../base/icons/src/public/llm/ChatglmText.tsx | 8 +- .../base/icons/src/public/llm/Cohere.json | 220 +- .../base/icons/src/public/llm/Cohere.tsx | 8 +- 
.../base/icons/src/public/llm/CohereText.json | 176 +- .../base/icons/src/public/llm/CohereText.tsx | 8 +- .../base/icons/src/public/llm/Gpt3.json | 98 +- .../base/icons/src/public/llm/Gpt3.tsx | 8 +- .../base/icons/src/public/llm/Gpt4.json | 98 +- .../base/icons/src/public/llm/Gpt4.tsx | 8 +- .../icons/src/public/llm/Huggingface.json | 312 +- .../base/icons/src/public/llm/Huggingface.tsx | 8 +- .../icons/src/public/llm/HuggingfaceText.json | 640 +-- .../icons/src/public/llm/HuggingfaceText.tsx | 8 +- .../src/public/llm/HuggingfaceTextHub.json | 696 +-- .../src/public/llm/HuggingfaceTextHub.tsx | 8 +- .../icons/src/public/llm/IflytekSpark.json | 84 +- .../icons/src/public/llm/IflytekSpark.tsx | 8 +- .../src/public/llm/IflytekSparkText.json | 370 +- .../icons/src/public/llm/IflytekSparkText.tsx | 8 +- .../src/public/llm/IflytekSparkTextCn.json | 192 +- .../src/public/llm/IflytekSparkTextCn.tsx | 8 +- .../base/icons/src/public/llm/Jina.json | 66 +- .../base/icons/src/public/llm/Jina.tsx | 8 +- .../base/icons/src/public/llm/JinaText.json | 160 +- .../base/icons/src/public/llm/JinaText.tsx | 8 +- .../base/icons/src/public/llm/Localai.json | 210 +- .../base/icons/src/public/llm/Localai.tsx | 8 +- .../icons/src/public/llm/LocalaiText.json | 336 +- .../base/icons/src/public/llm/LocalaiText.tsx | 8 +- .../base/icons/src/public/llm/Microsoft.json | 148 +- .../base/icons/src/public/llm/Microsoft.tsx | 8 +- .../icons/src/public/llm/OpenaiBlack.json | 70 +- .../base/icons/src/public/llm/OpenaiBlack.tsx | 8 +- .../base/icons/src/public/llm/OpenaiBlue.json | 70 +- .../base/icons/src/public/llm/OpenaiBlue.tsx | 8 +- .../icons/src/public/llm/OpenaiGreen.json | 70 +- .../base/icons/src/public/llm/OpenaiGreen.tsx | 8 +- .../base/icons/src/public/llm/OpenaiTeal.json | 70 +- .../base/icons/src/public/llm/OpenaiTeal.tsx | 8 +- .../base/icons/src/public/llm/OpenaiText.json | 150 +- .../base/icons/src/public/llm/OpenaiText.tsx | 8 +- .../src/public/llm/OpenaiTransparent.json | 48 +- .../src/public/llm/OpenaiTransparent.tsx | 8 +- .../icons/src/public/llm/OpenaiViolet.json | 70 +- .../icons/src/public/llm/OpenaiViolet.tsx | 8 +- .../icons/src/public/llm/OpenaiYellow.json | 70 +- .../icons/src/public/llm/OpenaiYellow.tsx | 8 +- .../base/icons/src/public/llm/Openllm.json | 162 +- .../base/icons/src/public/llm/Openllm.tsx | 8 +- .../icons/src/public/llm/OpenllmText.json | 282 +- .../base/icons/src/public/llm/OpenllmText.tsx | 8 +- .../base/icons/src/public/llm/Replicate.json | 74 +- .../base/icons/src/public/llm/Replicate.tsx | 8 +- .../icons/src/public/llm/ReplicateText.json | 228 +- .../icons/src/public/llm/ReplicateText.tsx | 8 +- .../src/public/llm/XorbitsInference.json | 348 +- .../icons/src/public/llm/XorbitsInference.tsx | 8 +- .../src/public/llm/XorbitsInferenceText.json | 654 +-- .../src/public/llm/XorbitsInferenceText.tsx | 8 +- .../base/icons/src/public/llm/Zhipuai.json | 102 +- .../base/icons/src/public/llm/Zhipuai.tsx | 8 +- .../icons/src/public/llm/ZhipuaiText.json | 84 +- .../base/icons/src/public/llm/ZhipuaiText.tsx | 8 +- .../icons/src/public/llm/ZhipuaiTextCn.json | 120 +- .../icons/src/public/llm/ZhipuaiTextCn.tsx | 8 +- .../base/icons/src/public/llm/index.ts | 36 +- .../base/icons/src/public/model/Checked.json | 54 +- .../base/icons/src/public/model/Checked.tsx | 8 +- .../src/public/other/DefaultToolIcon.json | 158 +- .../src/public/other/DefaultToolIcon.tsx | 8 +- .../icons/src/public/other/Icon3Dots.json | 54 +- .../base/icons/src/public/other/Icon3Dots.tsx | 8 +- 
.../icons/src/public/other/Message3Fill.json | 342 +- .../icons/src/public/other/Message3Fill.tsx | 8 +- .../icons/src/public/other/RowStruct.json | 108 +- .../base/icons/src/public/other/RowStruct.tsx | 8 +- .../base/icons/src/public/other/index.ts | 2 +- .../base/icons/src/public/plugins/Google.json | 102 +- .../base/icons/src/public/plugins/Google.tsx | 8 +- .../icons/src/public/plugins/PartnerDark.json | 890 ++-- .../icons/src/public/plugins/PartnerDark.tsx | 8 +- .../src/public/plugins/PartnerLight.json | 888 ++-- .../icons/src/public/plugins/PartnerLight.tsx | 8 +- .../src/public/plugins/VerifiedDark.json | 910 ++-- .../icons/src/public/plugins/VerifiedDark.tsx | 8 +- .../src/public/plugins/VerifiedLight.json | 908 ++-- .../src/public/plugins/VerifiedLight.tsx | 8 +- .../icons/src/public/plugins/WebReader.json | 74 +- .../icons/src/public/plugins/WebReader.tsx | 8 +- .../icons/src/public/plugins/Wikipedia.json | 48 +- .../icons/src/public/plugins/Wikipedia.tsx | 8 +- .../icons/src/public/thought/DataSet.json | 124 +- .../base/icons/src/public/thought/DataSet.tsx | 8 +- .../icons/src/public/thought/Loading.json | 124 +- .../base/icons/src/public/thought/Loading.tsx | 8 +- .../base/icons/src/public/thought/Search.json | 124 +- .../base/icons/src/public/thought/Search.tsx | 8 +- .../icons/src/public/thought/ThoughtList.json | 162 +- .../icons/src/public/thought/ThoughtList.tsx | 8 +- .../icons/src/public/thought/WebReader.json | 124 +- .../icons/src/public/thought/WebReader.tsx | 8 +- .../icons/src/public/tracing/AliyunIcon.json | 254 +- .../icons/src/public/tracing/AliyunIcon.tsx | 8 +- .../src/public/tracing/AliyunIconBig.json | 152 +- .../src/public/tracing/AliyunIconBig.tsx | 8 +- .../icons/src/public/tracing/ArizeIcon.json | 240 +- .../icons/src/public/tracing/ArizeIcon.tsx | 8 +- .../src/public/tracing/ArizeIconBig.json | 240 +- .../icons/src/public/tracing/ArizeIconBig.tsx | 8 +- .../src/public/tracing/DatabricksIcon.json | 266 +- .../src/public/tracing/DatabricksIcon.tsx | 8 +- .../src/public/tracing/DatabricksIconBig.json | 266 +- .../src/public/tracing/DatabricksIconBig.tsx | 8 +- .../src/public/tracing/LangfuseIcon.json | 468 +- .../icons/src/public/tracing/LangfuseIcon.tsx | 8 +- .../src/public/tracing/LangfuseIconBig.json | 468 +- .../src/public/tracing/LangfuseIconBig.tsx | 8 +- .../src/public/tracing/LangsmithIcon.json | 372 +- .../src/public/tracing/LangsmithIcon.tsx | 8 +- .../src/public/tracing/LangsmithIconBig.json | 372 +- .../src/public/tracing/LangsmithIconBig.tsx | 8 +- .../icons/src/public/tracing/MlflowIcon.json | 212 +- .../icons/src/public/tracing/MlflowIcon.tsx | 8 +- .../src/public/tracing/MlflowIconBig.json | 212 +- .../src/public/tracing/MlflowIconBig.tsx | 8 +- .../icons/src/public/tracing/OpikIcon.json | 322 +- .../icons/src/public/tracing/OpikIcon.tsx | 8 +- .../icons/src/public/tracing/OpikIconBig.json | 320 +- .../icons/src/public/tracing/OpikIconBig.tsx | 8 +- .../icons/src/public/tracing/PhoenixIcon.json | 1702 +++---- .../icons/src/public/tracing/PhoenixIcon.tsx | 8 +- .../src/public/tracing/PhoenixIconBig.json | 1702 +++---- .../src/public/tracing/PhoenixIconBig.tsx | 8 +- .../icons/src/public/tracing/TencentIcon.tsx | 8 +- .../src/public/tracing/TencentIconBig.tsx | 8 +- .../icons/src/public/tracing/TracingIcon.json | 90 +- .../icons/src/public/tracing/TracingIcon.tsx | 8 +- .../icons/src/public/tracing/WeaveIcon.json | 554 +-- .../icons/src/public/tracing/WeaveIcon.tsx | 8 +- .../src/public/tracing/WeaveIconBig.json | 554 +-- 
.../icons/src/public/tracing/WeaveIconBig.tsx | 8 +- .../base/icons/src/public/tracing/index.ts | 22 +- .../icons/src/vender/features/Citations.json | 48 +- .../icons/src/vender/features/Citations.tsx | 8 +- .../vender/features/ContentModeration.json | 52 +- .../src/vender/features/ContentModeration.tsx | 8 +- .../icons/src/vender/features/Document.json | 42 +- .../icons/src/vender/features/Document.tsx | 8 +- .../src/vender/features/FolderUpload.json | 48 +- .../src/vender/features/FolderUpload.tsx | 8 +- .../src/vender/features/LoveMessage.json | 48 +- .../icons/src/vender/features/LoveMessage.tsx | 8 +- .../src/vender/features/MessageFast.json | 52 +- .../icons/src/vender/features/MessageFast.tsx | 8 +- .../src/vender/features/Microphone01.json | 70 +- .../src/vender/features/Microphone01.tsx | 8 +- .../src/vender/features/TextToAudio.json | 150 +- .../icons/src/vender/features/TextToAudio.tsx | 8 +- .../src/vender/features/VirtualAssistant.json | 66 +- .../src/vender/features/VirtualAssistant.tsx | 8 +- .../icons/src/vender/features/Vision.json | 52 +- .../base/icons/src/vender/features/Vision.tsx | 8 +- .../icons/src/vender/knowledge/AddChunks.json | 138 +- .../icons/src/vender/knowledge/AddChunks.tsx | 8 +- .../src/vender/knowledge/ApiAggregate.json | 48 +- .../src/vender/knowledge/ApiAggregate.tsx | 8 +- .../src/vender/knowledge/ArrowShape.json | 50 +- .../icons/src/vender/knowledge/ArrowShape.tsx | 8 +- .../icons/src/vender/knowledge/Chunk.json | 228 +- .../base/icons/src/vender/knowledge/Chunk.tsx | 8 +- .../icons/src/vender/knowledge/Collapse.json | 120 +- .../icons/src/vender/knowledge/Collapse.tsx | 8 +- .../icons/src/vender/knowledge/Divider.json | 54 +- .../icons/src/vender/knowledge/Divider.tsx | 8 +- .../icons/src/vender/knowledge/Economic.json | 106 +- .../icons/src/vender/knowledge/Economic.tsx | 8 +- .../src/vender/knowledge/FullTextSearch.json | 108 +- .../src/vender/knowledge/FullTextSearch.tsx | 8 +- .../src/vender/knowledge/GeneralChunk.json | 198 +- .../src/vender/knowledge/GeneralChunk.tsx | 8 +- .../src/vender/knowledge/HighQuality.json | 68 +- .../src/vender/knowledge/HighQuality.tsx | 8 +- .../src/vender/knowledge/HybridSearch.json | 228 +- .../src/vender/knowledge/HybridSearch.tsx | 8 +- .../vender/knowledge/ParentChildChunk.json | 120 +- .../src/vender/knowledge/ParentChildChunk.tsx | 8 +- .../vender/knowledge/QuestionAndAnswer.json | 72 +- .../vender/knowledge/QuestionAndAnswer.tsx | 8 +- .../src/vender/knowledge/SearchMenu.json | 150 +- .../icons/src/vender/knowledge/SearchMenu.tsx | 8 +- .../src/vender/knowledge/VectorSearch.json | 228 +- .../src/vender/knowledge/VectorSearch.tsx | 8 +- .../line/alertsAndFeedback/AlertTriangle.json | 74 +- .../line/alertsAndFeedback/AlertTriangle.tsx | 8 +- .../line/alertsAndFeedback/ThumbsDown.json | 128 +- .../line/alertsAndFeedback/ThumbsDown.tsx | 8 +- .../line/alertsAndFeedback/ThumbsUp.json | 128 +- .../line/alertsAndFeedback/ThumbsUp.tsx | 8 +- .../line/alertsAndFeedback/Warning.json | 48 +- .../vender/line/alertsAndFeedback/Warning.tsx | 8 +- .../vender/line/arrows/ArrowNarrowLeft.json | 54 +- .../vender/line/arrows/ArrowNarrowLeft.tsx | 8 +- .../src/vender/line/arrows/ArrowUpRight.json | 74 +- .../src/vender/line/arrows/ArrowUpRight.tsx | 8 +- .../vender/line/arrows/ChevronDownDouble.json | 74 +- .../vender/line/arrows/ChevronDownDouble.tsx | 8 +- .../src/vender/line/arrows/ChevronRight.json | 74 +- .../src/vender/line/arrows/ChevronRight.tsx | 8 +- .../line/arrows/ChevronSelectorVertical.json | 54 +- 
.../line/arrows/ChevronSelectorVertical.tsx | 8 +- .../icons/src/vender/line/arrows/IconR.json | 48 +- .../icons/src/vender/line/arrows/IconR.tsx | 8 +- .../src/vender/line/arrows/RefreshCcw01.json | 54 +- .../src/vender/line/arrows/RefreshCcw01.tsx | 8 +- .../src/vender/line/arrows/RefreshCw05.json | 54 +- .../src/vender/line/arrows/RefreshCw05.tsx | 8 +- .../src/vender/line/arrows/ReverseLeft.json | 74 +- .../src/vender/line/arrows/ReverseLeft.tsx | 8 +- .../icons/src/vender/line/arrows/index.ts | 2 +- .../src/vender/line/communication/AiText.json | 74 +- .../src/vender/line/communication/AiText.tsx | 8 +- .../vender/line/communication/ChatBot.json | 182 +- .../src/vender/line/communication/ChatBot.tsx | 8 +- .../line/communication/ChatBotSlim.json | 132 +- .../vender/line/communication/ChatBotSlim.tsx | 8 +- .../vender/line/communication/CuteRobot.json | 74 +- .../vender/line/communication/CuteRobot.tsx | 8 +- .../communication/MessageCheckRemove.json | 74 +- .../line/communication/MessageCheckRemove.tsx | 8 +- .../line/communication/MessageFastPlus.json | 54 +- .../line/communication/MessageFastPlus.tsx | 8 +- .../src/vender/line/communication/index.ts | 2 +- .../line/development/ArtificialBrain.json | 54 +- .../line/development/ArtificialBrain.tsx | 8 +- .../line/development/BarChartSquare02.json | 74 +- .../line/development/BarChartSquare02.tsx | 8 +- .../vender/line/development/BracketsX.json | 54 +- .../src/vender/line/development/BracketsX.tsx | 8 +- .../vender/line/development/CodeBrowser.json | 74 +- .../vender/line/development/CodeBrowser.tsx | 8 +- .../vender/line/development/Container.json | 54 +- .../src/vender/line/development/Container.tsx | 8 +- .../vender/line/development/Database01.json | 54 +- .../vender/line/development/Database01.tsx | 8 +- .../vender/line/development/Database03.json | 54 +- .../vender/line/development/Database03.tsx | 8 +- .../vender/line/development/FileHeart02.json | 100 +- .../vender/line/development/FileHeart02.tsx | 8 +- .../vender/line/development/GitBranch01.json | 74 +- .../vender/line/development/GitBranch01.tsx | 8 +- .../line/development/PromptEngineering.json | 126 +- .../line/development/PromptEngineering.tsx | 8 +- .../line/development/PuzzlePiece01.json | 128 +- .../vender/line/development/PuzzlePiece01.tsx | 8 +- .../line/development/TerminalSquare.json | 74 +- .../line/development/TerminalSquare.tsx | 8 +- .../src/vender/line/development/Variable.json | 120 +- .../src/vender/line/development/Variable.tsx | 8 +- .../src/vender/line/development/Webhooks.json | 174 +- .../src/vender/line/development/Webhooks.tsx | 8 +- .../src/vender/line/editor/AlignLeft.json | 74 +- .../src/vender/line/editor/AlignLeft.tsx | 8 +- .../src/vender/line/editor/BezierCurve03.json | 72 +- .../src/vender/line/editor/BezierCurve03.tsx | 8 +- .../src/vender/line/editor/Collapse.json | 120 +- .../icons/src/vender/line/editor/Collapse.tsx | 8 +- .../icons/src/vender/line/editor/Colors.json | 74 +- .../icons/src/vender/line/editor/Colors.tsx | 8 +- .../vender/line/editor/ImageIndentLeft.json | 74 +- .../vender/line/editor/ImageIndentLeft.tsx | 8 +- .../src/vender/line/editor/LeftIndent02.json | 54 +- .../src/vender/line/editor/LeftIndent02.tsx | 8 +- .../vender/line/editor/LetterSpacing01.json | 74 +- .../vender/line/editor/LetterSpacing01.tsx | 8 +- .../src/vender/line/editor/TypeSquare.json | 72 +- .../src/vender/line/editor/TypeSquare.tsx | 8 +- .../src/vender/line/education/BookOpen01.json | 94 +- .../src/vender/line/education/BookOpen01.tsx | 8 +- 
.../icons/src/vender/line/files/Copy.json | 54 +- .../base/icons/src/vender/line/files/Copy.tsx | 8 +- .../src/vender/line/files/CopyCheck.json | 54 +- .../icons/src/vender/line/files/CopyCheck.tsx | 8 +- .../icons/src/vender/line/files/File02.json | 74 +- .../icons/src/vender/line/files/File02.tsx | 8 +- .../src/vender/line/files/FileArrow01.json | 74 +- .../src/vender/line/files/FileArrow01.tsx | 8 +- .../src/vender/line/files/FileCheck02.json | 74 +- .../src/vender/line/files/FileCheck02.tsx | 8 +- .../src/vender/line/files/FileDownload02.json | 54 +- .../src/vender/line/files/FileDownload02.tsx | 8 +- .../src/vender/line/files/FilePlus01.json | 74 +- .../src/vender/line/files/FilePlus01.tsx | 8 +- .../src/vender/line/files/FilePlus02.json | 54 +- .../src/vender/line/files/FilePlus02.tsx | 8 +- .../icons/src/vender/line/files/FileText.json | 74 +- .../icons/src/vender/line/files/FileText.tsx | 8 +- .../src/vender/line/files/FileUpload.json | 100 +- .../src/vender/line/files/FileUpload.tsx | 8 +- .../icons/src/vender/line/files/Folder.json | 74 +- .../icons/src/vender/line/files/Folder.tsx | 8 +- .../base/icons/src/vender/line/files/index.ts | 2 +- .../line/financeAndECommerce/Balance.json | 54 +- .../line/financeAndECommerce/Balance.tsx | 8 +- .../financeAndECommerce/CoinsStacked01.json | 74 +- .../financeAndECommerce/CoinsStacked01.tsx | 8 +- .../line/financeAndECommerce/GoldCoin.json | 236 +- .../line/financeAndECommerce/GoldCoin.tsx | 8 +- .../line/financeAndECommerce/ReceiptList.json | 54 +- .../line/financeAndECommerce/ReceiptList.tsx | 8 +- .../line/financeAndECommerce/Tag01.json | 128 +- .../vender/line/financeAndECommerce/Tag01.tsx | 8 +- .../line/financeAndECommerce/Tag03.json | 74 +- .../vender/line/financeAndECommerce/Tag03.tsx | 8 +- .../icons/src/vender/line/general/AtSign.json | 128 +- .../icons/src/vender/line/general/AtSign.tsx | 8 +- .../src/vender/line/general/Bookmark.json | 54 +- .../src/vender/line/general/Bookmark.tsx | 8 +- .../icons/src/vender/line/general/Check.json | 74 +- .../icons/src/vender/line/general/Check.tsx | 8 +- .../src/vender/line/general/CheckDone01.json | 74 +- .../src/vender/line/general/CheckDone01.tsx | 8 +- .../vender/line/general/ChecklistSquare.json | 68 +- .../vender/line/general/ChecklistSquare.tsx | 8 +- .../vender/line/general/CodeAssistant.json | 102 +- .../src/vender/line/general/CodeAssistant.tsx | 8 +- .../src/vender/line/general/DotsGrid.json | 264 +- .../src/vender/line/general/DotsGrid.tsx | 8 +- .../icons/src/vender/line/general/Edit02.json | 128 +- .../icons/src/vender/line/general/Edit02.tsx | 8 +- .../icons/src/vender/line/general/Edit04.json | 54 +- .../icons/src/vender/line/general/Edit04.tsx | 8 +- .../icons/src/vender/line/general/Edit05.json | 128 +- .../icons/src/vender/line/general/Edit05.tsx | 8 +- .../icons/src/vender/line/general/Hash02.json | 72 +- .../icons/src/vender/line/general/Hash02.tsx | 8 +- .../src/vender/line/general/InfoCircle.json | 128 +- .../src/vender/line/general/InfoCircle.tsx | 8 +- .../icons/src/vender/line/general/Link03.json | 110 +- .../icons/src/vender/line/general/Link03.tsx | 8 +- .../vender/line/general/LinkExternal02.json | 72 +- .../vender/line/general/LinkExternal02.tsx | 8 +- .../src/vender/line/general/LogIn04.json | 102 +- .../icons/src/vender/line/general/LogIn04.tsx | 8 +- .../src/vender/line/general/LogOut01.json | 74 +- .../src/vender/line/general/LogOut01.tsx | 8 +- .../src/vender/line/general/LogOut04.json | 102 +- .../src/vender/line/general/LogOut04.tsx | 8 +- 
.../src/vender/line/general/MagicEdit.json | 106 +- .../src/vender/line/general/MagicEdit.tsx | 8 +- .../icons/src/vender/line/general/Menu01.json | 74 +- .../icons/src/vender/line/general/Menu01.tsx | 8 +- .../icons/src/vender/line/general/Pin01.json | 74 +- .../icons/src/vender/line/general/Pin01.tsx | 8 +- .../icons/src/vender/line/general/Pin02.json | 54 +- .../icons/src/vender/line/general/Pin02.tsx | 8 +- .../icons/src/vender/line/general/Plus02.json | 74 +- .../icons/src/vender/line/general/Plus02.tsx | 8 +- .../src/vender/line/general/Refresh.json | 42 +- .../icons/src/vender/line/general/Refresh.tsx | 8 +- .../src/vender/line/general/SearchMenu.json | 150 +- .../src/vender/line/general/SearchMenu.tsx | 8 +- .../src/vender/line/general/Settings01.json | 168 +- .../src/vender/line/general/Settings01.tsx | 8 +- .../src/vender/line/general/Settings04.json | 74 +- .../src/vender/line/general/Settings04.tsx | 8 +- .../src/vender/line/general/Target04.json | 126 +- .../src/vender/line/general/Target04.tsx | 8 +- .../src/vender/line/general/Upload03.json | 128 +- .../src/vender/line/general/Upload03.tsx | 8 +- .../vender/line/general/UploadCloud01.json | 80 +- .../src/vender/line/general/UploadCloud01.tsx | 8 +- .../base/icons/src/vender/line/general/X.json | 74 +- .../base/icons/src/vender/line/general/X.tsx | 8 +- .../icons/src/vender/line/general/index.ts | 2 +- .../src/vender/line/images/ImagePlus.json | 74 +- .../src/vender/line/images/ImagePlus.tsx | 8 +- .../src/vender/line/layout/AlignLeft01.json | 74 +- .../src/vender/line/layout/AlignLeft01.tsx | 8 +- .../src/vender/line/layout/AlignRight01.json | 74 +- .../src/vender/line/layout/AlignRight01.tsx | 8 +- .../icons/src/vender/line/layout/Grid01.json | 162 +- .../icons/src/vender/line/layout/Grid01.tsx | 8 +- .../src/vender/line/layout/LayoutGrid02.json | 54 +- .../src/vender/line/layout/LayoutGrid02.tsx | 8 +- .../line/mediaAndDevices/Microphone01.json | 74 +- .../line/mediaAndDevices/Microphone01.tsx | 8 +- .../line/mediaAndDevices/PlayCircle.json | 168 +- .../line/mediaAndDevices/PlayCircle.tsx | 8 +- .../vender/line/mediaAndDevices/SlidersH.json | 54 +- .../vender/line/mediaAndDevices/SlidersH.tsx | 8 +- .../vender/line/mediaAndDevices/Speaker.json | 220 +- .../vender/line/mediaAndDevices/Speaker.tsx | 8 +- .../src/vender/line/mediaAndDevices/Stop.json | 128 +- .../src/vender/line/mediaAndDevices/Stop.tsx | 8 +- .../line/mediaAndDevices/StopCircle.json | 114 +- .../line/mediaAndDevices/StopCircle.tsx | 8 +- .../src/vender/line/mediaAndDevices/index.ts | 2 +- .../icons/src/vender/line/others/BubbleX.json | 110 +- .../icons/src/vender/line/others/BubbleX.tsx | 8 +- .../icons/src/vender/line/others/Colors.json | 128 +- .../icons/src/vender/line/others/Colors.tsx | 8 +- .../src/vender/line/others/DragHandle.json | 72 +- .../src/vender/line/others/DragHandle.tsx | 8 +- .../icons/src/vender/line/others/Env.json | 176 +- .../base/icons/src/vender/line/others/Env.tsx | 8 +- .../vender/line/others/GlobalVariable.json | 52 +- .../src/vender/line/others/GlobalVariable.tsx | 8 +- .../src/vender/line/others/Icon3Dots.json | 74 +- .../src/vender/line/others/Icon3Dots.tsx | 8 +- .../src/vender/line/others/LongArrowLeft.json | 50 +- .../src/vender/line/others/LongArrowLeft.tsx | 8 +- .../vender/line/others/LongArrowRight.json | 50 +- .../src/vender/line/others/LongArrowRight.tsx | 8 +- .../src/vender/line/others/SearchMenu.json | 150 +- .../src/vender/line/others/SearchMenu.tsx | 8 +- .../icons/src/vender/line/others/Tools.json | 234 +- 
.../icons/src/vender/line/others/Tools.tsx | 8 +- .../src/vender/line/shapes/CubeOutline.json | 192 +- .../src/vender/line/shapes/CubeOutline.tsx | 8 +- .../vender/line/time/ClockFastForward.json | 54 +- .../src/vender/line/time/ClockFastForward.tsx | 8 +- .../icons/src/vender/line/time/ClockPlay.json | 128 +- .../icons/src/vender/line/time/ClockPlay.tsx | 8 +- .../src/vender/line/time/ClockPlaySlim.json | 74 +- .../src/vender/line/time/ClockPlaySlim.tsx | 8 +- .../src/vender/line/time/ClockRefresh.json | 120 +- .../src/vender/line/time/ClockRefresh.tsx | 8 +- .../base/icons/src/vender/line/time/index.ts | 2 +- .../icons/src/vender/line/users/User01.json | 74 +- .../icons/src/vender/line/users/User01.tsx | 8 +- .../icons/src/vender/line/users/Users01.json | 74 +- .../icons/src/vender/line/users/Users01.tsx | 8 +- .../src/vender/line/weather/Stars02.json | 54 +- .../icons/src/vender/line/weather/Stars02.tsx | 8 +- .../icons/src/vender/other/AnthropicText.json | 1074 ++-- .../icons/src/vender/other/AnthropicText.tsx | 8 +- .../icons/src/vender/other/Generator.json | 70 +- .../base/icons/src/vender/other/Generator.tsx | 8 +- .../base/icons/src/vender/other/Group.json | 128 +- .../base/icons/src/vender/other/Group.tsx | 8 +- .../src/vender/other/HourglassShape.json | 50 +- .../icons/src/vender/other/HourglassShape.tsx | 8 +- .../base/icons/src/vender/other/Mcp.json | 66 +- .../base/icons/src/vender/other/Mcp.tsx | 8 +- .../src/vender/other/NoToolPlaceholder.json | 554 +-- .../src/vender/other/NoToolPlaceholder.tsx | 8 +- .../base/icons/src/vender/other/Openai.json | 156 +- .../base/icons/src/vender/other/Openai.tsx | 8 +- .../icons/src/vender/other/ReplayLine.json | 68 +- .../icons/src/vender/other/ReplayLine.tsx | 8 +- .../src/vender/other/SquareChecklist.json | 48 +- .../src/vender/other/SquareChecklist.tsx | 8 +- .../icons/src/vender/pipeline/InputField.json | 124 +- .../icons/src/vender/pipeline/InputField.tsx | 8 +- .../src/vender/pipeline/PipelineFill.json | 156 +- .../src/vender/pipeline/PipelineFill.tsx | 8 +- .../src/vender/pipeline/PipelineLine.json | 68 +- .../src/vender/pipeline/PipelineLine.tsx | 8 +- .../src/vender/plugin/BoxSparkleFill.json | 128 +- .../src/vender/plugin/BoxSparkleFill.tsx | 8 +- .../icons/src/vender/plugin/LeftCorner.json | 50 +- .../icons/src/vender/plugin/LeftCorner.tsx | 8 +- .../base/icons/src/vender/plugin/Trigger.json | 142 +- .../base/icons/src/vender/plugin/Trigger.tsx | 8 +- .../solid/FinanceAndECommerce/GoldCoin.json | 48 +- .../solid/FinanceAndECommerce/GoldCoin.tsx | 8 +- .../solid/FinanceAndECommerce/Scales02.json | 92 +- .../solid/FinanceAndECommerce/Scales02.tsx | 8 +- .../alertsAndFeedback/AlertTriangle.json | 72 +- .../solid/alertsAndFeedback/AlertTriangle.tsx | 8 +- .../solid/arrows/ArrowDownDoubleLine.json | 48 +- .../solid/arrows/ArrowDownDoubleLine.tsx | 8 +- .../solid/arrows/ArrowDownRoundFill.json | 50 +- .../solid/arrows/ArrowDownRoundFill.tsx | 8 +- .../solid/arrows/ArrowUpDoubleLine.json | 48 +- .../vender/solid/arrows/ArrowUpDoubleLine.tsx | 8 +- .../src/vender/solid/arrows/ChevronDown.json | 74 +- .../src/vender/solid/arrows/ChevronDown.tsx | 8 +- .../src/vender/solid/arrows/HighPriority.json | 102 +- .../src/vender/solid/arrows/HighPriority.tsx | 8 +- .../vender/solid/communication/AiText.json | 102 +- .../src/vender/solid/communication/AiText.tsx | 8 +- .../solid/communication/BubbleTextMod.json | 52 +- .../solid/communication/BubbleTextMod.tsx | 8 +- .../vender/solid/communication/ChatBot.json | 112 +- 
.../vender/solid/communication/ChatBot.tsx | 8 +- .../vender/solid/communication/CuteRobot.json | 72 +- .../vender/solid/communication/CuteRobot.tsx | 8 +- .../vender/solid/communication/EditList.json | 102 +- .../vender/solid/communication/EditList.tsx | 8 +- .../solid/communication/ListSparkle.json | 102 +- .../solid/communication/ListSparkle.tsx | 8 +- .../src/vender/solid/communication/Logic.json | 102 +- .../src/vender/solid/communication/Logic.tsx | 8 +- .../communication/MessageDotsCircle.json | 72 +- .../solid/communication/MessageDotsCircle.tsx | 8 +- .../solid/communication/MessageFast.json | 52 +- .../solid/communication/MessageFast.tsx | 8 +- .../communication/MessageHeartCircle.json | 72 +- .../communication/MessageHeartCircle.tsx | 8 +- .../communication/MessageSmileSquare.json | 72 +- .../communication/MessageSmileSquare.tsx | 8 +- .../vender/solid/communication/Send03.json | 68 +- .../src/vender/solid/communication/Send03.tsx | 8 +- .../solid/development/ApiConnection.json | 102 +- .../solid/development/ApiConnection.tsx | 8 +- .../solid/development/ApiConnectionMod.json | 72 +- .../solid/development/ApiConnectionMod.tsx | 8 +- .../solid/development/BarChartSquare02.json | 72 +- .../solid/development/BarChartSquare02.tsx | 8 +- .../vender/solid/development/Container.json | 84 +- .../vender/solid/development/Container.tsx | 8 +- .../vender/solid/development/Database02.json | 88 +- .../vender/solid/development/Database02.tsx | 8 +- .../vender/solid/development/Database03.json | 52 +- .../vender/solid/development/Database03.tsx | 8 +- .../vender/solid/development/FileHeart02.json | 96 +- .../vender/solid/development/FileHeart02.tsx | 8 +- .../solid/development/PatternRecognition.json | 192 +- .../solid/development/PatternRecognition.tsx | 8 +- .../solid/development/PromptEngineering.json | 102 +- .../solid/development/PromptEngineering.tsx | 8 +- .../solid/development/PuzzlePiece01.json | 72 +- .../solid/development/PuzzlePiece01.tsx | 8 +- .../vender/solid/development/Semantic.json | 102 +- .../src/vender/solid/development/Semantic.tsx | 8 +- .../solid/development/TerminalSquare.json | 72 +- .../solid/development/TerminalSquare.tsx | 8 +- .../vender/solid/development/Variable02.json | 120 +- .../vender/solid/development/Variable02.tsx | 8 +- .../src/vender/solid/development/index.ts | 2 +- .../src/vender/solid/editor/Brush01.json | 66 +- .../icons/src/vender/solid/editor/Brush01.tsx | 8 +- .../src/vender/solid/editor/Citations.json | 68 +- .../src/vender/solid/editor/Citations.tsx | 8 +- .../icons/src/vender/solid/editor/Colors.json | 120 +- .../icons/src/vender/solid/editor/Colors.tsx | 8 +- .../src/vender/solid/editor/Paragraph.json | 84 +- .../src/vender/solid/editor/Paragraph.tsx | 8 +- .../src/vender/solid/editor/TypeSquare.json | 52 +- .../src/vender/solid/editor/TypeSquare.tsx | 8 +- .../src/vender/solid/education/Beaker02.json | 72 +- .../src/vender/solid/education/Beaker02.tsx | 8 +- .../vender/solid/education/BubbleText.json | 72 +- .../src/vender/solid/education/BubbleText.tsx | 8 +- .../src/vender/solid/education/Heart02.json | 48 +- .../src/vender/solid/education/Heart02.tsx | 8 +- .../src/vender/solid/education/Unblur.json | 300 +- .../src/vender/solid/education/Unblur.tsx | 8 +- .../icons/src/vender/solid/files/File05.json | 106 +- .../icons/src/vender/solid/files/File05.tsx | 8 +- .../src/vender/solid/files/FileSearch02.json | 110 +- .../src/vender/solid/files/FileSearch02.tsx | 8 +- .../icons/src/vender/solid/files/FileZip.json | 90 +- 
.../icons/src/vender/solid/files/FileZip.tsx | 8 +- .../icons/src/vender/solid/files/Folder.json | 72 +- .../icons/src/vender/solid/files/Folder.tsx | 8 +- .../vender/solid/general/AnswerTriangle.json | 50 +- .../vender/solid/general/AnswerTriangle.tsx | 8 +- .../solid/general/ArrowDownRoundFill.json | 68 +- .../solid/general/ArrowDownRoundFill.tsx | 8 +- .../src/vender/solid/general/CheckCircle.json | 72 +- .../src/vender/solid/general/CheckCircle.tsx | 8 +- .../src/vender/solid/general/CheckDone01.json | 70 +- .../src/vender/solid/general/CheckDone01.tsx | 8 +- .../src/vender/solid/general/Download02.json | 54 +- .../src/vender/solid/general/Download02.tsx | 8 +- .../src/vender/solid/general/Edit03.json | 110 +- .../icons/src/vender/solid/general/Edit03.tsx | 8 +- .../src/vender/solid/general/Edit04.json | 74 +- .../icons/src/vender/solid/general/Edit04.tsx | 8 +- .../icons/src/vender/solid/general/Eye.json | 70 +- .../icons/src/vender/solid/general/Eye.tsx | 8 +- .../src/vender/solid/general/Github.json | 68 +- .../icons/src/vender/solid/general/Github.tsx | 8 +- .../solid/general/MessageClockCircle.json | 68 +- .../solid/general/MessageClockCircle.tsx | 8 +- .../src/vender/solid/general/PlusCircle.json | 72 +- .../src/vender/solid/general/PlusCircle.tsx | 8 +- .../solid/general/QuestionTriangle.json | 86 +- .../vender/solid/general/QuestionTriangle.tsx | 8 +- .../src/vender/solid/general/SearchMd.json | 72 +- .../src/vender/solid/general/SearchMd.tsx | 8 +- .../src/vender/solid/general/Target04.json | 88 +- .../src/vender/solid/general/Target04.tsx | 8 +- .../src/vender/solid/general/Tool03.json | 120 +- .../icons/src/vender/solid/general/Tool03.tsx | 8 +- .../src/vender/solid/general/XCircle.json | 54 +- .../src/vender/solid/general/XCircle.tsx | 8 +- .../src/vender/solid/general/ZapFast.json | 154 +- .../src/vender/solid/general/ZapFast.tsx | 8 +- .../src/vender/solid/general/ZapNarrow.json | 72 +- .../src/vender/solid/general/ZapNarrow.tsx | 8 +- .../icons/src/vender/solid/layout/Grid01.json | 154 +- .../icons/src/vender/solid/layout/Grid01.tsx | 8 +- .../mediaAndDevices/AudioSupportIcon.json | 48 +- .../mediaAndDevices/AudioSupportIcon.tsx | 8 +- .../mediaAndDevices/DocumentSupportIcon.json | 48 +- .../mediaAndDevices/DocumentSupportIcon.tsx | 8 +- .../solid/mediaAndDevices/MagicBox.json | 124 +- .../vender/solid/mediaAndDevices/MagicBox.tsx | 8 +- .../solid/mediaAndDevices/MagicEyes.json | 72 +- .../solid/mediaAndDevices/MagicEyes.tsx | 8 +- .../solid/mediaAndDevices/MagicWand.json | 142 +- .../solid/mediaAndDevices/MagicWand.tsx | 8 +- .../solid/mediaAndDevices/Microphone01.json | 106 +- .../solid/mediaAndDevices/Microphone01.tsx | 8 +- .../vender/solid/mediaAndDevices/Play.json | 72 +- .../src/vender/solid/mediaAndDevices/Play.tsx | 8 +- .../vender/solid/mediaAndDevices/Robot.json | 72 +- .../vender/solid/mediaAndDevices/Robot.tsx | 8 +- .../solid/mediaAndDevices/Sliders02.json | 150 +- .../solid/mediaAndDevices/Sliders02.tsx | 8 +- .../vender/solid/mediaAndDevices/Speaker.json | 220 +- .../vender/solid/mediaAndDevices/Speaker.tsx | 8 +- .../solid/mediaAndDevices/StopCircle.json | 72 +- .../solid/mediaAndDevices/StopCircle.tsx | 8 +- .../mediaAndDevices/VideoSupportIcon.json | 48 +- .../mediaAndDevices/VideoSupportIcon.tsx | 8 +- .../src/vender/solid/security/Lock01.json | 72 +- .../src/vender/solid/security/Lock01.tsx | 8 +- .../icons/src/vender/solid/shapes/Corner.json | 50 +- .../icons/src/vender/solid/shapes/Corner.tsx | 8 +- .../icons/src/vender/solid/shapes/Star04.json | 
68 +- .../icons/src/vender/solid/shapes/Star04.tsx | 8 +- .../icons/src/vender/solid/shapes/Star06.json | 120 +- .../icons/src/vender/solid/shapes/Star06.tsx | 8 +- .../icons/src/vender/solid/users/User01.json | 110 +- .../icons/src/vender/solid/users/User01.tsx | 8 +- .../src/vender/solid/users/UserEdit02.json | 180 +- .../src/vender/solid/users/UserEdit02.tsx | 8 +- .../icons/src/vender/solid/users/Users01.json | 154 +- .../icons/src/vender/solid/users/Users01.tsx | 8 +- .../src/vender/solid/users/UsersPlus.json | 150 +- .../src/vender/solid/users/UsersPlus.tsx | 8 +- .../src/vender/system/AutoUpdateLine.json | 70 +- .../src/vender/system/AutoUpdateLine.tsx | 8 +- .../base/icons/src/vender/workflow/Agent.json | 102 +- .../base/icons/src/vender/workflow/Agent.tsx | 8 +- .../icons/src/vender/workflow/Answer.json | 72 +- .../base/icons/src/vender/workflow/Answer.tsx | 8 +- .../src/vender/workflow/ApiAggregate.json | 48 +- .../src/vender/workflow/ApiAggregate.tsx | 8 +- .../icons/src/vender/workflow/Assigner.json | 132 +- .../icons/src/vender/workflow/Assigner.tsx | 8 +- .../icons/src/vender/workflow/Asterisk.json | 48 +- .../icons/src/vender/workflow/Asterisk.tsx | 8 +- .../vender/workflow/CalendarCheckLine.json | 48 +- .../src/vender/workflow/CalendarCheckLine.tsx | 8 +- .../base/icons/src/vender/workflow/Code.json | 72 +- .../base/icons/src/vender/workflow/Code.tsx | 8 +- .../icons/src/vender/workflow/Datasource.json | 48 +- .../icons/src/vender/workflow/Datasource.tsx | 8 +- .../src/vender/workflow/DocsExtractor.json | 124 +- .../src/vender/workflow/DocsExtractor.tsx | 8 +- .../base/icons/src/vender/workflow/End.json | 72 +- .../base/icons/src/vender/workflow/End.tsx | 8 +- .../base/icons/src/vender/workflow/Home.json | 72 +- .../base/icons/src/vender/workflow/Home.tsx | 8 +- .../base/icons/src/vender/workflow/Http.json | 138 +- .../base/icons/src/vender/workflow/Http.tsx | 8 +- .../icons/src/vender/workflow/IfElse.json | 72 +- .../base/icons/src/vender/workflow/IfElse.tsx | 8 +- .../icons/src/vender/workflow/Iteration.json | 68 +- .../icons/src/vender/workflow/Iteration.tsx | 8 +- .../src/vender/workflow/IterationStart.json | 68 +- .../src/vender/workflow/IterationStart.tsx | 8 +- .../base/icons/src/vender/workflow/Jinja.json | 192 +- .../base/icons/src/vender/workflow/Jinja.tsx | 8 +- .../src/vender/workflow/KnowledgeBase.json | 68 +- .../src/vender/workflow/KnowledgeBase.tsx | 8 +- .../vender/workflow/KnowledgeRetrieval.json | 72 +- .../vender/workflow/KnowledgeRetrieval.tsx | 8 +- .../icons/src/vender/workflow/ListFilter.json | 72 +- .../icons/src/vender/workflow/ListFilter.tsx | 8 +- .../base/icons/src/vender/workflow/Llm.json | 72 +- .../base/icons/src/vender/workflow/Llm.tsx | 8 +- .../base/icons/src/vender/workflow/Loop.json | 72 +- .../base/icons/src/vender/workflow/Loop.tsx | 8 +- .../icons/src/vender/workflow/LoopEnd.json | 72 +- .../icons/src/vender/workflow/LoopEnd.tsx | 8 +- .../vender/workflow/ParameterExtractor.json | 528 +- .../vender/workflow/ParameterExtractor.tsx | 8 +- .../vender/workflow/QuestionClassifier.json | 72 +- .../vender/workflow/QuestionClassifier.tsx | 8 +- .../icons/src/vender/workflow/Schedule.json | 88 +- .../icons/src/vender/workflow/Schedule.tsx | 8 +- .../vender/workflow/TemplatingTransform.json | 304 +- .../vender/workflow/TemplatingTransform.tsx | 8 +- .../icons/src/vender/workflow/TriggerAll.json | 142 +- .../icons/src/vender/workflow/TriggerAll.tsx | 8 +- .../icons/src/vender/workflow/VariableX.json | 72 +- 
.../icons/src/vender/workflow/VariableX.tsx | 8 +- .../src/vender/workflow/WebhookLine.json | 48 +- .../icons/src/vender/workflow/WebhookLine.tsx | 8 +- .../src/vender/workflow/WindowCursor.json | 120 +- .../src/vender/workflow/WindowCursor.tsx | 8 +- .../base/icons/src/vender/workflow/index.ts | 4 +- web/app/components/base/icons/utils.spec.ts | 5 +- web/app/components/base/icons/utils.ts | 26 +- .../components/base/image-gallery/index.tsx | 35 +- .../base/image-uploader/audio-preview.tsx | 10 +- .../image-uploader/chat-image-uploader.tsx | 16 +- .../components/base/image-uploader/hooks.ts | 20 +- .../base/image-uploader/image-link-input.tsx | 14 +- .../image-uploader/image-list.stories.tsx | 6 +- .../base/image-uploader/image-list.tsx | 25 +- .../base/image-uploader/image-preview.tsx | 106 +- .../text-generation-image-uploader.tsx | 34 +- .../base/image-uploader/uploader.tsx | 10 +- .../components/base/image-uploader/utils.ts | 5 +- .../base/image-uploader/video-preview.tsx | 10 +- .../base/inline-delete-confirm/index.spec.tsx | 76 +- .../inline-delete-confirm/index.stories.tsx | 2 +- .../base/inline-delete-confirm/index.tsx | 10 +- .../base/input-number/index.spec.tsx | 6 +- .../base/input-number/index.stories.tsx | 44 +- .../components/base/input-number/index.tsx | 95 +- .../base/input-with-copy/index.spec.tsx | 63 +- .../components/base/input-with-copy/index.tsx | 33 +- web/app/components/base/input/index.spec.tsx | 20 +- .../components/base/input/index.stories.tsx | 7 +- web/app/components/base/input/index.tsx | 29 +- .../base/linked-apps-panel/index.stories.tsx | 2 +- .../base/linked-apps-panel/index.tsx | 20 +- .../base/list-empty/horizontal-line.tsx | 8 +- web/app/components/base/list-empty/index.tsx | 27 +- .../base/list-empty/vertical-line.tsx | 8 +- .../components/base/loading/index.spec.tsx | 5 +- web/app/components/base/loading/index.tsx | 17 +- web/app/components/base/logo/dify-logo.tsx | 7 +- .../components/base/logo/index.stories.tsx | 10 +- .../base/logo/logo-embedded-chat-avatar.tsx | 2 +- .../base/logo/logo-embedded-chat-header.tsx | 24 +- web/app/components/base/logo/logo-site.tsx | 6 +- .../base/markdown-blocks/audio-block.tsx | 3 +- .../base/markdown-blocks/button.tsx | 39 +- .../base/markdown-blocks/code-block.tsx | 79 +- .../components/base/markdown-blocks/form.tsx | 30 +- .../components/base/markdown-blocks/img.tsx | 2 +- .../components/base/markdown-blocks/index.ts | 20 +- .../components/base/markdown-blocks/link.tsx | 4 +- .../base/markdown-blocks/paragraph.tsx | 2 +- .../base/markdown-blocks/plugin-img.tsx | 7 +- .../base/markdown-blocks/plugin-paragraph.tsx | 5 +- .../base/markdown-blocks/pre-code.tsx | 6 +- .../markdown-blocks/think-block.stories.tsx | 2 +- .../base/markdown-blocks/think-block.tsx | 10 +- .../components/base/markdown-blocks/utils.ts | 3 +- .../base/markdown-blocks/video-block.tsx | 3 +- .../base/markdown/error-boundary.tsx | 14 +- web/app/components/base/markdown/index.tsx | 10 +- .../base/markdown/markdown-utils.ts | 5 +- .../base/markdown/react-markdown-wrapper.tsx | 6 +- web/app/components/base/mermaid/index.tsx | 52 +- web/app/components/base/mermaid/utils.spec.ts | 53 +- web/app/components/base/mermaid/utils.ts | 45 +- .../base/message-log-modal/index.stories.tsx | 8 +- .../base/message-log-modal/index.tsx | 32 +- .../components/base/modal-like-wrap/index.tsx | 33 +- web/app/components/base/modal/index.tsx | 70 +- web/app/components/base/modal/modal.tsx | 38 +- .../base/new-audio-button/index.stories.tsx | 2 +- 
.../base/new-audio-button/index.tsx | 20 +- web/app/components/base/node-status/index.tsx | 22 +- .../base/notion-connector/index.tsx | 22 +- web/app/components/base/notion-icon/index.tsx | 8 +- .../base/notion-page-selector/base.tsx | 68 +- .../credential-selector/index.tsx | 35 +- .../notion-page-selector/index.stories.tsx | 9 +- .../page-selector/index.tsx | 51 +- .../search-input/index.tsx | 14 +- web/app/components/base/pagination/hook.ts | 3 +- web/app/components/base/pagination/index.tsx | 60 +- .../components/base/pagination/pagination.tsx | 12 +- web/app/components/base/pagination/type.ts | 4 +- .../base/param-item/index.stories.tsx | 4 +- web/app/components/base/param-item/index.tsx | 32 +- .../base/param-item/score-threshold-item.tsx | 8 +- .../components/base/param-item/top-k-item.tsx | 8 +- .../components/base/popover/index.stories.tsx | 4 +- web/app/components/base/popover/index.tsx | 38 +- .../base/portal-to-follow-elem/index.spec.tsx | 49 +- .../portal-to-follow-elem/index.stories.tsx | 2 +- .../base/portal-to-follow-elem/index.tsx | 12 +- .../components/base/premium-badge/index.tsx | 17 +- .../progress-bar/progress-circle.stories.tsx | 3 +- .../base/progress-bar/progress-circle.tsx | 2 +- .../base/prompt-editor/constants.tsx | 5 +- .../components/base/prompt-editor/hooks.ts | 43 +- .../base/prompt-editor/index.stories.tsx | 24 +- .../components/base/prompt-editor/index.tsx | 135 +- .../plugins/component-picker-block/hooks.tsx | 95 +- .../plugins/component-picker-block/index.tsx | 62 +- .../component-picker-block/prompt-option.tsx | 5 +- .../variable-option.tsx | 9 +- .../plugins/context-block/component.tsx | 59 +- .../context-block-replacement-block.tsx | 12 +- .../plugins/context-block/index.tsx | 18 +- .../plugins/context-block/node.tsx | 4 +- .../plugins/current-block/component.tsx | 13 +- .../current-block-replacement-block.tsx | 12 +- .../plugins/current-block/index.tsx | 16 +- .../plugins/current-block/node.tsx | 4 +- .../plugins/error-message-block/component.tsx | 11 +- .../error-message-block-replacement-block.tsx | 12 +- .../plugins/error-message-block/index.tsx | 16 +- .../plugins/history-block/component.tsx | 52 +- .../history-block-replacement-block.tsx | 14 +- .../plugins/history-block/index.tsx | 18 +- .../plugins/history-block/node.tsx | 4 +- .../plugins/last-run-block/component.tsx | 11 +- .../plugins/last-run-block/index.tsx | 16 +- .../last-run-block-replacement-block.tsx | 12 +- .../plugins/on-blur-or-focus-block.tsx | 6 +- .../prompt-editor/plugins/placeholder.tsx | 9 +- .../plugins/query-block/component.tsx | 10 +- .../plugins/query-block/index.tsx | 14 +- .../query-block-replacement-block.tsx | 12 +- .../prompt-editor/plugins/update-block.tsx | 4 +- .../plugins/variable-block/index.tsx | 6 +- .../plugins/variable-value-block/index.tsx | 4 +- .../workflow-variable-block/component.tsx | 53 +- .../plugins/workflow-variable-block/index.tsx | 16 +- .../plugins/workflow-variable-block/node.tsx | 7 +- ...kflow-variable-block-replacement-block.tsx | 18 +- .../components/base/prompt-editor/types.ts | 4 +- .../components/base/prompt-editor/utils.ts | 6 +- .../components/base/prompt-log-modal/card.tsx | 16 +- .../base/prompt-log-modal/index.stories.tsx | 6 +- .../base/prompt-log-modal/index.tsx | 28 +- web/app/components/base/qrcode/index.tsx | 25 +- .../base/radio-card/index.stories.tsx | 79 +- web/app/components/base/radio-card/index.tsx | 24 +- .../base/radio-card/simple/index.tsx | 12 +- .../base/radio/component/group/index.tsx | 2 +- 
.../base/radio/component/radio/index.tsx | 27 +- .../components/base/radio/index.stories.tsx | 37 +- web/app/components/base/radio/index.tsx | 4 +- web/app/components/base/radio/ui.tsx | 10 +- .../base/search-input/index.stories.tsx | 188 +- .../components/base/search-input/index.tsx | 13 +- .../base/segmented-control/index.spec.tsx | 19 +- .../base/segmented-control/index.stories.tsx | 4 +- .../base/segmented-control/index.tsx | 19 +- web/app/components/base/select/custom.tsx | 81 +- .../components/base/select/index.stories.tsx | 45 +- web/app/components/base/select/index.tsx | 254 +- .../components/base/select/locale-signin.tsx | 29 +- web/app/components/base/select/locale.tsx | 29 +- web/app/components/base/select/pure.tsx | 42 +- .../base/simple-pie-chart/index.tsx | 10 +- web/app/components/base/skeleton/index.tsx | 13 +- .../components/base/slider/index.stories.tsx | 105 +- web/app/components/base/slider/index.tsx | 26 +- .../components/base/sort/index.stories.tsx | 4 +- web/app/components/base/sort/index.tsx | 39 +- .../components/base/spinner/index.spec.tsx | 3 +- .../components/base/spinner/index.stories.tsx | 4 +- web/app/components/base/spinner/index.tsx | 6 +- web/app/components/base/svg-gallery/index.tsx | 27 +- web/app/components/base/svg/index.stories.tsx | 4 +- web/app/components/base/svg/index.tsx | 6 +- .../components/base/switch/index.stories.tsx | 16 +- web/app/components/base/switch/index.tsx | 23 +- .../base/tab-header/index.stories.tsx | 6 +- web/app/components/base/tab-header/index.tsx | 10 +- .../base/tab-slider-new/index.stories.tsx | 2 +- .../components/base/tab-slider-new/index.tsx | 2 +- .../base/tab-slider-plain/index.tsx | 6 +- web/app/components/base/tab-slider/index.tsx | 20 +- .../base/tag-input/index.stories.tsx | 30 +- web/app/components/base/tag-input/index.tsx | 22 +- .../components/base/tag-management/filter.tsx | 97 +- .../base/tag-management/index.stories.tsx | 10 +- .../components/base/tag-management/index.tsx | 28 +- .../components/base/tag-management/panel.tsx | 89 +- .../base/tag-management/selector.tsx | 21 +- .../components/base/tag-management/store.ts | 2 +- .../base/tag-management/tag-item-editor.tsx | 51 +- .../base/tag-management/tag-remove-modal.tsx | 36 +- .../base/tag-management/trigger.tsx | 58 +- web/app/components/base/tag/index.tsx | 12 +- .../components/base/text-generation/hooks.ts | 5 +- .../components/base/text-generation/types.ts | 3 +- .../base/textarea/index.stories.tsx | 22 +- web/app/components/base/textarea/index.tsx | 7 +- web/app/components/base/theme-selector.tsx | 73 +- web/app/components/base/theme-switcher.tsx | 16 +- .../timezone-label/__tests__/index.test.tsx | 7 +- .../components/base/timezone-label/index.tsx | 5 +- web/app/components/base/toast/index.spec.tsx | 43 +- web/app/components/base/toast/index.tsx | 110 +- web/app/components/base/tooltip/content.tsx | 8 +- .../components/base/tooltip/index.spec.tsx | 35 +- web/app/components/base/tooltip/index.tsx | 33 +- .../base/video-gallery/VideoPlayer.tsx | 23 +- .../components/base/video-gallery/index.tsx | 5 +- .../base/voice-input/index.stories.tsx | 317 +- web/app/components/base/voice-input/index.tsx | 36 +- web/app/components/base/voice-input/utils.ts | 4 +- .../base/with-input-validation/index.spec.tsx | 19 +- .../with-input-validation/index.stories.tsx | 46 +- .../base/with-input-validation/index.tsx | 2 +- web/app/components/base/zendesk/index.tsx | 8 +- web/app/components/base/zendesk/utils.ts | 6 +- .../billing/annotation-full/index.spec.tsx | 16 +- 
.../billing/annotation-full/index.tsx | 26 +- .../billing/annotation-full/modal.spec.tsx | 27 +- .../billing/annotation-full/modal.tsx | 30 +- .../billing/annotation-full/usage.tsx | 6 +- .../apps-full-in-dialog/index.spec.tsx | 274 ++ .../billing/apps-full-in-dialog/index.tsx | 47 +- .../billing/billing-page/index.spec.tsx | 84 + .../components/billing/billing-page/index.tsx | 26 +- web/app/components/billing/config.ts | 4 +- .../billing/header-billing-btn/index.spec.tsx | 92 + .../billing/header-billing-btn/index.tsx | 8 +- .../billing/partner-stack/index.spec.tsx | 44 + .../billing/partner-stack/index.tsx | 5 +- .../partner-stack/use-ps-info.spec.tsx | 197 + .../billing/partner-stack/use-ps-info.ts | 10 +- .../billing/plan-upgrade-modal/index.spec.tsx | 22 +- .../billing/plan-upgrade-modal/index.tsx | 31 +- .../billing/plan/assets/enterprise.spec.tsx | 5 +- .../billing/plan/assets/enterprise.tsx | 164 +- .../billing/plan/assets/index.spec.tsx | 6 +- .../components/billing/plan/assets/index.tsx | 6 +- .../billing/plan/assets/professional.spec.tsx | 5 +- .../billing/plan/assets/professional.tsx | 164 +- .../billing/plan/assets/sandbox.spec.tsx | 7 +- .../billing/plan/assets/sandbox.tsx | 166 +- .../billing/plan/assets/team.spec.tsx | 5 +- .../components/billing/plan/assets/team.tsx | 164 +- .../components/billing/plan/index.spec.tsx | 130 + web/app/components/billing/plan/index.tsx | 93 +- .../billing/pricing/assets/cloud.tsx | 26 +- .../billing/pricing/assets/community.tsx | 170 +- .../pricing/assets/enterprise-noise.tsx | 20 +- .../billing/pricing/assets/enterprise.tsx | 170 +- .../billing/pricing/assets/index.spec.tsx | 64 + .../billing/pricing/assets/index.tsx | 16 +- .../billing/pricing/assets/noise-bottom.tsx | 20 +- .../billing/pricing/assets/noise-top.tsx | 20 +- .../billing/pricing/assets/premium-noise.tsx | 20 +- .../billing/pricing/assets/premium.tsx | 170 +- .../billing/pricing/assets/professional.tsx | 170 +- .../billing/pricing/assets/sandbox.tsx | 170 +- .../billing/pricing/assets/self-hosted.tsx | 26 +- .../billing/pricing/assets/team.tsx | 170 +- .../billing/pricing/footer.spec.tsx | 73 + web/app/components/billing/pricing/footer.tsx | 31 +- .../billing/pricing/header.spec.tsx | 77 + web/app/components/billing/pricing/header.tsx | 30 +- .../components/billing/pricing/index.spec.tsx | 122 + web/app/components/billing/pricing/index.tsx | 27 +- .../pricing/plan-switcher/index.spec.tsx | 114 + .../billing/pricing/plan-switcher/index.tsx | 22 +- .../plan-range-switcher.spec.tsx | 86 + .../plan-switcher/plan-range-switcher.tsx | 10 +- .../pricing/plan-switcher/tab.spec.tsx | 95 + .../billing/pricing/plan-switcher/tab.tsx | 7 +- .../plans/cloud-plan-item/button.spec.tsx | 12 +- .../pricing/plans/cloud-plan-item/button.tsx | 12 +- .../plans/cloud-plan-item/index.spec.tsx | 57 +- .../pricing/plans/cloud-plan-item/index.tsx | 73 +- .../plans/cloud-plan-item/list/index.spec.tsx | 10 +- .../plans/cloud-plan-item/list/index.tsx | 79 +- .../cloud-plan-item/list/item/index.spec.tsx | 4 +- .../plans/cloud-plan-item/list/item/index.tsx | 6 +- .../list/item/tooltip.spec.tsx | 2 +- .../cloud-plan-item/list/item/tooltip.tsx | 10 +- .../billing/pricing/plans/index.spec.tsx | 36 +- .../billing/pricing/plans/index.tsx | 43 +- .../self-hosted-plan-item/button.spec.tsx | 46 +- .../plans/self-hosted-plan-item/button.tsx | 24 +- .../self-hosted-plan-item/index.spec.tsx | 55 +- .../plans/self-hosted-plan-item/index.tsx | 55 +- .../self-hosted-plan-item/list/index.spec.tsx | 13 +- 
.../self-hosted-plan-item/list/index.tsx | 20 +- .../self-hosted-plan-item/list/item.spec.tsx | 4 +- .../plans/self-hosted-plan-item/list/item.tsx | 10 +- .../billing/priority-label/index.spec.tsx | 125 + .../billing/priority-label/index.tsx | 31 +- .../billing/progress-bar/index.spec.tsx | 25 + .../components/billing/progress-bar/index.tsx | 5 +- .../trigger-events-limit-modal/index.spec.tsx | 70 + .../trigger-events-limit-modal/index.tsx | 12 +- web/app/components/billing/type.ts | 6 +- .../billing/upgrade-btn/index.spec.tsx | 544 +++ .../components/billing/upgrade-btn/index.tsx | 21 +- .../billing/usage-info/apps-info.spec.tsx | 35 + .../billing/usage-info/apps-info.tsx | 8 +- .../billing/usage-info/index.spec.tsx | 114 + .../components/billing/usage-info/index.tsx | 46 +- .../billing/usage-info/vector-space-info.tsx | 12 +- web/app/components/billing/utils/index.ts | 2 +- .../billing/vector-space-full/index.spec.tsx | 58 + .../billing/vector-space-full/index.tsx | 20 +- .../components/browser-initializer.spec.ts | 78 + web/app/components/browser-initializer.tsx | 14 + .../custom/custom-page/index.spec.tsx | 34 +- .../components/custom/custom-page/index.tsx | 26 +- .../custom-web-app-brand/index.spec.tsx | 147 + .../custom/custom-web-app-brand/index.tsx | 200 +- web/app/components/datasets/api/index.tsx | 2 +- web/app/components/datasets/chunk.tsx | 64 +- .../datasets/common/check-rerank-model.ts | 9 +- .../datasets/common/chunking-mode-label.tsx | 8 +- .../datasets/common/credential-icon.tsx | 9 +- .../datasets/common/document-file-icon.tsx | 4 +- .../common/document-picker/document-list.tsx | 13 +- .../common/document-picker/index.spec.tsx | 159 +- .../datasets/common/document-picker/index.tsx | 82 +- .../preview-document-picker.spec.tsx | 46 +- .../preview-document-picker.tsx | 54 +- .../auto-disabled-document.tsx | 16 +- .../index-failed.tsx | 17 +- .../status-with-action.tsx | 17 +- .../index.tsx | 20 +- .../datasets/common/image-list/index.tsx | 13 +- .../datasets/common/image-list/more.tsx | 13 +- .../datasets/common/image-previewer/index.tsx | 63 +- .../common/image-uploader/hooks/use-upload.ts | 35 +- .../image-uploader-in-chunk/image-input.tsx | 31 +- .../image-uploader-in-chunk/image-item.tsx | 36 +- .../image-uploader-in-chunk/index.tsx | 19 +- .../image-input.tsx | 36 +- .../image-item.tsx | 36 +- .../index.tsx | 28 +- .../datasets/common/image-uploader/store.tsx | 6 +- .../datasets/common/image-uploader/utils.ts | 2 +- .../retrieval-method-config/index.spec.tsx | 70 +- .../common/retrieval-method-config/index.tsx | 107 +- .../common/retrieval-method-info/index.tsx | 44 +- .../common/retrieval-param-config/index.tsx | 110 +- .../dsl-confirm-modal.tsx | 30 +- .../create-from-dsl-modal/header.tsx | 10 +- .../create-from-dsl-modal/index.tsx | 80 +- .../create-from-dsl-modal/tab/index.tsx | 10 +- .../create-from-dsl-modal/tab/item.tsx | 6 +- .../create-from-dsl-modal/uploader.tsx | 52 +- .../datasets/create-from-pipeline/footer.tsx | 21 +- .../datasets/create-from-pipeline/header.tsx | 20 +- .../datasets/create-from-pipeline/index.tsx | 8 +- .../list/built-in-pipeline-list.tsx | 14 +- .../create-from-pipeline/list/create-card.tsx | 31 +- .../list/customized-list.tsx | 10 +- .../create-from-pipeline/list/index.tsx | 2 +- .../list/template-card/actions.tsx | 40 +- .../list/template-card/content.tsx | 26 +- .../details/chunk-structure-card.tsx | 23 +- .../list/template-card/details/hooks.tsx | 17 +- .../list/template-card/details/index.tsx | 68 +- 
.../list/template-card/edit-pipeline-info.tsx | 62 +- .../list/template-card/index.tsx | 48 +- .../list/template-card/operations.tsx | 30 +- .../create/embedding-process/index.tsx | 235 +- .../index.spec.tsx | 778 +++ .../empty-dataset-creation-modal/index.tsx | 35 +- .../create/file-preview/index.spec.tsx | 877 ++++ .../datasets/create/file-preview/index.tsx | 27 +- .../datasets/create/file-uploader/index.tsx | 88 +- web/app/components/datasets/create/icons.ts | 6 +- .../components/datasets/create/index.spec.tsx | 1285 +++++ web/app/components/datasets/create/index.tsx | 39 +- .../create/notion-page-preview/index.spec.tsx | 1150 +++++ .../create/notion-page-preview/index.tsx | 25 +- .../datasets/create/step-one/index.tsx | 112 +- .../datasets/create/step-one/upgrade-card.tsx | 21 +- .../datasets/create/step-three/index.spec.tsx | 845 ++++ .../datasets/create/step-three/index.tsx | 69 +- .../datasets/create/step-two/index.tsx | 949 ++-- .../datasets/create/step-two/inputs.tsx | 127 +- .../step-two/language-select/index.spec.tsx | 596 +++ .../create/step-two/language-select/index.tsx | 32 +- .../datasets/create/step-two/option-card.tsx | 62 +- .../step-two/preview-item/index.spec.tsx | 803 +++ .../create/step-two/preview-item/index.tsx | 26 +- .../datasets/create/stepper/index.spec.tsx | 737 +++ .../datasets/create/stepper/index.tsx | 35 +- .../datasets/create/stepper/step.tsx | 43 +- .../stop-embedding-modal/index.spec.tsx | 739 +++ .../create/stop-embedding-modal/index.tsx | 18 +- .../datasets/create/top-bar/index.spec.tsx | 540 +++ .../datasets/create/top-bar/index.tsx | 54 +- .../datasets/create/website/base.spec.tsx | 555 +++ .../website/base/checkbox-with-label.tsx | 12 +- .../website/base/crawled-result-item.tsx | 27 +- .../create/website/base/crawled-result.tsx | 23 +- .../datasets/create/website/base/crawling.tsx | 19 +- .../create/website/base/error-message.tsx | 12 +- .../datasets/create/website/base/field.tsx | 19 +- .../datasets/create/website/base/header.tsx | 35 +- .../datasets/create/website/base/input.tsx | 7 +- .../create/website/base/options-wrap.tsx | 22 +- .../create/website/base/url-input.tsx | 15 +- .../create/website/firecrawl/index.tsx | 84 +- .../create/website/firecrawl/options.tsx | 43 +- .../datasets/create/website/index.tsx | 115 +- .../create/website/jina-reader/base.spec.tsx | 396 ++ .../website/jina-reader/base/url-input.tsx | 16 +- .../create/website/jina-reader/index.spec.tsx | 1631 +++++++ .../create/website/jina-reader/index.tsx | 84 +- .../create/website/jina-reader/options.tsx | 27 +- .../datasets/create/website/no-data.tsx | 66 +- .../datasets/create/website/preview.tsx | 20 +- .../create/website/watercrawl/index.spec.tsx | 1810 +++++++ .../create/website/watercrawl/index.tsx | 84 +- .../create/website/watercrawl/options.tsx | 45 +- .../actions/index.spec.tsx | 825 ++++ .../create-from-pipeline/actions/index.tsx | 47 +- .../data-source-options/datasource-icon.tsx | 7 +- .../data-source-options/hooks.tsx | 9 +- .../data-source-options/index.spec.tsx | 1661 +++++++ .../data-source-options/index.tsx | 8 +- .../data-source-options/option-card.tsx | 6 +- .../base/credential-selector/index.spec.tsx | 1055 ++++ .../base/credential-selector/index.tsx | 15 +- .../base/credential-selector/item.tsx | 11 +- .../base/credential-selector/list.tsx | 4 +- .../base/credential-selector/trigger.tsx | 10 +- .../data-source/base/header.spec.tsx | 658 +++ .../data-source/base/header.tsx | 38 +- .../data-source/local-file/index.tsx | 114 +- 
.../online-documents/index.spec.tsx | 1324 +++++ .../data-source/online-documents/index.tsx | 81 +- .../page-selector/index.spec.tsx | 1634 +++++++ .../online-documents/page-selector/index.tsx | 10 +- .../online-documents/page-selector/item.tsx | 77 +- .../online-documents/page-selector/utils.ts | 2 +- .../data-source/online-documents/title.tsx | 6 +- .../online-drive/connect/index.spec.tsx | 622 +++ .../online-drive/connect/index.tsx | 28 +- .../file-list/header/breadcrumbs/bucket.tsx | 19 +- .../file-list/header/breadcrumbs/drive.tsx | 10 +- .../breadcrumbs/dropdown/index.spec.tsx | 868 ++++ .../header/breadcrumbs/dropdown/index.tsx | 17 +- .../header/breadcrumbs/dropdown/item.tsx | 5 +- .../header/breadcrumbs/dropdown/menu.tsx | 4 +- .../header/breadcrumbs/index.spec.tsx | 1079 +++++ .../file-list/header/breadcrumbs/index.tsx | 20 +- .../file-list/header/breadcrumbs/item.tsx | 9 +- .../file-list/header/index.spec.tsx | 727 +++ .../online-drive/file-list/header/index.tsx | 12 +- .../online-drive/file-list/index.spec.tsx | 755 +++ .../online-drive/file-list/index.tsx | 6 +- .../file-list/list/empty-folder.tsx | 6 +- .../file-list/list/empty-search-result.tsx | 20 +- .../online-drive/file-list/list/file-icon.tsx | 9 +- .../file-list/list/index.spec.tsx | 1940 ++++++++ .../online-drive/file-list/list/index.tsx | 30 +- .../online-drive/file-list/list/item.tsx | 45 +- .../data-source/online-drive/header.tsx | 30 +- .../data-source/online-drive/index.spec.tsx | 1902 ++++++++ .../data-source/online-drive/index.tsx | 43 +- .../data-source/online-drive/utils.ts | 10 +- .../data-source/store/index.ts | 12 +- .../base/checkbox-with-label.tsx | 8 +- .../base/crawled-result-item.tsx | 52 +- .../website-crawl/base/crawled-result.tsx | 24 +- .../website-crawl/base/crawling.tsx | 34 +- .../website-crawl/base/error-message.tsx | 17 +- .../website-crawl/base/index.spec.tsx | 946 ++++ .../website-crawl/base/options/index.spec.tsx | 1130 +++++ .../website-crawl/base/options/index.tsx | 40 +- .../data-source/website-crawl/index.spec.tsx | 1501 ++++++ .../data-source/website-crawl/index.tsx | 59 +- .../documents/create-from-pipeline/hooks.ts | 17 +- .../documents/create-from-pipeline/index.tsx | 100 +- .../create-from-pipeline/left-header.tsx | 30 +- .../preview/chunk-preview.spec.tsx | 461 ++ .../preview/chunk-preview.tsx | 248 +- .../preview/file-preview.spec.tsx | 320 ++ .../preview/file-preview.tsx | 37 +- .../create-from-pipeline/preview/loading.tsx | 74 +- .../preview/online-document-preview.spec.tsx | 358 ++ .../preview/online-document-preview.tsx | 39 +- .../preview/web-preview.spec.tsx | 256 + .../preview/web-preview.tsx | 30 +- .../process-documents/actions.tsx | 20 +- .../process-documents/components.spec.tsx | 861 ++++ .../process-documents/form.tsx | 10 +- .../process-documents/header.tsx | 24 +- .../process-documents/hooks.ts | 2 +- .../process-documents/index.spec.tsx | 595 +++ .../process-documents/index.tsx | 10 +- .../embedding-process/index.spec.tsx | 1262 +++++ .../processing/embedding-process/index.tsx | 113 +- .../embedding-process/rule-detail.spec.tsx | 476 ++ .../embedding-process/rule-detail.tsx | 50 +- .../processing/index.spec.tsx | 810 ++++ .../create-from-pipeline/processing/index.tsx | 38 +- .../create-from-pipeline/step-indicator.tsx | 6 +- .../detail/batch-modal/csv-downloader.tsx | 74 +- .../detail/batch-modal/csv-uploader.tsx | 57 +- .../documents/detail/batch-modal/index.tsx | 29 +- .../detail/completed/child-segment-detail.tsx | 58 +- 
.../detail/completed/child-segment-list.tsx | 153 +- .../completed/common/action-buttons.tsx | 52 +- .../detail/completed/common/add-another.tsx | 13 +- .../detail/completed/common/batch-action.tsx | 91 +- .../detail/completed/common/chunk-content.tsx | 53 +- .../documents/detail/completed/common/dot.tsx | 4 +- .../detail/completed/common/drawer.tsx | 47 +- .../detail/completed/common/empty.tsx | 47 +- .../completed/common/full-screen-drawer.tsx | 9 +- .../detail/completed/common/keywords.tsx | 28 +- .../completed/common/regeneration-modal.tsx | 52 +- .../completed/common/segment-index-tag.tsx | 6 +- .../documents/detail/completed/common/tag.tsx | 10 +- .../detail/completed/display-toggle.tsx | 21 +- .../documents/detail/completed/index.tsx | 252 +- .../detail/completed/new-child-segment.tsx | 102 +- .../completed/segment-card/chunk-content.tsx | 28 +- .../completed/segment-card/index.spec.tsx | 87 +- .../detail/completed/segment-card/index.tsx | 213 +- .../detail/completed/segment-detail.tsx | 72 +- .../detail/completed/segment-list.tsx | 38 +- .../skeleton/full-doc-list-skeleton.tsx | 14 +- .../skeleton/general-list-skeleton.tsx | 66 +- .../skeleton/paragraph-list-skeleton.tsx | 66 +- .../skeleton/parent-chunk-card-skeleton.tsx | 38 +- .../detail/completed/status-item.tsx | 11 +- .../documents/detail/document-title.tsx | 2 +- .../documents/detail/embedding/index.tsx | 188 +- .../detail/embedding/skeleton/index.tsx | 60 +- .../datasets/documents/detail/index.tsx | 120 +- .../documents/detail/metadata/index.tsx | 319 +- .../datasets/documents/detail/new-segment.tsx | 107 +- .../documents/detail/segment-add/index.tsx | 87 +- .../detail/settings/document-settings.tsx | 39 +- .../documents/detail/settings/index.tsx | 2 +- .../settings/pipeline-settings/index.spec.tsx | 61 +- .../settings/pipeline-settings/index.tsx | 38 +- .../pipeline-settings/left-header.tsx | 25 +- .../process-documents/actions.tsx | 10 +- .../process-documents/index.spec.tsx | 55 +- .../process-documents/index.tsx | 8 +- .../hooks/use-document-list-query-state.ts | 5 +- .../components/datasets/documents/index.tsx | 228 +- .../components/datasets/documents/list.tsx | 331 +- .../datasets/documents/operations.tsx | 297 +- .../datasets/documents/rename-modal.tsx | 23 +- .../datasets/documents/status-item/hooks.ts | 16 +- .../documents/status-item/index.spec.tsx | 57 +- .../datasets/documents/status-item/index.tsx | 112 +- .../external-api/external-api-modal/Form.tsx | 20 +- .../external-api/external-api-modal/index.tsx | 111 +- .../external-api/external-api-panel/index.tsx | 57 +- .../external-knowledge-api-card/index.tsx | 41 +- .../connector/index.spec.tsx | 47 +- .../connector/index.tsx | 7 +- .../create/ExternalApiSelect.tsx | 37 +- .../create/ExternalApiSelection.tsx | 58 +- .../create/InfoPanel.tsx | 34 +- .../create/KnowledgeBaseInfo.tsx | 28 +- .../create/RetrievalSettings.tsx | 27 +- .../create/index.spec.tsx | 68 +- .../external-knowledge-base/create/index.tsx | 65 +- .../components/datasets/extra-info/index.tsx | 8 +- .../datasets/extra-info/service-api/card.tsx | 89 +- .../datasets/extra-info/service-api/index.tsx | 22 +- .../datasets/extra-info/statistics.tsx | 48 +- .../formatted-text/flavours/edit-slice.tsx | 80 +- .../formatted-text/flavours/preview-slice.tsx | 25 +- .../formatted-text/flavours/shared.tsx | 65 +- .../datasets/formatted-text/formatted.tsx | 14 +- .../components/child-chunks-item.tsx | 13 +- .../components/chunk-detail-modal.tsx | 72 +- .../hit-testing/components/empty-records.tsx | 14 +- 
.../datasets/hit-testing/components/mask.tsx | 7 +- .../components/query-input/index.tsx | 102 +- .../components/query-input/textarea.tsx | 46 +- .../hit-testing/components/records.tsx | 47 +- .../components/result-item-external.tsx | 30 +- .../components/result-item-footer.tsx | 12 +- .../components/result-item-meta.tsx | 10 +- .../hit-testing/components/result-item.tsx | 52 +- .../datasets/hit-testing/components/score.tsx | 11 +- .../components/datasets/hit-testing/index.tsx | 117 +- .../modify-external-retrieval-modal.tsx | 30 +- .../hit-testing/modify-retrieval-modal.tsx | 83 +- .../datasets/hit-testing/style.module.css | 43 - .../datasets/list/dataset-card/index.tsx | 156 +- .../list/dataset-card/operation-item.tsx | 8 +- .../datasets/list/dataset-card/operations.tsx | 20 +- .../datasets/list/dataset-footer/index.tsx | 17 +- web/app/components/datasets/list/datasets.tsx | 10 +- web/app/components/datasets/list/index.tsx | 52 +- .../datasets/list/new-dataset-card/index.tsx | 24 +- .../datasets/list/new-dataset-card/option.tsx | 10 +- .../datasets/metadata/add-metadata-button.tsx | 14 +- .../datasets/metadata/base/date-picker.tsx | 18 +- .../metadata/edit-metadata-batch/add-row.tsx | 10 +- .../metadata/edit-metadata-batch/edit-row.tsx | 46 +- .../edit-metadata-batch/edited-beacon.tsx | 33 +- .../edit-metadata-batch/input-combined.tsx | 14 +- .../input-has-set-multiple-value.tsx | 14 +- .../metadata/edit-metadata-batch/label.tsx | 11 +- .../metadata/edit-metadata-batch/modal.tsx | 90 +- .../hooks/use-batch-edit-document-metadata.ts | 13 +- .../metadata/hooks/use-check-metadata-name.ts | 8 +- .../hooks/use-edit-dataset-metadata.ts | 17 +- .../metadata/hooks/use-metadata-document.ts | 19 +- .../metadata-dataset/create-content.tsx | 41 +- .../create-metadata-modal.tsx | 10 +- .../dataset-metadata-drawer.tsx | 108 +- .../metadata/metadata-dataset/field.tsx | 6 +- .../select-metadata-modal.tsx | 55 +- .../metadata-dataset/select-metadata.tsx | 46 +- .../metadata/metadata-document/field.tsx | 8 +- .../metadata/metadata-document/index.tsx | 116 +- .../metadata/metadata-document/info-group.tsx | 61 +- .../metadata/metadata-document/no-data.tsx | 18 +- .../datasets/metadata/utils/get-icon.ts | 2 +- .../datasets/no-linked-apps-panel.tsx | 21 +- .../components/datasets/preview/container.tsx | 32 +- .../components/datasets/preview/header.tsx | 22 +- .../datasets/rename-modal/index.tsx | 60 +- .../settings/chunk-structure/hooks.tsx | 17 +- .../settings/chunk-structure/index.tsx | 8 +- .../datasets/settings/form/index.tsx | 358 +- .../datasets/settings/index-method/index.tsx | 38 +- .../settings/index-method/keyword-number.tsx | 25 +- .../datasets/settings/option-card.tsx | 36 +- .../settings/permission-selector/index.tsx | 96 +- .../permission-selector/member-item.tsx | 20 +- .../permission-selector/permission-item.tsx | 8 +- .../datasets/settings/utils/index.tsx | 3 +- web/app/components/develop/ApiServer.tsx | 19 +- web/app/components/develop/code.tsx | 50 +- web/app/components/develop/doc.tsx | 172 +- web/app/components/develop/index.tsx | 14 +- web/app/components/develop/md.tsx | 15 +- .../develop/secret-key/input-copy.tsx | 26 +- .../develop/secret-key/secret-key-button.tsx | 12 +- .../secret-key/secret-key-generate.tsx | 22 +- .../develop/secret-key/secret-key-modal.tsx | 68 +- web/app/components/develop/tag.tsx | 8 +- .../components/devtools/react-scan/loader.tsx | 21 + .../react-scan/scan.tsx} | 2 +- .../components/devtools/tanstack/devtools.tsx | 23 + 
.../components/devtools/tanstack/loader.tsx | 21 + .../explore/app-card/index.spec.tsx | 18 +- web/app/components/explore/app-card/index.tsx | 50 +- .../explore/app-list/index.spec.tsx | 279 ++ web/app/components/explore/app-list/index.tsx | 97 +- web/app/components/explore/category.spec.tsx | 79 + web/app/components/explore/category.tsx | 16 +- .../explore/create-app-modal/index.spec.tsx | 596 +++ .../explore/create-app-modal/index.tsx | 120 +- web/app/components/explore/index.spec.tsx | 140 + web/app/components/explore/index.tsx | 31 +- .../explore/installed-app/index.spec.tsx | 684 +++ .../explore/installed-app/index.tsx | 72 +- .../explore/item-operation/index.spec.tsx | 109 + .../explore/item-operation/index.tsx | 35 +- .../sidebar/app-nav-item/index.spec.tsx | 99 + .../explore/sidebar/app-nav-item/index.tsx | 29 +- .../components/explore/sidebar/index.spec.tsx | 164 + web/app/components/explore/sidebar/index.tsx | 39 +- .../components/goto-anything/actions/app.tsx | 15 +- .../actions/commands/account.tsx | 10 +- .../actions/commands/community.tsx | 10 +- .../goto-anything/actions/commands/docs.tsx | 14 +- .../goto-anything/actions/commands/forum.tsx | 10 +- .../goto-anything/actions/commands/index.ts | 18 +- .../actions/commands/language.tsx | 6 +- .../actions/commands/registry.ts | 2 +- .../goto-anything/actions/commands/slash.tsx | 27 +- .../goto-anything/actions/commands/theme.tsx | 35 +- .../goto-anything/actions/commands/zen.tsx | 12 +- .../components/goto-anything/actions/index.ts | 12 +- .../goto-anything/actions/knowledge.tsx | 7 +- .../goto-anything/actions/plugin.tsx | 4 +- .../components/goto-anything/actions/types.ts | 6 +- .../goto-anything/command-selector.spec.tsx | 84 + .../goto-anything/command-selector.tsx | 76 +- .../components/goto-anything/context.spec.tsx | 60 + web/app/components/goto-anything/context.tsx | 3 +- .../components/goto-anything/index.spec.tsx | 175 + web/app/components/goto-anything/index.tsx | 317 +- .../components/header/account-about/index.tsx | 95 +- .../header/account-dropdown/compliance.tsx | 210 +- .../header/account-dropdown/index.tsx | 218 +- .../header/account-dropdown/support.tsx | 184 +- .../workplace-selector/index.tsx | 47 +- .../Integrations-page/index.tsx | 66 +- .../api-based-extension-page/empty.tsx | 19 +- .../api-based-extension-page/index.tsx | 22 +- .../api-based-extension-page/item.tsx | 36 +- .../api-based-extension-page/modal.tsx | 73 +- .../api-based-extension-page/selector.tsx | 86 +- .../header/account-setting/collapse/index.tsx | 14 +- .../data-source-page-new/card.tsx | 54 +- .../data-source-page-new/configure.tsx | 62 +- .../data-source-page-new/hooks/index.ts | 2 +- .../hooks/use-data-source-auth-update.ts | 3 +- .../data-source-page-new/index.tsx | 8 +- .../install-from-marketplace.tsx | 54 +- .../data-source-page-new/item.tsx | 42 +- .../data-source-page-new/operator.tsx | 86 +- .../data-source-notion/index.tsx | 35 +- .../data-source-notion/operate/index.tsx | 46 +- .../config-firecrawl-modal.tsx | 86 +- .../config-jina-reader-modal.tsx | 80 +- .../config-watercrawl-modal.tsx | 86 +- .../data-source-website/index.tsx | 44 +- .../data-source-page/panel/config-item.tsx | 38 +- .../data-source-page/panel/index.tsx | 94 +- .../header/account-setting/index.tsx | 96 +- .../key-validator/KeyInput.tsx | 13 +- .../account-setting/key-validator/Operate.tsx | 54 +- .../key-validator/ValidateStatus.tsx | 15 +- .../account-setting/key-validator/hooks.ts | 9 +- .../account-setting/key-validator/index.tsx | 23 +- 
.../account-setting/language-page/index.tsx | 23 +- .../edit-workspace-modal/index.tsx | 48 +- .../account-setting/members-page/index.tsx | 178 +- .../members-page/invite-modal/index.tsx | 86 +- .../invite-modal/role-selector.tsx | 118 +- .../members-page/invited-modal/index.tsx | 102 +- .../invited-modal/invitation-link.tsx | 19 +- .../members-page/operation/index.spec.tsx | 91 + .../members-page/operation/index.tsx | 155 +- .../operation/transfer-ownership.tsx | 14 +- .../transfer-ownership-modal/index.tsx | 109 +- .../member-selector.tsx | 56 +- .../header/account-setting/menu-dialog.tsx | 13 +- .../model-provider-page/declarations.ts | 10 +- .../model-provider-page/hooks.spec.ts | 79 +- .../model-provider-page/hooks.ts | 134 +- .../model-provider-page/index.tsx | 59 +- .../install-from-marketplace.tsx | 56 +- .../add-credential-in-load-balancing.tsx | 39 +- .../model-auth/add-custom-model.tsx | 69 +- .../model-auth/authorized/authorized-item.tsx | 18 +- .../model-auth/authorized/credential-item.tsx | 45 +- .../model-auth/authorized/index.tsx | 81 +- .../model-auth/config-model.tsx | 28 +- .../model-auth/config-provider.tsx | 34 +- .../model-auth/credential-selector.tsx | 42 +- .../model-auth/hooks/index.ts | 6 +- .../model-auth/hooks/use-auth-service.ts | 6 +- .../model-auth/hooks/use-auth.ts | 30 +- .../model-auth/hooks/use-credential-data.ts | 4 +- .../model-auth/hooks/use-credential-status.ts | 2 +- .../hooks/use-model-form-schemas.ts | 8 +- .../model-provider-page/model-auth/index.tsx | 8 +- .../manage-custom-model-credentials.tsx | 34 +- .../switch-credential-in-load-balancing.tsx | 60 +- .../model-provider-page/model-badge/index.tsx | 7 +- .../model-provider-page/model-icon/index.tsx | 23 +- .../model-provider-page/model-modal/Form.tsx | 141 +- .../model-modal/Input.test.tsx | 4 +- .../model-provider-page/model-modal/Input.tsx | 6 +- .../__snapshots__/Input.test.tsx.snap | 2 +- .../model-provider-page/model-modal/index.tsx | 170 +- .../model-provider-page/model-name/index.tsx | 14 +- .../agent-model-trigger.tsx | 155 +- .../configuration-button.tsx | 7 +- .../model-parameter-modal/index.tsx | 99 +- .../model-parameter-modal/model-display.tsx | 30 +- .../model-parameter-modal/parameter-item.tsx | 88 +- .../presets-parameter.tsx | 25 +- .../status-indicators.tsx | 82 +- .../model-parameter-modal/trigger.tsx | 52 +- .../deprecated-model-trigger.tsx | 16 +- .../model-selector/empty-trigger.tsx | 24 +- .../model-selector/feature-icon.tsx | 48 +- .../model-selector/index.tsx | 22 +- .../model-selector/model-trigger.tsx | 32 +- .../model-selector/popup-item.tsx | 65 +- .../model-selector/popup.tsx | 57 +- .../provider-added-card/add-model-button.tsx | 6 +- .../provider-added-card/cooldown-timer.tsx | 10 +- .../provider-added-card/credential-panel.tsx | 37 +- .../provider-added-card/index.tsx | 78 +- .../provider-added-card/model-list-item.tsx | 76 +- .../provider-added-card/model-list.tsx | 45 +- .../model-load-balancing-configs.tsx | 119 +- .../model-load-balancing-modal.tsx | 237 +- .../provider-added-card/priority-selector.tsx | 39 +- .../provider-added-card/priority-use-tip.tsx | 6 +- .../provider-added-card/quota-panel.tsx | 24 +- .../provider-icon/index.tsx | 24 +- .../system-model-selector/index.tsx | 134 +- .../model-provider-page/utils.ts | 14 +- .../plugin-page/SerpapiPlugin.tsx | 22 +- .../account-setting/plugin-page/index.tsx | 32 +- .../account-setting/plugin-page/utils.ts | 2 +- web/app/components/header/app-back/index.tsx | 17 +- 
web/app/components/header/app-nav/index.tsx | 28 +- .../components/header/app-selector/index.tsx | 79 +- .../components/header/dataset-nav/index.tsx | 31 +- web/app/components/header/env-nav/index.tsx | 15 +- .../components/header/explore-nav/index.tsx | 26 +- .../header/github-star/index.spec.tsx | 4 +- .../components/header/github-star/index.tsx | 4 +- web/app/components/header/header-wrapper.tsx | 15 +- web/app/components/header/index.tsx | 68 +- web/app/components/header/indicator/index.tsx | 10 +- .../components/header/license-env/index.tsx | 28 +- .../components/header/maintenance-notice.tsx | 12 +- web/app/components/header/nav/index.tsx | 26 +- .../header/nav/nav-selector/index.tsx | 96 +- .../components/header/plan-badge/index.tsx | 75 +- .../header/plugins-nav/downloading-icon.tsx | 6 +- .../components/header/plugins-nav/index.tsx | 36 +- web/app/components/header/tools-nav/index.tsx | 27 +- web/app/components/i18n-server.tsx | 6 +- web/app/components/i18n.tsx | 14 +- .../plugins/base/badges/icon-with-tooltip.tsx | 9 +- .../plugins/base/badges/partner.tsx | 2 +- .../plugins/base/badges/verified.tsx | 2 +- .../plugins/base/deprecation-notice.tsx | 53 +- .../plugins/base/key-value-item.tsx | 17 +- .../plugins/card/base/card-icon.tsx | 26 +- .../plugins/card/base/corner-mark.tsx | 2 +- .../plugins/card/base/description.tsx | 5 +- .../plugins/card/base/download-count.tsx | 2 +- .../components/plugins/card/base/org-info.tsx | 7 +- .../plugins/card/base/placeholder.tsx | 25 +- .../components/plugins/card/base/title.tsx | 2 +- .../plugins/card/card-more-info.tsx | 2 +- .../components/plugins/card/index.spec.tsx | 1742 +++++++ web/app/components/plugins/card/index.tsx | 39 +- web/app/components/plugins/constants.ts | 8 +- web/app/components/plugins/hooks.ts | 15 +- .../install-plugin/base/check-task-status.ts | 4 +- .../plugins/install-plugin/base/installed.tsx | 28 +- .../install-plugin/base/loading-error.tsx | 33 +- .../plugins/install-plugin/base/loading.tsx | 12 +- .../plugins/install-plugin/base/version.tsx | 22 +- .../plugins/install-plugin/hooks.ts | 18 +- .../hooks/use-check-installed.tsx | 7 +- .../hooks/use-install-plugin-limit.tsx | 6 +- .../hooks/use-refresh-plugin-list.tsx | 16 +- .../install-bundle/index.spec.tsx | 1431 ++++++ .../install-plugin/install-bundle/index.tsx | 23 +- .../install-bundle/item/github-item.tsx | 10 +- .../install-bundle/item/loaded-item.tsx | 15 +- .../install-bundle/item/marketplace-item.tsx | 7 +- .../install-bundle/item/package-item.tsx | 9 +- .../install-bundle/ready-to-install.tsx | 7 +- .../install-bundle/steps/install-multi.tsx | 39 +- .../install-bundle/steps/install.tsx | 71 +- .../install-bundle/steps/installed.tsx | 24 +- .../install-from-github/index.spec.tsx | 2136 ++++++++ .../install-from-github/index.tsx | 155 +- .../install-from-github/steps/loaded.spec.tsx | 525 ++ .../install-from-github/steps/loaded.tsx | 63 +- .../steps/selectPackage.spec.tsx | 877 ++++ .../steps/selectPackage.tsx | 66 +- .../install-from-github/steps/setURL.spec.tsx | 180 + .../install-from-github/steps/setURL.tsx | 36 +- .../install-from-local-package/index.spec.tsx | 2097 ++++++++ .../install-from-local-package/index.tsx | 77 +- .../ready-to-install.spec.tsx | 471 ++ .../ready-to-install.tsx | 15 +- .../steps/install.spec.tsx | 626 +++ .../steps/install.tsx | 73 +- .../steps/uploading.spec.tsx | 356 ++ .../steps/uploading.tsx | 36 +- .../install-from-marketplace/index.spec.tsx | 928 ++++ .../install-from-marketplace/index.tsx | 101 +- 
.../steps/install.spec.tsx | 729 +++ .../steps/install.tsx | 73 +- .../plugins/install-plugin/utils.ts | 2 +- .../plugins/marketplace/context.tsx | 73 +- .../marketplace/description/index.spec.tsx | 683 +++ .../plugins/marketplace/description/index.tsx | 14 +- .../plugins/marketplace/empty/index.spec.tsx | 836 ++++ .../plugins/marketplace/empty/index.tsx | 27 +- .../plugins/marketplace/empty/line.tsx | 24 +- .../components/plugins/marketplace/hooks.ts | 32 +- .../plugins/marketplace/index.spec.tsx | 3154 ++++++++++++ .../components/plugins/marketplace/index.tsx | 13 +- .../plugins/marketplace/list/card-wrapper.tsx | 68 +- .../plugins/marketplace/list/index.spec.tsx | 1702 +++++++ .../plugins/marketplace/list/index.tsx | 12 +- .../marketplace/list/list-with-collection.tsx | 30 +- .../plugins/marketplace/list/list-wrapper.tsx | 25 +- .../marketplace/plugin-type-switch.tsx | 39 +- .../marketplace/search-box/index.spec.tsx | 1291 +++++ .../plugins/marketplace/search-box/index.tsx | 40 +- .../search-box/search-box-wrapper.tsx | 6 +- .../marketplace/search-box/tags-filter.tsx | 24 +- .../search-box/trigger/marketplace.tsx | 25 +- .../search-box/trigger/tool-selector.tsx | 17 +- .../marketplace/sort-dropdown/index.spec.tsx | 742 +++ .../marketplace/sort-dropdown/index.tsx | 32 +- .../sticky-search-and-switch-wrapper.tsx | 4 +- .../components/plugins/marketplace/utils.ts | 26 +- .../authorize/add-api-key-button.tsx | 8 +- .../authorize/add-oauth-button.tsx | 82 +- .../plugin-auth/authorize/api-key-modal.tsx | 36 +- .../authorize/authorize-components.spec.tsx | 2252 +++++++++ .../plugin-auth/authorize/index.spec.tsx | 786 +++ .../plugins/plugin-auth/authorize/index.tsx | 32 +- .../authorize/oauth-client-settings.tsx | 54 +- .../authorized-in-data-source-node.tsx | 14 +- .../plugin-auth/authorized-in-node.tsx | 28 +- .../plugins/plugin-auth/authorized/index.tsx | 106 +- .../plugins/plugin-auth/authorized/item.tsx | 78 +- .../plugin-auth/hooks/use-credential.ts | 5 +- .../plugins/plugin-auth/hooks/use-get-api.ts | 6 +- .../hooks/use-plugin-auth-action.ts | 8 +- .../plugin-auth/hooks/use-plugin-auth.ts | 4 +- .../plugins/plugin-auth/index.spec.tsx | 2035 ++++++++ .../components/plugins/plugin-auth/index.tsx | 16 +- .../plugin-auth/plugin-auth-in-agent.tsx | 33 +- .../plugin-auth-in-datasource-node.tsx | 14 +- .../plugins/plugin-auth/plugin-auth.tsx | 4 +- .../plugin-detail-panel/action-list.tsx | 15 +- .../agent-strategy-list.tsx | 15 +- .../app-selector/app-inputs-form.tsx | 16 +- .../app-selector/app-inputs-panel.tsx | 29 +- .../app-selector/app-picker.tsx | 51 +- .../app-selector/app-trigger.tsx | 19 +- .../app-selector/index.tsx | 40 +- .../datasource-action-list.tsx | 15 +- .../plugin-detail-panel/detail-header.tsx | 201 +- .../plugin-detail-panel/endpoint-card.tsx | 85 +- .../plugin-detail-panel/endpoint-list.tsx | 63 +- .../plugin-detail-panel/endpoint-modal.tsx | 59 +- .../plugins/plugin-detail-panel/index.tsx | 41 +- .../plugin-detail-panel/model-list.tsx | 16 +- .../model-selector/index.spec.tsx | 1422 ++++++ .../model-selector/index.tsx | 112 +- .../model-selector/llm-params-panel.spec.tsx | 717 +++ .../model-selector/llm-params-panel.tsx | 28 +- .../model-selector/tts-params-panel.spec.tsx | 623 +++ .../model-selector/tts-params-panel.tsx | 23 +- .../multiple-tool-selector/index.spec.tsx | 1028 ++++ .../multiple-tool-selector/index.tsx | 57 +- .../operation-dropdown.tsx | 43 +- .../plugins/plugin-detail-panel/store.ts | 2 +- .../plugin-detail-panel/strategy-detail.tsx | 91 +- 
.../plugin-detail-panel/strategy-item.tsx | 11 +- .../create/common-modal.spec.tsx | 1888 ++++++++ .../subscription-list/create/common-modal.tsx | 285 +- .../subscription-list/create/index.spec.tsx | 1478 ++++++ .../subscription-list/create/index.tsx | 244 +- .../create/oauth-client.spec.tsx | 1254 +++++ .../subscription-list/create/oauth-client.tsx | 138 +- .../subscription-list/delete-confirm.spec.tsx | 92 + .../subscription-list/delete-confirm.tsx | 57 +- .../edit/apikey-edit-modal.spec.tsx | 101 + .../edit/apikey-edit-modal.tsx | 349 ++ .../subscription-list/edit/index.spec.tsx | 1558 ++++++ .../subscription-list/edit/index.tsx | 28 + .../edit/manual-edit-modal.spec.tsx | 98 + .../edit/manual-edit-modal.tsx | 164 + .../edit/oauth-edit-modal.spec.tsx | 98 + .../edit/oauth-edit-modal.tsx | 178 + .../subscription-list/index.spec.tsx | 213 + .../subscription-list/index.tsx | 20 +- .../subscription-list/list-view.spec.tsx | 63 + .../subscription-list/list-view.tsx | 22 +- .../subscription-list/log-viewer.spec.tsx | 179 + .../subscription-list/log-viewer.tsx | 72 +- .../subscription-list/selector-entry.spec.tsx | 91 + .../subscription-list/selector-entry.tsx | 81 +- .../subscription-list/selector-view.spec.tsx | 139 + .../subscription-list/selector-view.tsx | 62 +- .../subscription-card.spec.tsx | 91 + .../subscription-list/subscription-card.tsx | 64 +- .../subscription-list/types.ts | 9 + .../use-subscription-list.spec.ts | 67 + .../tool-selector/index.tsx | 147 +- .../tool-selector/reasoning-config-form.tsx | 133 +- .../tool-selector/schema-modal.tsx | 31 +- .../tool-selector/tool-credentials-form.tsx | 85 +- .../tool-selector/tool-item.tsx | 80 +- .../tool-selector/tool-trigger.tsx | 21 +- .../trigger/event-detail-drawer.tsx | 110 +- .../trigger/event-list.tsx | 23 +- .../plugins/plugin-item/action.spec.tsx | 937 ++++ .../components/plugins/plugin-item/action.tsx | 57 +- .../plugins/plugin-item/index.spec.tsx | 1016 ++++ .../components/plugins/plugin-item/index.tsx | 156 +- .../plugin-mutation-model/index.spec.tsx | 1162 +++++ .../plugins/plugin-mutation-model/index.tsx | 27 +- .../plugins/plugin-page/context.tsx | 12 +- .../plugins/plugin-page/debug-info.tsx | 47 +- .../plugins/plugin-page/empty/index.spec.tsx | 583 +++ .../plugins/plugin-page/empty/index.tsx | 92 +- .../filter-management/category-filter.tsx | 45 +- .../filter-management/index.spec.tsx | 1175 +++++ .../plugin-page/filter-management/index.tsx | 11 +- .../filter-management/search-box.tsx | 9 +- .../plugin-page/filter-management/store.ts | 2 +- .../filter-management/tag-filter.tsx | 45 +- .../components/plugins/plugin-page/index.tsx | 150 +- .../plugin-page/install-plugin-dropdown.tsx | 66 +- .../plugins/plugin-page/list/index.spec.tsx | 702 +++ .../plugins/plugin-page/list/index.tsx | 6 +- .../plugins/plugin-page/plugin-info.tsx | 18 +- .../plugins/plugin-page/plugin-tasks/hooks.ts | 2 +- .../plugin-page/plugin-tasks/index.tsx | 154 +- .../plugins/plugin-page/plugins-panel.tsx | 52 +- .../plugin-page/use-reference-setting.ts | 12 +- .../plugins/plugin-page/use-uploader.ts | 16 +- web/app/components/plugins/provider-card.tsx | 53 +- .../plugins/readme-panel/entrance.tsx | 22 +- .../plugins/readme-panel/index.spec.tsx | 893 ++++ .../components/plugins/readme-panel/index.tsx | 69 +- .../components/plugins/readme-panel/store.ts | 12 +- .../auto-update-setting/config.ts | 1 + .../auto-update-setting/index.spec.tsx | 1792 +++++++ .../auto-update-setting/index.tsx | 77 +- .../no-data-placeholder.tsx | 16 +- 
.../no-plugin-selected.tsx | 8 +- .../auto-update-setting/plugins-picker.tsx | 46 +- .../auto-update-setting/plugins-selected.tsx | 15 +- .../auto-update-setting/strategy-picker.tsx | 48 +- .../auto-update-setting/tool-item.tsx | 24 +- .../auto-update-setting/tool-picker.tsx | 55 +- .../auto-update-setting/utils.ts | 5 +- .../reference-setting-modal/index.spec.tsx | 1042 ++++ .../{modal.tsx => index.tsx} | 51 +- .../plugins/reference-setting-modal/label.tsx | 8 +- web/app/components/plugins/types.ts | 46 +- .../update-plugin/downgrade-warning.tsx | 18 +- .../plugins/update-plugin/from-github.tsx | 2 +- .../update-plugin/from-market-place.tsx | 53 +- .../plugins/update-plugin/index.spec.tsx | 1237 +++++ .../plugins/update-plugin/index.tsx | 2 +- .../update-plugin/plugin-version-picker.tsx | 40 +- web/app/components/plugins/utils.ts | 7 +- .../components/chunk-card-list/chunk-card.tsx | 28 +- .../components/chunk-card-list/index.tsx | 7 +- .../components/chunk-card-list/q-a-item.tsx | 8 +- .../rag-pipeline/components/conversion.tsx | 59 +- .../rag-pipeline/components/panel/index.tsx | 4 +- .../input-field/editor/form/hidden-fields.tsx | 4 +- .../panel/input-field/editor/form/hooks.ts | 123 +- .../panel/input-field/editor/form/index.tsx | 34 +- .../editor/form/initial-fields.tsx | 3 +- .../panel/input-field/editor/form/schema.ts | 22 +- .../editor/form/show-all-settings.tsx | 22 +- .../panel/input-field/editor/index.tsx | 24 +- .../panel/input-field/editor/utils.ts | 2 +- .../input-field/field-list/field-item.tsx | 78 +- .../field-list/field-list-container.tsx | 12 +- .../panel/input-field/field-list/hooks.ts | 26 +- .../panel/input-field/field-list/index.tsx | 19 +- .../panel/input-field/field-list/types.ts | 4 +- .../panel/input-field/footer-tip.tsx | 8 +- .../components/panel/input-field/hooks.ts | 5 +- .../components/panel/input-field/index.tsx | 74 +- .../label-right-content/datasource.tsx | 12 +- .../label-right-content/global-inputs.tsx | 14 +- .../panel/input-field/preview/data-source.tsx | 16 +- .../panel/input-field/preview/form.tsx | 6 +- .../panel/input-field/preview/index.tsx | 28 +- .../input-field/preview/process-documents.tsx | 8 +- .../components/panel/test-run/header.tsx | 21 +- .../components/panel/test-run/index.tsx | 22 +- .../test-run/preparation/actions/index.tsx | 10 +- .../preparation/data-source-options/index.tsx | 4 +- .../data-source-options/option-card.tsx | 11 +- .../document-processing/actions.tsx | 16 +- .../preparation/document-processing/index.tsx | 9 +- .../document-processing/options.tsx | 14 +- .../test-run/preparation/footer-tips.tsx | 6 +- .../panel/test-run/preparation/hooks.ts | 12 +- .../panel/test-run/preparation/index.tsx | 42 +- .../test-run/preparation/step-indicator.tsx | 16 +- .../panel/test-run/result/index.tsx | 14 +- .../test-run/result/result-preview/index.tsx | 31 +- .../test-run/result/result-preview/utils.ts | 5 +- .../panel/test-run/result/tabs/index.tsx | 18 +- .../panel/test-run/result/tabs/tab.tsx | 7 +- .../publish-as-knowledge-pipeline-modal.tsx | 66 +- .../rag-pipeline/components/publish-toast.tsx | 30 +- .../components/rag-pipeline-children.tsx | 14 +- .../components/rag-pipeline-header/index.tsx | 4 +- .../input-field-button.tsx | 12 +- .../rag-pipeline-header/publisher/index.tsx | 14 +- .../rag-pipeline-header/publisher/popup.tsx | 196 +- .../rag-pipeline-header/run-mode.tsx | 43 +- .../components/rag-pipeline-main.tsx | 8 +- .../rag-pipeline/components/screenshot.tsx | 10 +- .../components/update-dsl-modal.tsx | 138 +- 
.../components/rag-pipeline/hooks/index.ts | 10 +- .../components/rag-pipeline/hooks/use-DSL.ts | 12 +- .../hooks/use-available-nodes-meta-data.ts | 14 +- .../rag-pipeline/hooks/use-configs-map.ts | 2 +- .../hooks/use-input-field-panel.ts | 2 +- .../rag-pipeline/hooks/use-input-fields.ts | 5 +- .../hooks/use-nodes-sync-draft.ts | 10 +- .../rag-pipeline/hooks/use-pipeline-config.ts | 8 +- .../rag-pipeline/hooks/use-pipeline-init.ts | 6 +- .../hooks/use-pipeline-refresh-draft.ts | 4 +- .../rag-pipeline/hooks/use-pipeline-run.ts | 19 +- .../hooks/use-pipeline-start-run.tsx | 2 +- .../hooks/use-pipeline-template.ts | 6 +- .../rag-pipeline/hooks/use-pipeline.tsx | 7 +- .../hooks/use-rag-pipeline-search.tsx | 99 +- web/app/components/rag-pipeline/index.tsx | 12 +- .../components/rag-pipeline/store/index.ts | 8 +- .../components/rag-pipeline/utils/nodes.ts | 12 +- web/app/components/sentry-initializer.tsx | 6 +- .../share/text-generation/index.tsx | 233 +- .../share/text-generation/info-modal.tsx | 25 +- .../share/text-generation/menu-dropdown.tsx | 49 +- .../text-generation/no-data/index.spec.tsx | 4 +- .../share/text-generation/no-data/index.tsx | 10 +- .../share/text-generation/result/content.tsx | 11 +- .../share/text-generation/result/header.tsx | 32 +- .../share/text-generation/result/index.tsx | 94 +- .../run-batch/csv-download/index.spec.tsx | 10 +- .../run-batch/csv-download/index.tsx | 32 +- .../run-batch/csv-reader/index.spec.tsx | 20 +- .../run-batch/csv-reader/index.tsx | 35 +- .../text-generation/run-batch/index.spec.tsx | 43 +- .../share/text-generation/run-batch/index.tsx | 17 +- .../run-batch/res-download/index.spec.tsx | 6 +- .../run-batch/res-download/index.tsx | 14 +- .../text-generation/run-once/index.spec.tsx | 242 + .../share/text-generation/run-once/index.tsx | 244 +- web/app/components/signin/countdown.tsx | 21 +- web/app/components/splash.tsx | 4 +- .../config-credentials.spec.tsx | 60 + .../config-credentials.tsx | 119 +- .../edit-custom-collection-modal/examples.ts | 2 +- .../get-schema.spec.tsx | 55 + .../get-schema.tsx | 57 +- .../index.spec.tsx | 154 + .../edit-custom-collection-modal/index.tsx | 211 +- .../test-api.spec.tsx | 87 + .../edit-custom-collection-modal/test-api.tsx | 82 +- web/app/components/tools/labels/filter.tsx | 72 +- web/app/components/tools/labels/selector.tsx | 51 +- web/app/components/tools/labels/store.ts | 2 +- web/app/components/tools/marketplace/hooks.ts | 4 +- .../tools/marketplace/index.spec.tsx | 368 ++ .../components/tools/marketplace/index.tsx | 50 +- web/app/components/tools/mcp/create-card.tsx | 34 +- .../components/tools/mcp/detail/content.tsx | 135 +- .../tools/mcp/detail/list-loading.tsx | 44 +- .../tools/mcp/detail/operation-dropdown.tsx | 25 +- .../tools/mcp/detail/provider-detail.tsx | 8 +- .../components/tools/mcp/detail/tool-item.tsx | 45 +- .../components/tools/mcp/headers-input.tsx | 77 +- web/app/components/tools/mcp/index.tsx | 13 +- .../components/tools/mcp/mcp-server-modal.tsx | 59 +- .../tools/mcp/mcp-server-param-item.tsx | 21 +- .../components/tools/mcp/mcp-service-card.tsx | 138 +- web/app/components/tools/mcp/modal.tsx | 186 +- .../components/tools/mcp/provider-card.tsx | 58 +- web/app/components/tools/provider-list.tsx | 66 +- .../tools/provider/custom-create-card.tsx | 41 +- web/app/components/tools/provider/detail.tsx | 188 +- web/app/components/tools/provider/empty.tsx | 25 +- .../components/tools/provider/tool-item.tsx | 13 +- .../setting/build-in/config-credentials.tsx | 120 +- 
.../components/tools/utils/to-form-schema.ts | 12 +- .../tools/workflow-tool/configure-button.tsx | 118 +- .../confirm-modal/index.spec.tsx | 20 +- .../workflow-tool/confirm-modal/index.tsx | 30 +- .../components/tools/workflow-tool/index.tsx | 239 +- .../tools/workflow-tool/method-selector.tsx | 51 +- .../tools/workflow-tool/utils.test.ts | 47 + .../components/tools/workflow-tool/utils.ts | 28 + .../components/workflow-children.tsx | 27 +- .../chat-variable-trigger.spec.tsx | 72 + .../workflow-header/features-trigger.spec.tsx | 479 ++ .../workflow-header/features-trigger.tsx | 64 +- .../components/workflow-header/index.spec.tsx | 164 + .../components/workflow-header/index.tsx | 12 +- .../workflow-app/components/workflow-main.tsx | 6 +- .../workflow-onboarding-modal/index.spec.tsx | 38 +- .../workflow-onboarding-modal/index.tsx | 24 +- .../start-node-option.spec.tsx | 6 +- .../start-node-option.tsx | 7 +- .../start-node-selection-panel.spec.tsx | 53 +- .../start-node-selection-panel.tsx | 25 +- .../components/workflow-panel.tsx | 8 +- .../components/workflow-app/hooks/index.ts | 20 +- .../components/workflow-app/hooks/use-DSL.ts | 12 +- .../hooks/use-available-nodes-meta-data.ts | 32 +- .../workflow-app/hooks/use-configs-map.ts | 2 +- .../hooks/use-nodes-sync-draft.ts | 10 +- .../workflow-app/hooks/use-workflow-init.ts | 14 +- .../hooks/use-workflow-refresh-draft.ts | 4 +- .../workflow-app/hooks/use-workflow-run.ts | 44 +- .../hooks/use-workflow-start-run.tsx | 6 +- .../hooks/use-workflow-template.ts | 16 +- web/app/components/workflow-app/index.tsx | 46 +- .../__tests__/trigger-status-sync.test.tsx | 37 +- web/app/components/workflow/block-icon.tsx | 48 +- .../block-selector/all-start-blocks.tsx | 84 +- .../workflow/block-selector/all-tools.tsx | 91 +- .../workflow/block-selector/blocks.tsx | 51 +- .../workflow/block-selector/constants.tsx | 12 +- .../workflow/block-selector/data-sources.tsx | 30 +- .../block-selector/featured-tools.tsx | 102 +- .../block-selector/featured-triggers.tsx | 99 +- .../workflow/block-selector/hooks.ts | 23 +- .../workflow/block-selector/index-bar.tsx | 8 +- .../workflow/block-selector/index.tsx | 4 +- .../workflow/block-selector/main.tsx | 74 +- .../market-place-plugin/action.tsx | 26 +- .../market-place-plugin/item.tsx | 32 +- .../market-place-plugin/list.tsx | 42 +- .../rag-tool-recommendations/index.tsx | 49 +- .../rag-tool-recommendations/list.tsx | 56 +- .../uninstalled-item.tsx | 26 +- .../workflow/block-selector/start-blocks.tsx | 59 +- .../workflow/block-selector/tabs.tsx | 36 +- .../workflow/block-selector/tool-picker.tsx | 49 +- .../block-selector/tool/action-item.tsx | 36 +- .../tool/tool-list-flat-view/list.tsx | 13 +- .../tool/tool-list-tree-view/item.tsx | 11 +- .../tool/tool-list-tree-view/list.tsx | 17 +- .../workflow/block-selector/tool/tool.tsx | 62 +- .../workflow/block-selector/tools.tsx | 69 +- .../trigger-plugin/action-item.tsx | 32 +- .../block-selector/trigger-plugin/item.tsx | 33 +- .../block-selector/trigger-plugin/list.tsx | 6 +- .../workflow/block-selector/types.ts | 69 +- .../use-check-vertical-scrollbar.ts | 3 +- .../block-selector/use-sticky-scroll.ts | 2 +- .../workflow/block-selector/utils.ts | 2 +- .../block-selector/view-type-select.tsx | 27 +- .../workflow/candidate-node-main.tsx | 18 +- .../components/workflow/candidate-node.tsx | 2 +- web/app/components/workflow/constants.ts | 65 +- web/app/components/workflow/constants/node.ts | 34 +- web/app/components/workflow/context.tsx | 4 +- .../workflow/custom-connection-line.tsx | 
8 +- .../custom-edge-linear-gradient-render.tsx | 6 +- web/app/components/workflow/custom-edge.tsx | 27 +- .../datasets-detail-store/provider.tsx | 14 +- .../workflow/datasets-detail-store/store.ts | 4 +- .../workflow/dsl-export-confirm-modal.tsx | 55 +- web/app/components/workflow/features.tsx | 11 +- .../workflow/header/chat-variable-button.tsx | 10 +- .../components/workflow/header/checklist.tsx | 99 +- .../workflow/header/editing-title.tsx | 16 +- .../components/workflow/header/env-button.tsx | 12 +- .../header/global-variable-button.tsx | 12 +- .../workflow/header/header-in-normal.tsx | 30 +- .../workflow/header/header-in-restoring.tsx | 45 +- .../header/header-in-view-history.tsx | 24 +- web/app/components/workflow/header/index.tsx | 14 +- .../workflow/header/restoring-title.tsx | 20 +- .../workflow/header/run-and-history.tsx | 18 +- .../components/workflow/header/run-mode.tsx | 90 +- .../workflow/header/running-title.tsx | 12 +- .../header/scroll-to-selected-node-button.tsx | 13 +- .../workflow/header/test-run-menu.tsx | 25 +- .../components/workflow/header/undo-redo.tsx | 57 +- .../header/version-history-button.tsx | 52 +- .../workflow/header/view-history.tsx | 99 +- .../workflow/header/view-workflow-history.tsx | 240 +- .../components/workflow/help-line/index.tsx | 10 +- .../workflow/hooks-store/provider.tsx | 2 +- .../components/workflow/hooks-store/store.ts | 34 +- web/app/components/workflow/hooks/index.ts | 34 +- .../hooks/use-auto-generate-webhook-url.ts | 2 +- .../workflow/hooks/use-available-blocks.ts | 3 +- .../workflow/hooks/use-checklist.ts | 114 +- .../workflow/hooks/use-config-vision.ts | 6 +- .../hooks/use-dynamic-test-run-options.tsx | 63 +- .../use-edges-interactions-without-sync.ts | 2 +- .../workflow/hooks/use-edges-interactions.ts | 12 +- .../hooks/use-fetch-workflow-inspect-vars.ts | 27 +- .../components/workflow/hooks/use-helpline.ts | 4 +- .../hooks/use-inspect-vars-crud-common.ts | 21 +- .../workflow/hooks/use-inspect-vars-crud.ts | 10 +- .../workflow/hooks/use-node-data-update.ts | 6 +- .../hooks/use-node-plugin-installation.ts | 11 +- .../hooks/use-nodes-available-var-list.ts | 4 +- .../use-nodes-interactions-without-sync.ts | 4 +- .../workflow/hooks/use-nodes-interactions.ts | 308 +- .../workflow/hooks/use-nodes-layout.ts | 16 +- .../workflow/hooks/use-nodes-meta-data.ts | 8 +- .../workflow/hooks/use-nodes-sync-draft.ts | 2 +- .../hooks/use-selection-interactions.ts | 10 +- .../workflow/hooks/use-shortcuts.ts | 21 +- .../workflow/hooks/use-tool-icon.ts | 18 +- .../workflow/hooks/use-workflow-history.ts | 35 +- .../hooks/use-workflow-interactions.ts | 33 +- .../hooks/use-workflow-run-event/index.ts | 16 +- .../use-workflow-agent-log.ts | 4 +- .../use-workflow-failed.ts | 2 +- .../use-workflow-finished.ts | 6 +- .../use-workflow-node-finished.ts | 8 +- .../use-workflow-node-iteration-finished.ts | 6 +- .../use-workflow-node-iteration-next.ts | 4 +- .../use-workflow-node-iteration-started.ts | 12 +- .../use-workflow-node-loop-finished.ts | 4 +- .../use-workflow-node-loop-next.ts | 4 +- .../use-workflow-node-loop-started.ts | 8 +- .../use-workflow-node-retry.ts | 6 +- .../use-workflow-node-started.ts | 10 +- .../use-workflow-started.ts | 6 +- .../use-workflow-text-chunk.ts | 4 +- .../use-workflow-text-replace.ts | 4 +- .../workflow/hooks/use-workflow-search.tsx | 122 +- .../workflow/hooks/use-workflow-variables.ts | 18 +- .../components/workflow/hooks/use-workflow.ts | 48 +- web/app/components/workflow/index.tsx | 141 +- 
.../components/workflow/node-contextmenu.tsx | 10 +- .../nodes/_base/components/add-button.tsx | 10 +- .../add-variable-popup-with-position.tsx | 16 +- .../_base/components/add-variable-popup.tsx | 14 +- .../components/agent-strategy-selector.tsx | 293 +- .../nodes/_base/components/agent-strategy.tsx | 269 +- .../components/before-run-form/bool-input.tsx | 13 +- .../components/before-run-form/form-item.tsx | 162 +- .../_base/components/before-run-form/form.tsx | 15 +- .../components/before-run-form/index.tsx | 52 +- .../components/before-run-form/panel-wrap.tsx | 29 +- .../components/code-generator-button.tsx | 20 +- .../components/collapse/field-collapse.tsx | 6 +- .../nodes/_base/components/collapse/index.tsx | 8 +- .../nodes/_base/components/config-vision.tsx | 61 +- .../nodes/_base/components/editor/base.tsx | 55 +- .../code-editor/editor-support-vars.tsx | 17 +- .../components/editor/code-editor/index.tsx | 76 +- .../_base/components/editor/text-editor.tsx | 5 +- .../nodes/_base/components/editor/wrap.tsx | 2 +- .../_base/components/entry-node-container.tsx | 4 +- .../components/error-handle/default-value.tsx | 28 +- .../error-handle/error-handle-on-node.tsx | 26 +- .../error-handle/error-handle-on-panel.tsx | 37 +- .../error-handle/error-handle-tip.tsx | 17 +- .../error-handle-type-selector.tsx | 45 +- .../error-handle/fail-branch-card.tsx | 22 +- .../_base/components/error-handle/hooks.ts | 12 +- .../_base/components/error-handle/utils.ts | 2 +- .../workflow/nodes/_base/components/field.tsx | 21 +- .../nodes/_base/components/file-type-item.tsx | 57 +- .../_base/components/file-upload-setting.tsx | 61 +- .../_base/components/form-input-boolean.tsx | 12 +- .../_base/components/form-input-item.tsx | 140 +- .../components/form-input-type-switch.tsx | 14 +- .../workflow/nodes/_base/components/group.tsx | 22 +- .../nodes/_base/components/help-link.tsx | 14 +- .../nodes/_base/components/info-panel.tsx | 8 +- .../_base/components/input-field/add.tsx | 2 +- .../components/input-number-with-slider.tsx | 11 +- .../components/input-support-select-var.tsx | 27 +- .../_base/components/input-var-type-icon.tsx | 2 +- .../components/install-plugin-button.tsx | 34 +- .../components/layout/box-group-field.tsx | 2 +- .../_base/components/layout/box-group.tsx | 8 +- .../nodes/_base/components/layout/box.tsx | 5 +- .../_base/components/layout/field-title.tsx | 8 +- .../nodes/_base/components/layout/field.tsx | 2 +- .../_base/components/layout/group-field.tsx | 2 +- .../nodes/_base/components/layout/group.tsx | 5 +- .../nodes/_base/components/layout/index.tsx | 8 +- .../components/list-no-data-placeholder.tsx | 4 +- .../mcp-tool-not-support-tooltip.tsx | 16 +- .../nodes/_base/components/memory-config.tsx | 66 +- .../mixed-variable-text-input/index.tsx | 16 +- .../mixed-variable-text-input/placeholder.tsx | 23 +- .../nodes/_base/components/next-step/add.tsx | 32 +- .../_base/components/next-step/container.tsx | 11 +- .../_base/components/next-step/index.tsx | 30 +- .../nodes/_base/components/next-step/item.tsx | 24 +- .../nodes/_base/components/next-step/line.tsx | 16 +- .../_base/components/next-step/operator.tsx | 44 +- .../nodes/_base/components/node-control.tsx | 39 +- .../nodes/_base/components/node-handle.tsx | 36 +- .../nodes/_base/components/node-resizer.tsx | 15 +- .../_base/components/node-status-icon.tsx | 2 +- .../nodes/_base/components/option-card.tsx | 27 +- .../nodes/_base/components/output-vars.tsx | 20 +- .../panel-operator/change-block.tsx | 18 +- 
.../_base/components/panel-operator/index.tsx | 14 +- .../panel-operator/panel-operator-popup.tsx | 77 +- .../nodes/_base/components/prompt/editor.tsx | 248 +- .../readonly-input-with-select-var.tsx | 35 +- .../nodes/_base/components/remove-button.tsx | 6 +- .../components/remove-effect-var-confirm.tsx | 8 +- .../nodes/_base/components/retry/hooks.ts | 4 +- .../_base/components/retry/retry-on-node.tsx | 33 +- .../_base/components/retry/retry-on-panel.tsx | 54 +- .../nodes/_base/components/selector.tsx | 42 +- .../nodes/_base/components/setting-item.tsx | 27 +- .../workflow/nodes/_base/components/split.tsx | 4 +- .../components/support-var-input/index.tsx | 26 +- .../components/switch-plugin-version.tsx | 157 +- .../components/title-description-input.tsx | 6 +- .../_base/components/toggle-expand-btn.tsx | 5 +- .../nodes/_base/components/variable-tag.tsx | 12 +- .../variable/assigned-var-reference-popup.tsx | 37 +- .../components/variable/constant-field.tsx | 15 +- .../variable/manage-input-field.tsx | 20 +- .../components/variable/match-schema-type.ts | 14 +- .../object-child-tree-panel/picker/field.tsx | 32 +- .../object-child-tree-panel/picker/index.tsx | 21 +- .../object-child-tree-panel/show/field.tsx | 32 +- .../object-child-tree-panel/show/index.tsx | 8 +- .../tree-indent-line.tsx | 8 +- .../components/variable/output-var-list.tsx | 35 +- .../variable/use-match-schema-type.ts | 5 +- .../nodes/_base/components/variable/utils.ts | 318 +- .../variable/var-full-path-panel.tsx | 18 +- .../_base/components/variable/var-list.tsx | 60 +- .../variable/var-reference-picker.tsx | 400 +- .../variable/var-reference-popup.tsx | 96 +- .../variable/var-reference-vars.tsx | 190 +- .../components/variable/var-type-picker.tsx | 28 +- .../variable-label/base/variable-icon.tsx | 6 +- .../variable-label/base/variable-label.tsx | 22 +- .../variable-label/base/variable-name.tsx | 2 +- .../base/variable-node-label.tsx | 8 +- .../variable/variable-label/hooks.ts | 8 +- .../variable/variable-label/index.tsx | 4 +- .../variable-icon-with-color.tsx | 6 +- .../variable-label-in-editor.tsx | 4 +- .../variable-label/variable-label-in-node.tsx | 4 +- .../variable-label-in-select.tsx | 2 +- .../variable-label/variable-label-in-text.tsx | 4 +- .../components/workflow-panel/index.spec.tsx | 34 +- .../_base/components/workflow-panel/index.tsx | 151 +- .../workflow-panel/last-run/index.tsx | 28 +- .../workflow-panel/last-run/no-data.tsx | 20 +- .../workflow-panel/last-run/use-last-run.ts | 58 +- .../_base/components/workflow-panel/tab.tsx | 12 +- .../workflow-panel/trigger-subscription.tsx | 24 +- .../_base/hooks/use-available-var-list.ts | 6 +- .../nodes/_base/hooks/use-node-crud.ts | 3 +- .../nodes/_base/hooks/use-node-help-link.ts | 2 +- .../nodes/_base/hooks/use-one-step-run.ts | 79 +- .../nodes/_base/hooks/use-output-var-list.ts | 22 +- .../nodes/_base/hooks/use-toggle-expend.ts | 7 +- .../nodes/_base/hooks/use-var-list.ts | 4 +- .../components/workflow/nodes/_base/node.tsx | 130 +- .../nodes/agent/components/model-bar.tsx | 78 +- .../nodes/agent/components/tool-icon.tsx | 116 +- .../workflow/nodes/agent/default.ts | 33 +- .../components/workflow/nodes/agent/node.tsx | 118 +- .../components/workflow/nodes/agent/panel.tsx | 185 +- .../components/workflow/nodes/agent/types.ts | 2 +- .../workflow/nodes/agent/use-config.ts | 24 +- .../nodes/agent/use-single-run-form-params.ts | 16 +- .../workflow/nodes/answer/default.ts | 4 +- .../components/workflow/nodes/answer/node.tsx | 24 +- .../workflow/nodes/answer/panel.tsx | 15 
+- .../workflow/nodes/answer/use-config.ts | 10 +- .../components/operation-selector.tsx | 84 +- .../assigner/components/var-list/index.tsx | 82 +- .../components/var-list/use-var-list.ts | 4 +- .../workflow/nodes/assigner/default.ts | 16 +- .../workflow/nodes/assigner/hooks.ts | 10 +- .../workflow/nodes/assigner/node.tsx | 33 +- .../workflow/nodes/assigner/panel.tsx | 26 +- .../workflow/nodes/assigner/use-config.ts | 20 +- .../assigner/use-single-run-form-params.ts | 12 +- .../workflow/nodes/assigner/utils.ts | 12 +- .../workflow/nodes/code/code-parser.spec.ts | 42 +- .../workflow/nodes/code/code-parser.ts | 6 +- .../components/workflow/nodes/code/default.ts | 15 +- .../workflow/nodes/code/dependency-picker.tsx | 45 +- .../components/workflow/nodes/code/node.tsx | 2 +- .../components/workflow/nodes/code/panel.tsx | 59 +- .../components/workflow/nodes/code/types.ts | 2 +- .../workflow/nodes/code/use-config.ts | 20 +- .../nodes/code/use-single-run-form-params.ts | 6 +- .../components/workflow/nodes/components.ts | 76 +- .../components/workflow/nodes/constants.ts | 10 +- .../nodes/data-source-empty/default.ts | 2 +- .../workflow/nodes/data-source-empty/hooks.ts | 9 +- .../nodes/data-source-empty/index.tsx | 27 +- .../nodes/data-source/before-run-form.tsx | 23 +- .../workflow/nodes/data-source/default.ts | 32 +- .../data-source/hooks/use-before-run-form.ts | 21 +- .../nodes/data-source/hooks/use-config.ts | 14 +- .../workflow/nodes/data-source/node.tsx | 18 +- .../workflow/nodes/data-source/panel.tsx | 38 +- .../workflow/nodes/data-source/types.ts | 5 +- .../workflow/nodes/data-source/utils.ts | 2 +- .../nodes/document-extractor/default.ts | 9 +- .../nodes/document-extractor/node.tsx | 15 +- .../nodes/document-extractor/panel.tsx | 37 +- .../nodes/document-extractor/use-config.ts | 10 +- .../use-single-run-form-params.ts | 12 +- .../components/workflow/nodes/end/default.ts | 6 +- .../components/workflow/nodes/end/node.tsx | 6 +- .../components/workflow/nodes/end/panel.tsx | 20 +- .../workflow/nodes/end/use-config.ts | 5 +- .../nodes/http/components/api-input.tsx | 37 +- .../http/components/authorization/index.tsx | 65 +- .../components/authorization/radio-group.tsx | 7 +- .../nodes/http/components/curl-panel.tsx | 29 +- .../nodes/http/components/edit-body/index.tsx | 47 +- .../components/key-value/bulk-edit/index.tsx | 23 +- .../nodes/http/components/key-value/index.tsx | 22 +- .../key-value/key-value-edit/index.tsx | 19 +- .../key-value/key-value-edit/input-item.tsx | 70 +- .../key-value/key-value-edit/item.tsx | 106 +- .../nodes/http/components/timeout/index.tsx | 30 +- .../components/workflow/nodes/http/default.ts | 17 +- .../nodes/http/hooks/use-key-value-list.ts | 6 +- .../components/workflow/nodes/http/node.tsx | 15 +- .../components/workflow/nodes/http/panel.tsx | 97 +- .../workflow/nodes/http/use-config.ts | 21 +- .../nodes/http/use-single-run-form-params.ts | 6 +- .../components/workflow/nodes/http/utils.ts | 3 +- .../if-else/components/condition-add.tsx | 26 +- .../components/condition-files-list-value.tsx | 64 +- .../condition-list/condition-input.tsx | 12 +- .../condition-list/condition-item.tsx | 154 +- .../condition-list/condition-operator.tsx | 31 +- .../condition-list/condition-var-selector.tsx | 8 +- .../components/condition-list/index.tsx | 36 +- .../components/condition-number-input.tsx | 67 +- .../if-else/components/condition-value.tsx | 44 +- .../if-else/components/condition-wrap.tsx | 111 +- .../workflow/nodes/if-else/default.ts | 27 +- 
.../workflow/nodes/if-else/node.tsx | 77 +- .../workflow/nodes/if-else/panel.tsx | 34 +- .../workflow/nodes/if-else/use-config.ts | 24 +- .../if-else/use-is-var-file-attribute.ts | 6 +- .../if-else/use-single-run-form-params.ts | 10 +- .../workflow/nodes/if-else/utils.ts | 26 +- web/app/components/workflow/nodes/index.tsx | 8 +- .../workflow/nodes/iteration-start/default.ts | 2 +- .../workflow/nodes/iteration-start/index.tsx | 24 +- .../workflow/nodes/iteration/add-block.tsx | 41 +- .../workflow/nodes/iteration/default.ts | 17 +- .../workflow/nodes/iteration/node.tsx | 25 +- .../workflow/nodes/iteration/panel.tsx | 87 +- .../workflow/nodes/iteration/use-config.ts | 28 +- .../nodes/iteration/use-interactions.ts | 22 +- .../iteration/use-single-run-form-params.ts | 22 +- .../components/chunk-structure/hooks.tsx | 17 +- .../components/chunk-structure/index.tsx | 26 +- .../chunk-structure/instruction/index.tsx | 38 +- .../chunk-structure/instruction/line.tsx | 26 +- .../components/chunk-structure/selector.tsx | 27 +- .../components/embedding-model.tsx | 10 +- .../components/index-method.tsx | 57 +- .../knowledge-base/components/option-card.tsx | 29 +- .../components/retrieval-setting/hooks.tsx | 48 +- .../components/retrieval-setting/index.tsx | 29 +- .../reranking-model-selector.tsx | 8 +- .../search-method-option.tsx | 54 +- .../top-k-and-score-threshold.tsx | 34 +- .../components/retrieval-setting/type.ts | 2 +- .../workflow/nodes/knowledge-base/default.ts | 20 +- .../nodes/knowledge-base/hooks/use-config.ts | 26 +- .../hooks/use-settings-display.ts | 12 +- .../workflow/nodes/knowledge-base/node.tsx | 22 +- .../workflow/nodes/knowledge-base/panel.tsx | 51 +- .../workflow/nodes/knowledge-base/types.ts | 12 +- .../use-single-run-form-params.ts | 14 +- .../components/add-dataset.tsx | 9 +- .../components/dataset-item.tsx | 72 +- .../components/dataset-list.tsx | 42 +- .../components/metadata/add-condition.tsx | 44 +- .../condition-common-variable-selector.tsx | 40 +- .../condition-list/condition-date.tsx | 16 +- .../condition-list/condition-item.tsx | 81 +- .../condition-list/condition-number.tsx | 24 +- .../condition-list/condition-operator.tsx | 40 +- .../condition-list/condition-string.tsx | 20 +- .../condition-list/condition-value-method.tsx | 22 +- .../condition-variable-selector.tsx | 28 +- .../metadata/condition-list/index.tsx | 17 +- .../metadata/condition-list/utils.ts | 24 +- .../metadata/metadata-filter/index.tsx | 38 +- .../metadata-filter-selector.tsx | 42 +- .../components/metadata/metadata-icon.tsx | 4 +- .../components/metadata/metadata-panel.tsx | 22 +- .../components/metadata/metadata-trigger.tsx | 30 +- .../components/retrieval-config.tsx | 60 +- .../nodes/knowledge-retrieval/default.ts | 15 +- .../nodes/knowledge-retrieval/hooks.ts | 4 +- .../nodes/knowledge-retrieval/node.tsx | 19 +- .../nodes/knowledge-retrieval/panel.tsx | 58 +- .../nodes/knowledge-retrieval/types.ts | 9 +- .../nodes/knowledge-retrieval/use-config.ts | 66 +- .../use-single-run-form-params.ts | 18 +- .../nodes/knowledge-retrieval/utils.ts | 12 +- .../components/extract-input.tsx | 21 +- .../components/filter-condition.tsx | 57 +- .../list-operator/components/limit-config.tsx | 37 +- .../components/sub-variable-picker.tsx | 53 +- .../workflow/nodes/list-operator/default.ts | 20 +- .../workflow/nodes/list-operator/node.tsx | 15 +- .../workflow/nodes/list-operator/panel.tsx | 149 +- .../nodes/list-operator/use-config.ts | 14 +- .../llm/components/config-prompt-item.tsx | 49 +- 
.../nodes/llm/components/config-prompt.tsx | 206 +- .../json-schema-config-modal/code-editor.tsx | 45 +- .../error-message.tsx | 13 +- .../json-schema-config-modal/index.tsx | 7 +- .../json-importer.tsx | 50 +- .../json-schema-config.tsx | 84 +- .../json-schema-generator/assets/index.tsx | 4 +- .../generated-result.tsx | 58 +- .../json-schema-generator/index.tsx | 40 +- .../json-schema-generator/prompt-editor.tsx | 63 +- .../schema-editor.tsx | 11 +- .../visual-editor/add-field.tsx | 19 +- .../visual-editor/card.tsx | 17 +- .../visual-editor/context.tsx | 4 +- .../visual-editor/edit-card/actions.tsx | 30 +- .../edit-card/advanced-actions.tsx | 29 +- .../edit-card/advanced-options.tsx | 28 +- .../edit-card/auto-width-input.tsx | 9 +- .../visual-editor/edit-card/index.tsx | 95 +- .../edit-card/required-switch.tsx | 10 +- .../visual-editor/edit-card/type-selector.tsx | 42 +- .../visual-editor/hooks.ts | 30 +- .../visual-editor/index.tsx | 4 +- .../visual-editor/schema-node.tsx | 101 +- .../visual-editor/store.ts | 2 +- .../llm/components/prompt-generator-btn.tsx | 24 +- .../components/reasoning-format-config.tsx | 14 +- .../llm/components/resolution-picker.tsx | 15 +- .../nodes/llm/components/structure-output.tsx | 51 +- .../nodes/llm/components/tools-config.tsx | 58 - .../workflow/nodes/llm/constants.ts | 41 - .../components/workflow/nodes/llm/default.ts | 31 +- .../components/workflow/nodes/llm/node.tsx | 45 +- .../components/workflow/nodes/llm/panel.tsx | 183 +- .../components/workflow/nodes/llm/types.ts | 2 - .../workflow/nodes/llm/use-config.ts | 132 +- .../nodes/llm/use-single-run-form-params.ts | 36 +- .../components/workflow/nodes/llm/utils.ts | 32 +- .../workflow/nodes/loop-end/default.ts | 4 +- .../workflow/nodes/loop-start/default.ts | 2 +- .../workflow/nodes/loop-start/index.tsx | 24 +- .../workflow/nodes/loop/add-block.tsx | 43 +- .../nodes/loop/components/condition-add.tsx | 26 +- .../components/condition-files-list-value.tsx | 52 +- .../condition-list/condition-input.tsx | 12 +- .../condition-list/condition-item.tsx | 152 +- .../condition-list/condition-operator.tsx | 31 +- .../condition-list/condition-var-selector.tsx | 8 +- .../loop/components/condition-list/index.tsx | 36 +- .../components/condition-number-input.tsx | 67 +- .../nodes/loop/components/condition-value.tsx | 32 +- .../nodes/loop/components/condition-wrap.tsx | 72 +- .../loop/components/loop-variables/empty.tsx | 4 +- .../components/loop-variables/form-item.tsx | 42 +- .../loop/components/loop-variables/index.tsx | 4 +- .../loop-variables/input-mode-selec.tsx | 2 +- .../loop/components/loop-variables/item.tsx | 36 +- .../components/workflow/nodes/loop/default.ts | 43 +- .../workflow/nodes/loop/insert-block.tsx | 12 +- .../components/workflow/nodes/loop/node.tsx | 15 +- .../components/workflow/nodes/loop/panel.tsx | 48 +- .../workflow/nodes/loop/use-config.ts | 34 +- .../workflow/nodes/loop/use-interactions.ts | 16 +- .../nodes/loop/use-is-var-file-attribute.ts | 2 +- .../nodes/loop/use-single-run-form-params.ts | 12 +- .../components/workflow/nodes/loop/utils.ts | 24 +- .../extract-parameter/import-from-tool.tsx | 31 +- .../components/extract-parameter/item.tsx | 35 +- .../components/extract-parameter/list.tsx | 17 +- .../components/extract-parameter/update.tsx | 80 +- .../components/reasoning-mode-picker.tsx | 17 +- .../nodes/parameter-extractor/default.ts | 24 +- .../nodes/parameter-extractor/node.tsx | 8 +- .../nodes/parameter-extractor/panel.tsx | 150 +- .../nodes/parameter-extractor/use-config.ts | 28 +- 
.../use-single-run-form-params.ts | 28 +- .../components/advanced-setting.tsx | 31 +- .../components/class-item.tsx | 15 +- .../components/class-list.tsx | 48 +- .../nodes/question-classifier/default.ts | 16 +- .../nodes/question-classifier/node.tsx | 57 +- .../nodes/question-classifier/panel.tsx | 48 +- .../nodes/question-classifier/use-config.ts | 29 +- .../use-single-run-form-params.ts | 28 +- .../nodes/start/components/var-item.tsx | 88 +- .../nodes/start/components/var-list.tsx | 59 +- .../workflow/nodes/start/default.ts | 2 +- .../components/workflow/nodes/start/node.tsx | 29 +- .../components/workflow/nodes/start/panel.tsx | 50 +- .../workflow/nodes/start/use-config.ts | 42 +- .../nodes/start/use-single-run-form-params.ts | 14 +- .../nodes/template-transform/default.ts | 13 +- .../nodes/template-transform/node.tsx | 2 +- .../nodes/template-transform/panel.tsx | 53 +- .../nodes/template-transform/use-config.ts | 12 +- .../use-single-run-form-params.ts | 6 +- .../__tests__/output-schema-utils.test.ts | 2 +- .../nodes/tool/components/copy-id.tsx | 25 +- .../nodes/tool/components/input-var-list.tsx | 45 +- .../mixed-variable-text-input/index.tsx | 18 +- .../mixed-variable-text-input/placeholder.tsx | 23 +- .../nodes/tool/components/tool-form/index.tsx | 8 +- .../nodes/tool/components/tool-form/item.tsx | 46 +- .../components/workflow/nodes/tool/default.ts | 38 +- .../components/workflow/nodes/tool/node.tsx | 33 +- .../nodes/tool/output-schema-utils.ts | 7 +- .../components/workflow/nodes/tool/panel.tsx | 48 +- .../components/workflow/nodes/tool/types.ts | 2 +- .../workflow/nodes/tool/use-config.ts | 35 +- .../nodes/tool/use-get-data-for-check-more.ts | 2 +- .../nodes/tool/use-single-run-form-params.ts | 18 +- .../components/trigger-form/index.tsx | 6 +- .../components/trigger-form/item.tsx | 48 +- .../workflow/nodes/trigger-plugin/default.ts | 31 +- .../hooks/use-trigger-auth-flow.ts | 44 +- .../workflow/nodes/trigger-plugin/node.tsx | 17 +- .../workflow/nodes/trigger-plugin/panel.tsx | 46 +- .../workflow/nodes/trigger-plugin/types.ts | 4 +- .../nodes/trigger-plugin/use-check-params.ts | 6 +- .../nodes/trigger-plugin/use-config.ts | 26 +- .../trigger-plugin/utils/form-helpers.ts | 4 +- .../components/frequency-selector.tsx | 17 +- .../components/mode-switcher.tsx | 12 +- .../components/mode-toggle.tsx | 8 +- .../components/monthly-days-selector.tsx | 34 +- .../components/next-execution-times.tsx | 6 +- .../components/on-minute-selector.tsx | 4 +- .../components/weekday-selector.tsx | 4 +- .../nodes/trigger-schedule/default.ts | 53 +- .../workflow/nodes/trigger-schedule/node.tsx | 8 +- .../workflow/nodes/trigger-schedule/panel.tsx | 94 +- .../nodes/trigger-schedule/use-config.ts | 4 +- .../utils/execution-time-calculator.ts | 32 +- .../utils/integration.spec.ts | 21 +- .../components/generic-table.tsx | 35 +- .../components/header-table.tsx | 14 +- .../components/paragraph-input.tsx | 5 +- .../components/parameter-table.tsx | 27 +- .../workflow/nodes/trigger-webhook/default.ts | 9 +- .../workflow/nodes/trigger-webhook/node.tsx | 2 +- .../workflow/nodes/trigger-webhook/panel.tsx | 61 +- .../workflow/nodes/trigger-webhook/types.ts | 2 +- .../nodes/trigger-webhook/use-config.ts | 37 +- .../trigger-webhook/utils/raw-variable.ts | 3 +- .../utils/render-output-vars.tsx | 17 +- .../components/add-variable/index.tsx | 35 +- .../components/node-group-item.tsx | 42 +- .../components/node-variable-item.tsx | 46 +- .../components/var-group-item.tsx | 136 +- .../components/var-list/index.tsx | 23 +- 
.../components/var-list/use-var-list.ts | 4 +- .../nodes/variable-assigner/default.ts | 21 +- .../workflow/nodes/variable-assigner/hooks.ts | 26 +- .../workflow/nodes/variable-assigner/node.tsx | 12 +- .../nodes/variable-assigner/panel.tsx | 111 +- .../nodes/variable-assigner/use-config.ts | 18 +- .../use-single-run-form-params.ts | 12 +- .../workflow/note-node/constants.ts | 2 +- .../components/workflow/note-node/hooks.ts | 4 +- .../components/workflow/note-node/index.tsx | 31 +- .../note-node/note-editor/context.tsx | 12 +- .../workflow/note-node/note-editor/editor.tsx | 30 +- .../plugins/format-detector-plugin/hooks.ts | 20 +- .../plugins/link-editor-plugin/component.tsx | 60 +- .../plugins/link-editor-plugin/hooks.ts | 26 +- .../plugins/link-editor-plugin/index.tsx | 2 +- .../note-editor/toolbar/color-picker.tsx | 24 +- .../note-node/note-editor/toolbar/command.tsx | 24 +- .../note-node/note-editor/toolbar/divider.tsx | 2 +- .../toolbar/font-size-selector.tsx | 29 +- .../note-node/note-editor/toolbar/hooks.ts | 30 +- .../note-node/note-editor/toolbar/index.tsx | 24 +- .../note-editor/toolbar/operator.tsx | 40 +- .../workflow/note-node/note-editor/utils.ts | 4 +- .../workflow/operator/add-block.tsx | 41 +- .../components/workflow/operator/control.tsx | 46 +- web/app/components/workflow/operator/hooks.ts | 8 +- .../components/workflow/operator/index.tsx | 22 +- .../workflow/operator/more-actions.tsx | 74 +- .../workflow/operator/tip-popup.tsx | 12 +- .../workflow/operator/zoom-in-out.tsx | 61 +- .../components/workflow/panel-contextmenu.tsx | 42 +- .../workflow/panel/chat-record/index.tsx | 42 +- .../workflow/panel/chat-record/user-input.tsx | 10 +- .../components/array-bool-list.tsx | 19 +- .../components/array-value-list.tsx | 21 +- .../components/bool-value.tsx | 15 +- .../components/object-value-item.tsx | 27 +- .../components/object-value-list.tsx | 12 +- .../components/variable-item.tsx | 35 +- .../components/variable-modal-trigger.tsx | 21 +- .../components/variable-modal.tsx | 123 +- .../components/variable-type-select.tsx | 37 +- .../panel/chat-variable-panel/index.tsx | 96 +- .../panel/debug-and-preview/chat-wrapper.tsx | 52 +- .../conversation-variable-modal.tsx | 96 +- .../panel/debug-and-preview/empty.tsx | 10 +- .../workflow/panel/debug-and-preview/hooks.ts | 40 +- .../panel/debug-and-preview/index.spec.tsx | 15 +- .../panel/debug-and-preview/index.tsx | 57 +- .../panel/debug-and-preview/user-input.tsx | 10 +- .../workflow/panel/env-panel/env-item.tsx | 45 +- .../workflow/panel/env-panel/index.tsx | 38 +- .../panel/env-panel/variable-modal.tsx | 151 +- .../panel/env-panel/variable-trigger.tsx | 21 +- .../panel/global-variable-panel/index.tsx | 69 +- .../panel/global-variable-panel/item.tsx | 27 +- web/app/components/workflow/panel/index.tsx | 11 +- .../workflow/panel/inputs-panel.tsx | 40 +- web/app/components/workflow/panel/record.tsx | 10 +- .../context-menu/index.tsx | 30 +- .../context-menu/menu-item.tsx | 8 +- .../context-menu/use-context-menu.ts | 32 +- .../delete-confirm-modal.tsx | 41 +- .../panel/version-history-panel/empty.tsx | 31 +- .../filter/filter-item.tsx | 11 +- .../filter/filter-switch.tsx | 15 +- .../version-history-panel/filter/index.tsx | 26 +- .../filter/use-filter.ts | 4 +- .../version-history-panel/index.spec.tsx | 156 + .../panel/version-history-panel/index.tsx | 166 +- .../version-history-panel/loading/index.tsx | 10 +- .../version-history-panel/loading/item.tsx | 19 +- .../restore-confirm-modal.tsx | 41 +- .../version-history-item.tsx | 43 +- 
.../workflow/panel/workflow-preview.tsx | 75 +- .../workflow/plugin-dependency/hooks.ts | 4 +- .../workflow/plugin-dependency/index.tsx | 2 +- .../workflow/plugin-dependency/store.ts | 2 +- .../workflow/run/agent-log/agent-log-item.tsx | 51 +- .../run/agent-log/agent-log-nav-more.tsx | 18 +- .../workflow/run/agent-log/agent-log-nav.tsx | 48 +- .../run/agent-log/agent-log-trigger.tsx | 29 +- .../run/agent-log/agent-result-panel.tsx | 41 +- web/app/components/workflow/run/hooks.ts | 12 +- web/app/components/workflow/run/index.tsx | 41 +- .../iteration-log/iteration-log-trigger.tsx | 37 +- .../iteration-log/iteration-result-panel.tsx | 56 +- .../run/loop-log/loop-log-trigger.tsx | 30 +- .../run/loop-log/loop-result-panel.tsx | 60 +- .../workflow/run/loop-result-panel.tsx | 62 +- web/app/components/workflow/run/meta.tsx | 56 +- web/app/components/workflow/run/node.tsx | 99 +- .../components/workflow/run/output-panel.tsx | 28 +- .../components/workflow/run/result-panel.tsx | 47 +- .../components/workflow/run/result-text.tsx | 32 +- .../run/retry-log/retry-log-trigger.tsx | 16 +- .../run/retry-log/retry-result-panel.tsx | 18 +- .../workflow/run/special-result-panel.tsx | 11 +- .../workflow/run/status-container.tsx | 8 +- web/app/components/workflow/run/status.tsx | 63 +- .../components/workflow/run/tracing-panel.tsx | 27 +- .../run/utils/format-log/agent/index.spec.ts | 4 +- .../run/utils/format-log/agent/index.ts | 6 +- .../utils/format-log/graph-to-log-struct.ts | 50 +- .../workflow/run/utils/format-log/index.ts | 6 +- .../utils/format-log/iteration/index.spec.ts | 4 +- .../run/utils/format-log/iteration/index.ts | 5 +- .../run/utils/format-log/loop/index.spec.ts | 6 +- .../run/utils/format-log/loop/index.ts | 5 +- .../run/utils/format-log/parallel/index.ts | 16 +- .../run/utils/format-log/retry/index.spec.ts | 4 +- .../workflow/selection-contextmenu.tsx | 101 +- .../components/workflow/shortcuts-name.tsx | 5 +- .../components/workflow/simple-node/index.tsx | 49 +- .../store/__tests__/trigger-status.test.ts | 2 +- web/app/components/workflow/store/index.ts | 2 +- .../workflow/debug/inspect-vars-slice.ts | 16 +- .../workflow/store/workflow/index.ts | 86 +- .../workflow/store/workflow/node-slice.ts | 8 +- .../store/workflow/workflow-draft-slice.ts | 51 +- .../workflow/store/workflow/workflow-slice.ts | 8 +- .../workflow/syncing-data-modal.tsx | 2 +- web/app/components/workflow/types.ts | 25 +- .../components/workflow/update-dsl-modal.tsx | 176 +- .../components/workflow/utils/data-source.ts | 2 +- .../components/workflow/utils/elk-layout.ts | 14 +- .../workflow/utils/gen-node-meta-data.ts | 2 +- web/app/components/workflow/utils/index.ts | 16 +- .../workflow/utils/node-navigation.ts | 4 +- web/app/components/workflow/utils/node.ts | 16 +- web/app/components/workflow/utils/tool.ts | 6 +- web/app/components/workflow/utils/variable.ts | 6 +- .../workflow/utils/workflow-entry.ts | 6 +- .../workflow/utils/workflow-init.spec.ts | 4 +- .../workflow/utils/workflow-init.ts | 40 +- web/app/components/workflow/utils/workflow.ts | 16 +- .../variable-inspect/display-content.tsx | 100 +- .../workflow/variable-inspect/empty.tsx | 23 +- .../workflow/variable-inspect/group.tsx | 59 +- .../workflow/variable-inspect/index.tsx | 11 +- .../variable-inspect/large-data-alert.tsx | 16 +- .../workflow/variable-inspect/left.tsx | 28 +- .../workflow/variable-inspect/listening.tsx | 83 +- .../workflow/variable-inspect/panel.tsx | 62 +- .../workflow/variable-inspect/right.tsx | 158 +- 
.../workflow/variable-inspect/trigger.tsx | 51 +- .../variable-inspect/value-content.tsx | 115 +- .../workflow/workflow-history-store.tsx | 15 +- .../components/custom-edge.tsx | 11 +- .../components/error-handle-on-node.tsx | 24 +- .../components/node-handle.tsx | 14 +- .../components/nodes/base.tsx | 52 +- .../components/nodes/constants.ts | 2 +- .../components/nodes/if-else/node.tsx | 75 +- .../components/nodes/index.tsx | 2 +- .../nodes/iteration-start/index.tsx | 16 +- .../components/nodes/iteration/node.tsx | 13 +- .../components/nodes/loop-start/index.tsx | 16 +- .../components/nodes/loop/hooks.ts | 6 +- .../components/nodes/loop/node.tsx | 10 +- .../nodes/question-classifier/node.tsx | 20 +- .../components/note-node/index.tsx | 21 +- .../components/zoom-in-out.tsx | 46 +- .../workflow/workflow-preview/index.tsx | 66 +- web/app/dev-preview/page.tsx | 68 - web/app/education-apply/constants.ts | 1 - .../education-apply/education-apply-page.tsx | 122 +- .../education-apply/expire-notice-modal.tsx | 86 +- web/app/education-apply/hooks.ts | 35 +- web/app/education-apply/role-selector.tsx | 12 +- web/app/education-apply/search-input.tsx | 16 +- web/app/education-apply/user-info.tsx | 30 +- .../education-apply/verify-state-modal.tsx | 39 +- .../forgot-password/ChangePasswordForm.tsx | 91 +- .../forgot-password/ForgotPasswordForm.tsx | 93 +- web/app/forgot-password/page.tsx | 22 +- web/app/init/InitPasswordPopup.tsx | 60 +- web/app/init/page.tsx | 4 +- web/app/install/installForm.tsx | 220 +- web/app/install/page.tsx | 18 +- web/app/layout.tsx | 56 +- web/app/page.tsx | 4 +- .../repos/[owner]/[repo]/releases/route.ts | 7 +- web/app/reset-password/check-code/page.tsx | 76 +- web/app/reset-password/layout.tsx | 51 +- web/app/reset-password/page.tsx | 78 +- web/app/reset-password/set-password/page.tsx | 83 +- web/app/routePrefixHandle.tsx | 4 +- web/app/signin/_header.tsx | 36 +- web/app/signin/check-code/page.tsx | 95 +- .../signin/components/mail-and-code-auth.tsx | 38 +- .../components/mail-and-password-auth.tsx | 154 +- web/app/signin/components/social-auth.tsx | 80 +- web/app/signin/components/sso-auth.tsx | 8 +- web/app/signin/invite-settings/page.tsx | 211 +- web/app/signin/layout.tsx | 38 +- web/app/signin/normal-form.tsx | 289 +- web/app/signin/one-more-step.tsx | 106 +- web/app/signin/page.tsx | 4 +- web/app/signin/split.tsx | 7 +- web/app/signin/utils/post-login-redirect.ts | 7 +- web/app/signup/check-code/page.tsx | 76 +- web/app/signup/components/input-mail.tsx | 134 +- web/app/signup/layout.tsx | 34 +- web/app/signup/page.tsx | 8 +- web/app/signup/set-password/page.tsx | 63 +- web/app/styles/globals.css | 1 + web/app/styles/monaco-sticky-fix.css | 16 + web/bin/uglify-embed.js | 6 +- web/config/index.spec.ts | 17 +- web/config/index.ts | 42 +- web/context/access-control-store.ts | 4 +- web/context/app-context.tsx | 93 +- web/context/dataset-detail.ts | 6 +- web/context/datasets-context.tsx | 4 +- web/context/debug-configuration.ts | 17 +- web/context/event-emitter.tsx | 4 +- web/context/explore-context.ts | 4 +- web/context/external-api-panel-context.tsx | 3 +- .../external-knowledge-api-context.tsx | 2 +- web/context/global-public-context.tsx | 12 +- .../hooks/use-trigger-events-limit-modal.ts | 5 +- web/context/i18n.ts | 4 +- web/context/mitt-context.tsx | 2 +- web/context/modal-context.test.tsx | 50 +- web/context/modal-context.tsx | 169 +- web/context/provider-context-mock.spec.tsx | 8 +- web/context/provider-context-mock.tsx | 2 +- web/context/provider-context.tsx | 56 +- 
web/context/query-client.tsx | 12 +- web/context/web-app-context.tsx | 20 +- web/context/workspace-context.tsx | 10 +- web/eslint-rules/index.js | 18 + web/eslint-rules/namespaces.js | 87 + web/eslint-rules/rules/no-as-any-in-t.js | 110 + .../rules/no-legacy-namespace-prefix.js | 432 ++ web/eslint-rules/rules/require-ns-option.js | 56 + web/eslint.config.mjs | 242 +- web/hooks/use-app-favicon.ts | 12 +- web/hooks/use-async-window-open.spec.ts | 42 +- web/hooks/use-breakpoints.spec.ts | 4 +- web/hooks/use-breakpoints.ts | 2 +- web/hooks/use-document-title.spec.ts | 8 +- web/hooks/use-document-title.ts | 4 +- web/hooks/use-format-time-from-now.spec.ts | 76 +- web/hooks/use-format-time-from-now.ts | 26 +- web/hooks/use-import-dsl.ts | 36 +- web/hooks/use-knowledge.ts | 16 +- web/hooks/use-metadata.ts | 368 +- web/hooks/use-mitt.ts | 16 +- web/hooks/use-moderate.ts | 2 +- web/hooks/use-pay.tsx | 26 +- web/hooks/use-query-params.spec.tsx | 647 +++ web/hooks/use-query-params.ts | 222 + web/hooks/use-tab-searchparams.spec.ts | 544 --- web/hooks/use-tab-searchparams.ts | 47 - web/hooks/use-theme.ts | 2 +- web/hooks/use-timestamp.spec.ts | 4 +- web/hooks/use-timestamp.ts | 4 +- web/i18n-config/DEV.md | 13 +- web/i18n-config/README.md | 97 +- web/i18n-config/check-i18n-sync.js | 130 - web/i18n-config/generate-i18n-types.js | 145 - web/i18n-config/i18next-config.ts | 170 +- web/i18n-config/index.ts | 16 +- web/i18n-config/language.ts | 67 +- web/i18n-config/languages.json | 158 - web/i18n-config/languages.ts | 160 + web/i18n-config/server.ts | 22 +- web/i18n/ar-TN/app-annotation.json | 70 + web/i18n/ar-TN/app-annotation.ts | 98 - web/i18n/ar-TN/app-api.json | 72 + web/i18n/ar-TN/app-api.ts | 85 - web/i18n/ar-TN/app-debug.json | 391 ++ web/i18n/ar-TN/app-debug.ts | 571 --- web/i18n/ar-TN/app-log.json | 84 + web/i18n/ar-TN/app-log.ts | 112 - web/i18n/ar-TN/app-overview.json | 121 + web/i18n/ar-TN/app-overview.ts | 189 - web/i18n/ar-TN/app.json | 283 ++ web/i18n/ar-TN/app.ts | 351 -- web/i18n/ar-TN/billing.json | 185 + web/i18n/ar-TN/billing.ts | 221 - web/i18n/ar-TN/common.json | 622 +++ web/i18n/ar-TN/common.ts | 791 --- web/i18n/ar-TN/custom.json | 22 + web/i18n/ar-TN/custom.ts | 32 - web/i18n/ar-TN/dataset-creation.json | 185 + web/i18n/ar-TN/dataset-creation.ts | 217 - web/i18n/ar-TN/dataset-documents.json | 332 ++ web/i18n/ar-TN/dataset-documents.ts | 408 -- web/i18n/ar-TN/dataset-hit-testing.json | 28 + web/i18n/ar-TN/dataset-hit-testing.ts | 40 - web/i18n/ar-TN/dataset-pipeline.json | 95 + web/i18n/ar-TN/dataset-pipeline.ts | 165 - web/i18n/ar-TN/dataset-settings.json | 44 + web/i18n/ar-TN/dataset-settings.ts | 52 - web/i18n/ar-TN/dataset.json | 185 + web/i18n/ar-TN/dataset.ts | 251 - web/i18n/ar-TN/education.json | 44 + web/i18n/ar-TN/education.ts | 76 - web/i18n/ar-TN/explore.json | 29 + web/i18n/ar-TN/explore.ts | 44 - web/i18n/ar-TN/layout.json | 4 + web/i18n/ar-TN/layout.ts | 8 - web/i18n/ar-TN/login.json | 115 + web/i18n/ar-TN/login.ts | 126 - web/i18n/ar-TN/oauth.json | 19 + web/i18n/ar-TN/oauth.ts | 27 - web/i18n/ar-TN/pipeline.json | 24 + web/i18n/ar-TN/pipeline.ts | 40 - web/i18n/ar-TN/plugin-tags.json | 22 + web/i18n/ar-TN/plugin-tags.ts | 26 - web/i18n/ar-TN/plugin-trigger.json | 118 + web/i18n/ar-TN/plugin-trigger.ts | 186 - web/i18n/ar-TN/plugin.json | 251 + web/i18n/ar-TN/plugin.ts | 325 -- web/i18n/ar-TN/register.json | 1 + web/i18n/ar-TN/register.ts | 4 - web/i18n/ar-TN/run-log.json | 23 + web/i18n/ar-TN/run-log.ts | 31 - web/i18n/ar-TN/share.json | 62 + web/i18n/ar-TN/share.ts 
| 86 - web/i18n/ar-TN/time.json | 32 + web/i18n/ar-TN/time.ts | 45 - web/i18n/ar-TN/tools.json | 211 + web/i18n/ar-TN/tools.ts | 264 - web/i18n/ar-TN/workflow.json | 1051 ++++ web/i18n/ar-TN/workflow.ts | 1296 ----- web/i18n/de-DE/app-annotation.json | 70 + web/i18n/de-DE/app-annotation.ts | 98 - web/i18n/de-DE/app-api.json | 72 + web/i18n/de-DE/app-api.ts | 85 - web/i18n/de-DE/app-debug.json | 391 ++ web/i18n/de-DE/app-debug.ts | 575 --- web/i18n/de-DE/app-log.json | 84 + web/i18n/de-DE/app-log.ts | 112 - web/i18n/de-DE/app-overview.json | 121 + web/i18n/de-DE/app-overview.ts | 189 - web/i18n/de-DE/app.json | 283 ++ web/i18n/de-DE/app.ts | 353 -- web/i18n/de-DE/billing.json | 185 + web/i18n/de-DE/billing.ts | 221 - web/i18n/de-DE/common.json | 622 +++ web/i18n/de-DE/common.ts | 791 --- web/i18n/de-DE/custom.json | 22 + web/i18n/de-DE/custom.ts | 32 - web/i18n/de-DE/dataset-creation.json | 185 + web/i18n/de-DE/dataset-creation.ts | 217 - web/i18n/de-DE/dataset-documents.json | 332 ++ web/i18n/de-DE/dataset-documents.ts | 408 -- web/i18n/de-DE/dataset-hit-testing.json | 28 + web/i18n/de-DE/dataset-hit-testing.ts | 40 - web/i18n/de-DE/dataset-pipeline.json | 95 + web/i18n/de-DE/dataset-pipeline.ts | 163 - web/i18n/de-DE/dataset-settings.json | 44 + web/i18n/de-DE/dataset-settings.ts | 52 - web/i18n/de-DE/dataset.json | 185 + web/i18n/de-DE/dataset.ts | 253 - web/i18n/de-DE/education.json | 44 + web/i18n/de-DE/education.ts | 76 - web/i18n/de-DE/explore.json | 29 + web/i18n/de-DE/explore.ts | 44 - web/i18n/de-DE/layout.json | 4 + web/i18n/de-DE/layout.ts | 8 - web/i18n/de-DE/login.json | 115 + web/i18n/de-DE/login.ts | 126 - web/i18n/de-DE/oauth.json | 19 + web/i18n/de-DE/oauth.ts | 27 - web/i18n/de-DE/pipeline.json | 24 + web/i18n/de-DE/pipeline.ts | 40 - web/i18n/de-DE/plugin-tags.json | 22 + web/i18n/de-DE/plugin-tags.ts | 26 - web/i18n/de-DE/plugin-trigger.json | 118 + web/i18n/de-DE/plugin-trigger.ts | 186 - web/i18n/de-DE/plugin.json | 251 + web/i18n/de-DE/plugin.ts | 358 -- web/i18n/de-DE/register.json | 1 + web/i18n/de-DE/register.ts | 4 - web/i18n/de-DE/run-log.json | 23 + web/i18n/de-DE/run-log.ts | 31 - web/i18n/de-DE/share.json | 62 + web/i18n/de-DE/share.ts | 86 - web/i18n/de-DE/time.json | 32 + web/i18n/de-DE/time.ts | 44 - web/i18n/de-DE/tools.json | 211 + web/i18n/de-DE/tools.ts | 264 - web/i18n/de-DE/workflow.json | 1051 ++++ web/i18n/de-DE/workflow.ts | 1296 ----- web/i18n/en-US/app-annotation.json | 70 + web/i18n/en-US/app-annotation.ts | 98 - web/i18n/en-US/app-api.json | 72 + web/i18n/en-US/app-api.ts | 85 - web/i18n/en-US/app-debug.json | 391 ++ web/i18n/en-US/app-debug.ts | 575 --- web/i18n/en-US/app-log.json | 84 + web/i18n/en-US/app-log.ts | 112 - web/i18n/en-US/app-overview.json | 121 + web/i18n/en-US/app-overview.ts | 189 - web/i18n/en-US/app.json | 283 ++ web/i18n/en-US/app.ts | 351 -- web/i18n/en-US/billing.json | 185 + web/i18n/en-US/billing.ts | 240 - web/i18n/en-US/common.json | 622 +++ web/i18n/en-US/common.ts | 791 --- web/i18n/en-US/custom.json | 22 + web/i18n/en-US/custom.ts | 32 - web/i18n/en-US/dataset-creation.json | 185 + web/i18n/en-US/dataset-creation.ts | 217 - web/i18n/en-US/dataset-documents.json | 332 ++ web/i18n/en-US/dataset-documents.ts | 408 -- web/i18n/en-US/dataset-hit-testing.json | 28 + web/i18n/en-US/dataset-hit-testing.ts | 40 - web/i18n/en-US/dataset-pipeline.json | 95 + web/i18n/en-US/dataset-pipeline.ts | 163 - web/i18n/en-US/dataset-settings.json | 44 + web/i18n/en-US/dataset-settings.ts | 52 - web/i18n/en-US/dataset.json | 185 + 
web/i18n/en-US/dataset.ts | 251 - web/i18n/en-US/education.json | 44 + web/i18n/en-US/education.ts | 76 - web/i18n/en-US/explore.json | 29 + web/i18n/en-US/explore.ts | 44 - web/i18n/en-US/layout.json | 4 + web/i18n/en-US/layout.ts | 8 - web/i18n/en-US/login.json | 115 + web/i18n/en-US/login.ts | 126 - web/i18n/en-US/oauth.json | 19 + web/i18n/en-US/oauth.ts | 27 - web/i18n/en-US/pipeline.json | 24 + web/i18n/en-US/pipeline.ts | 40 - web/i18n/en-US/plugin-tags.json | 22 + web/i18n/en-US/plugin-tags.ts | 26 - web/i18n/en-US/plugin-trigger.json | 118 + web/i18n/en-US/plugin-trigger.ts | 186 - web/i18n/en-US/plugin.json | 251 + web/i18n/en-US/plugin.ts | 325 -- web/i18n/en-US/register.json | 1 + web/i18n/en-US/register.ts | 4 - web/i18n/en-US/run-log.json | 23 + web/i18n/en-US/run-log.ts | 31 - web/i18n/en-US/share.json | 62 + web/i18n/en-US/share.ts | 86 - web/i18n/en-US/time.json | 32 + web/i18n/en-US/time.ts | 45 - web/i18n/en-US/tools.json | 211 + web/i18n/en-US/tools.ts | 264 - web/i18n/en-US/workflow.json | 1051 ++++ web/i18n/en-US/workflow.ts | 1300 ----- web/i18n/es-ES/app-annotation.json | 70 + web/i18n/es-ES/app-annotation.ts | 98 - web/i18n/es-ES/app-api.json | 72 + web/i18n/es-ES/app-api.ts | 85 - web/i18n/es-ES/app-debug.json | 391 ++ web/i18n/es-ES/app-debug.ts | 568 --- web/i18n/es-ES/app-log.json | 84 + web/i18n/es-ES/app-log.ts | 112 - web/i18n/es-ES/app-overview.json | 121 + web/i18n/es-ES/app-overview.ts | 189 - web/i18n/es-ES/app.json | 283 ++ web/i18n/es-ES/app.ts | 351 -- web/i18n/es-ES/billing.json | 185 + web/i18n/es-ES/billing.ts | 221 - web/i18n/es-ES/common.json | 622 +++ web/i18n/es-ES/common.ts | 791 --- web/i18n/es-ES/custom.json | 22 + web/i18n/es-ES/custom.ts | 32 - web/i18n/es-ES/dataset-creation.json | 185 + web/i18n/es-ES/dataset-creation.ts | 217 - web/i18n/es-ES/dataset-documents.json | 332 ++ web/i18n/es-ES/dataset-documents.ts | 408 -- web/i18n/es-ES/dataset-hit-testing.json | 28 + web/i18n/es-ES/dataset-hit-testing.ts | 40 - web/i18n/es-ES/dataset-pipeline.json | 95 + web/i18n/es-ES/dataset-pipeline.ts | 163 - web/i18n/es-ES/dataset-settings.json | 44 + web/i18n/es-ES/dataset-settings.ts | 52 - web/i18n/es-ES/dataset.json | 185 + web/i18n/es-ES/dataset.ts | 253 - web/i18n/es-ES/education.json | 44 + web/i18n/es-ES/education.ts | 76 - web/i18n/es-ES/explore.json | 29 + web/i18n/es-ES/explore.ts | 44 - web/i18n/es-ES/layout.json | 4 + web/i18n/es-ES/layout.ts | 8 - web/i18n/es-ES/login.json | 115 + web/i18n/es-ES/login.ts | 126 - web/i18n/es-ES/oauth.json | 19 + web/i18n/es-ES/oauth.ts | 27 - web/i18n/es-ES/pipeline.json | 24 + web/i18n/es-ES/pipeline.ts | 40 - web/i18n/es-ES/plugin-tags.json | 22 + web/i18n/es-ES/plugin-tags.ts | 26 - web/i18n/es-ES/plugin-trigger.json | 118 + web/i18n/es-ES/plugin-trigger.ts | 186 - web/i18n/es-ES/plugin.json | 251 + web/i18n/es-ES/plugin.ts | 359 -- web/i18n/es-ES/register.json | 1 + web/i18n/es-ES/register.ts | 4 - web/i18n/es-ES/run-log.json | 23 + web/i18n/es-ES/run-log.ts | 31 - web/i18n/es-ES/share.json | 62 + web/i18n/es-ES/share.ts | 86 - web/i18n/es-ES/time.json | 32 + web/i18n/es-ES/time.ts | 44 - web/i18n/es-ES/tools.json | 211 + web/i18n/es-ES/tools.ts | 264 - web/i18n/es-ES/workflow.json | 1051 ++++ web/i18n/es-ES/workflow.ts | 1296 ----- web/i18n/fa-IR/app-annotation.json | 70 + web/i18n/fa-IR/app-annotation.ts | 98 - web/i18n/fa-IR/app-api.json | 72 + web/i18n/fa-IR/app-api.ts | 85 - web/i18n/fa-IR/app-debug.json | 391 ++ web/i18n/fa-IR/app-debug.ts | 564 --- web/i18n/fa-IR/app-log.json | 84 + 
web/i18n/fa-IR/app-log.ts | 112 - web/i18n/fa-IR/app-overview.json | 121 + web/i18n/fa-IR/app-overview.ts | 189 - web/i18n/fa-IR/app.json | 283 ++ web/i18n/fa-IR/app.ts | 351 -- web/i18n/fa-IR/billing.json | 185 + web/i18n/fa-IR/billing.ts | 221 - web/i18n/fa-IR/common.json | 622 +++ web/i18n/fa-IR/common.ts | 791 --- web/i18n/fa-IR/custom.json | 22 + web/i18n/fa-IR/custom.ts | 32 - web/i18n/fa-IR/dataset-creation.json | 185 + web/i18n/fa-IR/dataset-creation.ts | 217 - web/i18n/fa-IR/dataset-documents.json | 332 ++ web/i18n/fa-IR/dataset-documents.ts | 407 -- web/i18n/fa-IR/dataset-hit-testing.json | 28 + web/i18n/fa-IR/dataset-hit-testing.ts | 40 - web/i18n/fa-IR/dataset-pipeline.json | 95 + web/i18n/fa-IR/dataset-pipeline.ts | 163 - web/i18n/fa-IR/dataset-settings.json | 44 + web/i18n/fa-IR/dataset-settings.ts | 52 - web/i18n/fa-IR/dataset.json | 185 + web/i18n/fa-IR/dataset.ts | 253 - web/i18n/fa-IR/education.json | 44 + web/i18n/fa-IR/education.ts | 76 - web/i18n/fa-IR/explore.json | 29 + web/i18n/fa-IR/explore.ts | 44 - web/i18n/fa-IR/layout.json | 4 + web/i18n/fa-IR/layout.ts | 8 - web/i18n/fa-IR/login.json | 115 + web/i18n/fa-IR/login.ts | 126 - web/i18n/fa-IR/oauth.json | 19 + web/i18n/fa-IR/oauth.ts | 27 - web/i18n/fa-IR/pipeline.json | 24 + web/i18n/fa-IR/pipeline.ts | 40 - web/i18n/fa-IR/plugin-tags.json | 22 + web/i18n/fa-IR/plugin-tags.ts | 26 - web/i18n/fa-IR/plugin-trigger.json | 118 + web/i18n/fa-IR/plugin-trigger.ts | 186 - web/i18n/fa-IR/plugin.json | 251 + web/i18n/fa-IR/plugin.ts | 351 -- web/i18n/fa-IR/register.json | 1 + web/i18n/fa-IR/register.ts | 4 - web/i18n/fa-IR/run-log.json | 23 + web/i18n/fa-IR/run-log.ts | 31 - web/i18n/fa-IR/share.json | 62 + web/i18n/fa-IR/share.ts | 82 - web/i18n/fa-IR/time.json | 32 + web/i18n/fa-IR/time.ts | 44 - web/i18n/fa-IR/tools.json | 211 + web/i18n/fa-IR/tools.ts | 264 - web/i18n/fa-IR/workflow.json | 1051 ++++ web/i18n/fa-IR/workflow.ts | 1296 ----- web/i18n/fr-FR/app-annotation.json | 70 + web/i18n/fr-FR/app-annotation.ts | 98 - web/i18n/fr-FR/app-api.json | 72 + web/i18n/fr-FR/app-api.ts | 85 - web/i18n/fr-FR/app-debug.json | 391 ++ web/i18n/fr-FR/app-debug.ts | 575 --- web/i18n/fr-FR/app-log.json | 84 + web/i18n/fr-FR/app-log.ts | 112 - web/i18n/fr-FR/app-overview.json | 121 + web/i18n/fr-FR/app-overview.ts | 189 - web/i18n/fr-FR/app.json | 283 ++ web/i18n/fr-FR/app.ts | 351 -- web/i18n/fr-FR/billing.json | 185 + web/i18n/fr-FR/billing.ts | 221 - web/i18n/fr-FR/common.json | 622 +++ web/i18n/fr-FR/common.ts | 791 --- web/i18n/fr-FR/custom.json | 22 + web/i18n/fr-FR/custom.ts | 32 - web/i18n/fr-FR/dataset-creation.json | 185 + web/i18n/fr-FR/dataset-creation.ts | 217 - web/i18n/fr-FR/dataset-documents.json | 332 ++ web/i18n/fr-FR/dataset-documents.ts | 408 -- web/i18n/fr-FR/dataset-hit-testing.json | 28 + web/i18n/fr-FR/dataset-hit-testing.ts | 40 - web/i18n/fr-FR/dataset-pipeline.json | 95 + web/i18n/fr-FR/dataset-pipeline.ts | 163 - web/i18n/fr-FR/dataset-settings.json | 44 + web/i18n/fr-FR/dataset-settings.ts | 52 - web/i18n/fr-FR/dataset.json | 185 + web/i18n/fr-FR/dataset.ts | 253 - web/i18n/fr-FR/education.json | 44 + web/i18n/fr-FR/education.ts | 76 - web/i18n/fr-FR/explore.json | 29 + web/i18n/fr-FR/explore.ts | 44 - web/i18n/fr-FR/layout.json | 4 + web/i18n/fr-FR/layout.ts | 8 - web/i18n/fr-FR/login.json | 115 + web/i18n/fr-FR/login.ts | 126 - web/i18n/fr-FR/oauth.json | 19 + web/i18n/fr-FR/oauth.ts | 27 - web/i18n/fr-FR/pipeline.json | 24 + web/i18n/fr-FR/pipeline.ts | 40 - web/i18n/fr-FR/plugin-tags.json | 22 + 
web/i18n/fr-FR/plugin-tags.ts | 26 - web/i18n/fr-FR/plugin-trigger.json | 118 + web/i18n/fr-FR/plugin-trigger.ts | 186 - web/i18n/fr-FR/plugin.json | 251 + web/i18n/fr-FR/plugin.ts | 358 -- web/i18n/fr-FR/register.json | 1 + web/i18n/fr-FR/register.ts | 4 - web/i18n/fr-FR/run-log.json | 23 + web/i18n/fr-FR/run-log.ts | 31 - web/i18n/fr-FR/share.json | 62 + web/i18n/fr-FR/share.ts | 86 - web/i18n/fr-FR/time.json | 32 + web/i18n/fr-FR/time.ts | 44 - web/i18n/fr-FR/tools.json | 211 + web/i18n/fr-FR/tools.ts | 264 - web/i18n/fr-FR/workflow.json | 1051 ++++ web/i18n/fr-FR/workflow.ts | 1296 ----- web/i18n/hi-IN/app-annotation.json | 70 + web/i18n/hi-IN/app-annotation.ts | 98 - web/i18n/hi-IN/app-api.json | 72 + web/i18n/hi-IN/app-api.ts | 85 - web/i18n/hi-IN/app-debug.json | 391 ++ web/i18n/hi-IN/app-debug.ts | 624 --- web/i18n/hi-IN/app-log.json | 84 + web/i18n/hi-IN/app-log.ts | 114 - web/i18n/hi-IN/app-overview.json | 121 + web/i18n/hi-IN/app-overview.ts | 207 - web/i18n/hi-IN/app.json | 283 ++ web/i18n/hi-IN/app.ts | 351 -- web/i18n/hi-IN/billing.json | 185 + web/i18n/hi-IN/billing.ts | 232 - web/i18n/hi-IN/common.json | 622 +++ web/i18n/hi-IN/common.ts | 813 ---- web/i18n/hi-IN/custom.json | 22 + web/i18n/hi-IN/custom.ts | 32 - web/i18n/hi-IN/dataset-creation.json | 185 + web/i18n/hi-IN/dataset-creation.ts | 236 - web/i18n/hi-IN/dataset-documents.json | 332 ++ web/i18n/hi-IN/dataset-documents.ts | 409 -- web/i18n/hi-IN/dataset-hit-testing.json | 28 + web/i18n/hi-IN/dataset-hit-testing.ts | 40 - web/i18n/hi-IN/dataset-pipeline.json | 95 + web/i18n/hi-IN/dataset-pipeline.ts | 163 - web/i18n/hi-IN/dataset-settings.json | 44 + web/i18n/hi-IN/dataset-settings.ts | 57 - web/i18n/hi-IN/dataset.json | 185 + web/i18n/hi-IN/dataset.ts | 258 - web/i18n/hi-IN/education.json | 44 + web/i18n/hi-IN/education.ts | 76 - web/i18n/hi-IN/explore.json | 29 + web/i18n/hi-IN/explore.ts | 45 - web/i18n/hi-IN/layout.json | 4 + web/i18n/hi-IN/layout.ts | 8 - web/i18n/hi-IN/login.json | 115 + web/i18n/hi-IN/login.ts | 131 - web/i18n/hi-IN/oauth.json | 19 + web/i18n/hi-IN/oauth.ts | 27 - web/i18n/hi-IN/pipeline.json | 24 + web/i18n/hi-IN/pipeline.ts | 40 - web/i18n/hi-IN/plugin-tags.json | 22 + web/i18n/hi-IN/plugin-tags.ts | 26 - web/i18n/hi-IN/plugin-trigger.json | 118 + web/i18n/hi-IN/plugin-trigger.ts | 186 - web/i18n/hi-IN/plugin.json | 251 + web/i18n/hi-IN/plugin.ts | 355 -- web/i18n/hi-IN/register.json | 1 + web/i18n/hi-IN/register.ts | 3 - web/i18n/hi-IN/run-log.json | 23 + web/i18n/hi-IN/run-log.ts | 31 - web/i18n/hi-IN/share.json | 62 + web/i18n/hi-IN/share.ts | 86 - web/i18n/hi-IN/time.json | 32 + web/i18n/hi-IN/time.ts | 44 - web/i18n/hi-IN/tools.json | 211 + web/i18n/hi-IN/tools.ts | 269 - web/i18n/hi-IN/workflow.json | 1051 ++++ web/i18n/hi-IN/workflow.ts | 1316 ----- web/i18n/id-ID/app-annotation.json | 70 + web/i18n/id-ID/app-annotation.ts | 98 - web/i18n/id-ID/app-api.json | 72 + web/i18n/id-ID/app-api.ts | 85 - web/i18n/id-ID/app-debug.json | 391 ++ web/i18n/id-ID/app-debug.ts | 564 --- web/i18n/id-ID/app-log.json | 84 + web/i18n/id-ID/app-log.ts | 112 - web/i18n/id-ID/app-overview.json | 121 + web/i18n/id-ID/app-overview.ts | 189 - web/i18n/id-ID/app.json | 283 ++ web/i18n/id-ID/app.ts | 347 -- web/i18n/id-ID/billing.json | 185 + web/i18n/id-ID/billing.ts | 221 - web/i18n/id-ID/common.json | 622 +++ web/i18n/id-ID/common.ts | 786 --- web/i18n/id-ID/custom.json | 22 + web/i18n/id-ID/custom.ts | 32 - web/i18n/id-ID/dataset-creation.json | 185 + web/i18n/id-ID/dataset-creation.ts | 217 - 
web/i18n/id-ID/dataset-documents.json | 332 ++ web/i18n/id-ID/dataset-documents.ts | 407 -- web/i18n/id-ID/dataset-hit-testing.json | 28 + web/i18n/id-ID/dataset-hit-testing.ts | 40 - web/i18n/id-ID/dataset-pipeline.json | 95 + web/i18n/id-ID/dataset-pipeline.ts | 163 - web/i18n/id-ID/dataset-settings.json | 44 + web/i18n/id-ID/dataset-settings.ts | 52 - web/i18n/id-ID/dataset.json | 185 + web/i18n/id-ID/dataset.ts | 252 - web/i18n/id-ID/education.json | 44 + web/i18n/id-ID/education.ts | 76 - web/i18n/id-ID/explore.json | 29 + web/i18n/id-ID/explore.ts | 44 - web/i18n/id-ID/layout.json | 4 + web/i18n/id-ID/layout.ts | 8 - web/i18n/id-ID/login.json | 115 + web/i18n/id-ID/login.ts | 126 - web/i18n/id-ID/oauth.json | 19 + web/i18n/id-ID/oauth.ts | 27 - web/i18n/id-ID/pipeline.json | 24 + web/i18n/id-ID/pipeline.ts | 40 - web/i18n/id-ID/plugin-tags.json | 22 + web/i18n/id-ID/plugin-tags.ts | 26 - web/i18n/id-ID/plugin-trigger.json | 118 + web/i18n/id-ID/plugin-trigger.ts | 186 - web/i18n/id-ID/plugin.json | 251 + web/i18n/id-ID/plugin.ts | 325 -- web/i18n/id-ID/register.json | 1 + web/i18n/id-ID/register.ts | 4 - web/i18n/id-ID/run-log.json | 23 + web/i18n/id-ID/run-log.ts | 31 - web/i18n/id-ID/share.json | 62 + web/i18n/id-ID/share.ts | 82 - web/i18n/id-ID/time.json | 32 + web/i18n/id-ID/time.ts | 44 - web/i18n/id-ID/tools.json | 211 + web/i18n/id-ID/tools.ts | 264 - web/i18n/id-ID/workflow.json | 1051 ++++ web/i18n/id-ID/workflow.ts | 1296 ----- web/i18n/it-IT/app-annotation.json | 70 + web/i18n/it-IT/app-annotation.ts | 100 - web/i18n/it-IT/app-api.json | 72 + web/i18n/it-IT/app-api.ts | 106 - web/i18n/it-IT/app-debug.json | 391 ++ web/i18n/it-IT/app-debug.ts | 616 --- web/i18n/it-IT/app-log.json | 84 + web/i18n/it-IT/app-log.ts | 116 - web/i18n/it-IT/app-overview.json | 121 + web/i18n/it-IT/app-overview.ts | 209 - web/i18n/it-IT/app.json | 283 ++ web/i18n/it-IT/app.ts | 357 -- web/i18n/it-IT/billing.json | 185 + web/i18n/it-IT/billing.ts | 232 - web/i18n/it-IT/common.json | 622 +++ web/i18n/it-IT/common.ts | 821 ---- web/i18n/it-IT/custom.json | 22 + web/i18n/it-IT/custom.ts | 33 - web/i18n/it-IT/dataset-creation.json | 185 + web/i18n/it-IT/dataset-creation.ts | 239 - web/i18n/it-IT/dataset-documents.json | 332 ++ web/i18n/it-IT/dataset-documents.ts | 410 -- web/i18n/it-IT/dataset-hit-testing.json | 28 + web/i18n/it-IT/dataset-hit-testing.ts | 41 - web/i18n/it-IT/dataset-pipeline.json | 95 + web/i18n/it-IT/dataset-pipeline.ts | 163 - web/i18n/it-IT/dataset-settings.json | 44 + web/i18n/it-IT/dataset-settings.ts | 57 - web/i18n/it-IT/dataset.json | 185 + web/i18n/it-IT/dataset.ts | 258 - web/i18n/it-IT/education.json | 44 + web/i18n/it-IT/education.ts | 76 - web/i18n/it-IT/explore.json | 29 + web/i18n/it-IT/explore.ts | 45 - web/i18n/it-IT/layout.json | 4 + web/i18n/it-IT/layout.ts | 8 - web/i18n/it-IT/login.json | 115 + web/i18n/it-IT/login.ts | 136 - web/i18n/it-IT/oauth.json | 19 + web/i18n/it-IT/oauth.ts | 27 - web/i18n/it-IT/pipeline.json | 24 + web/i18n/it-IT/pipeline.ts | 40 - web/i18n/it-IT/plugin-tags.json | 22 + web/i18n/it-IT/plugin-tags.ts | 26 - web/i18n/it-IT/plugin-trigger.json | 118 + web/i18n/it-IT/plugin-trigger.ts | 186 - web/i18n/it-IT/plugin.json | 251 + web/i18n/it-IT/plugin.ts | 325 -- web/i18n/it-IT/register.json | 1 + web/i18n/it-IT/register.ts | 4 - web/i18n/it-IT/run-log.json | 23 + web/i18n/it-IT/run-log.ts | 31 - web/i18n/it-IT/share.json | 62 + web/i18n/it-IT/share.ts | 88 - web/i18n/it-IT/time.json | 32 + web/i18n/it-IT/time.ts | 44 - 
web/i18n/it-IT/tools.json | 211 + web/i18n/it-IT/tools.ts | 274 -- web/i18n/it-IT/workflow.json | 1051 ++++ web/i18n/it-IT/workflow.ts | 1322 ----- web/i18n/ja-JP/app-annotation.json | 70 + web/i18n/ja-JP/app-annotation.ts | 98 - web/i18n/ja-JP/app-api.json | 72 + web/i18n/ja-JP/app-api.ts | 85 - web/i18n/ja-JP/app-debug.json | 391 ++ web/i18n/ja-JP/app-debug.ts | 567 --- web/i18n/ja-JP/app-log.json | 84 + web/i18n/ja-JP/app-log.ts | 112 - web/i18n/ja-JP/app-overview.json | 121 + web/i18n/ja-JP/app-overview.ts | 189 - web/i18n/ja-JP/app.json | 283 ++ web/i18n/ja-JP/app.ts | 350 -- web/i18n/ja-JP/billing.json | 185 + web/i18n/ja-JP/billing.ts | 221 - web/i18n/ja-JP/common.json | 622 +++ web/i18n/ja-JP/common.ts | 791 --- web/i18n/ja-JP/custom.json | 22 + web/i18n/ja-JP/custom.ts | 32 - web/i18n/ja-JP/dataset-creation.json | 185 + web/i18n/ja-JP/dataset-creation.ts | 217 - web/i18n/ja-JP/dataset-documents.json | 332 ++ web/i18n/ja-JP/dataset-documents.ts | 408 -- web/i18n/ja-JP/dataset-hit-testing.json | 28 + web/i18n/ja-JP/dataset-hit-testing.ts | 40 - web/i18n/ja-JP/dataset-pipeline.json | 95 + web/i18n/ja-JP/dataset-pipeline.ts | 163 - web/i18n/ja-JP/dataset-settings.json | 44 + web/i18n/ja-JP/dataset-settings.ts | 52 - web/i18n/ja-JP/dataset.json | 185 + web/i18n/ja-JP/dataset.ts | 251 - web/i18n/ja-JP/education.json | 44 + web/i18n/ja-JP/education.ts | 76 - web/i18n/ja-JP/explore.json | 29 + web/i18n/ja-JP/explore.ts | 44 - web/i18n/ja-JP/layout.json | 4 + web/i18n/ja-JP/layout.ts | 8 - web/i18n/ja-JP/login.json | 115 + web/i18n/ja-JP/login.ts | 126 - web/i18n/ja-JP/oauth.json | 19 + web/i18n/ja-JP/oauth.ts | 27 - web/i18n/ja-JP/pipeline.json | 24 + web/i18n/ja-JP/pipeline.ts | 40 - web/i18n/ja-JP/plugin-tags.json | 22 + web/i18n/ja-JP/plugin-tags.ts | 26 - web/i18n/ja-JP/plugin-trigger.json | 118 + web/i18n/ja-JP/plugin-trigger.ts | 192 - web/i18n/ja-JP/plugin.json | 251 + web/i18n/ja-JP/plugin.ts | 325 -- web/i18n/ja-JP/register.json | 1 + web/i18n/ja-JP/register.ts | 4 - web/i18n/ja-JP/run-log.json | 23 + web/i18n/ja-JP/run-log.ts | 31 - web/i18n/ja-JP/share.json | 62 + web/i18n/ja-JP/share.ts | 82 - web/i18n/ja-JP/time.json | 32 + web/i18n/ja-JP/time.ts | 45 - web/i18n/ja-JP/tools.json | 211 + web/i18n/ja-JP/tools.ts | 264 - web/i18n/ja-JP/workflow.json | 1051 ++++ web/i18n/ja-JP/workflow.ts | 1296 ----- web/i18n/ko-KR/app-annotation.json | 70 + web/i18n/ko-KR/app-annotation.ts | 98 - web/i18n/ko-KR/app-api.json | 72 + web/i18n/ko-KR/app-api.ts | 87 - web/i18n/ko-KR/app-debug.json | 391 ++ web/i18n/ko-KR/app-debug.ts | 568 --- web/i18n/ko-KR/app-log.json | 84 + web/i18n/ko-KR/app-log.ts | 113 - web/i18n/ko-KR/app-overview.json | 121 + web/i18n/ko-KR/app-overview.ts | 189 - web/i18n/ko-KR/app.json | 283 ++ web/i18n/ko-KR/app.ts | 371 -- web/i18n/ko-KR/billing.json | 185 + web/i18n/ko-KR/billing.ts | 234 - web/i18n/ko-KR/common.json | 622 +++ web/i18n/ko-KR/common.ts | 787 --- web/i18n/ko-KR/custom.json | 22 + web/i18n/ko-KR/custom.ts | 32 - web/i18n/ko-KR/dataset-creation.json | 185 + web/i18n/ko-KR/dataset-creation.ts | 218 - web/i18n/ko-KR/dataset-documents.json | 332 ++ web/i18n/ko-KR/dataset-documents.ts | 407 -- web/i18n/ko-KR/dataset-hit-testing.json | 28 + web/i18n/ko-KR/dataset-hit-testing.ts | 40 - web/i18n/ko-KR/dataset-pipeline.json | 95 + web/i18n/ko-KR/dataset-pipeline.ts | 163 - web/i18n/ko-KR/dataset-settings.json | 44 + web/i18n/ko-KR/dataset-settings.ts | 52 - web/i18n/ko-KR/dataset.json | 185 + web/i18n/ko-KR/dataset.ts | 252 - web/i18n/ko-KR/education.json | 44 + 
web/i18n/ko-KR/education.ts | 80 - web/i18n/ko-KR/explore.json | 29 + web/i18n/ko-KR/explore.ts | 44 - web/i18n/ko-KR/layout.json | 4 + web/i18n/ko-KR/layout.ts | 8 - web/i18n/ko-KR/login.json | 115 + web/i18n/ko-KR/login.ts | 126 - web/i18n/ko-KR/oauth.json | 19 + web/i18n/ko-KR/oauth.ts | 27 - web/i18n/ko-KR/pipeline.json | 24 + web/i18n/ko-KR/pipeline.ts | 40 - web/i18n/ko-KR/plugin-tags.json | 22 + web/i18n/ko-KR/plugin-tags.ts | 26 - web/i18n/ko-KR/plugin-trigger.json | 118 + web/i18n/ko-KR/plugin-trigger.ts | 186 - web/i18n/ko-KR/plugin.json | 251 + web/i18n/ko-KR/plugin.ts | 325 -- web/i18n/ko-KR/register.json | 1 + web/i18n/ko-KR/register.ts | 3 - web/i18n/ko-KR/run-log.json | 23 + web/i18n/ko-KR/run-log.ts | 31 - web/i18n/ko-KR/share.json | 62 + web/i18n/ko-KR/share.ts | 82 - web/i18n/ko-KR/time.json | 32 + web/i18n/ko-KR/time.ts | 44 - web/i18n/ko-KR/tools.json | 211 + web/i18n/ko-KR/tools.ts | 264 - web/i18n/ko-KR/workflow.json | 1051 ++++ web/i18n/ko-KR/workflow.ts | 1347 ------ web/i18n/pl-PL/app-annotation.json | 70 + web/i18n/pl-PL/app-annotation.ts | 100 - web/i18n/pl-PL/app-api.json | 72 + web/i18n/pl-PL/app-api.ts | 104 - web/i18n/pl-PL/app-debug.json | 391 ++ web/i18n/pl-PL/app-debug.ts | 611 --- web/i18n/pl-PL/app-log.json | 84 + web/i18n/pl-PL/app-log.ts | 116 - web/i18n/pl-PL/app-overview.json | 121 + web/i18n/pl-PL/app-overview.ts | 207 - web/i18n/pl-PL/app.json | 283 ++ web/i18n/pl-PL/app.ts | 352 -- web/i18n/pl-PL/billing.json | 185 + web/i18n/pl-PL/billing.ts | 231 - web/i18n/pl-PL/common.json | 622 +++ web/i18n/pl-PL/common.ts | 809 ---- web/i18n/pl-PL/custom.json | 22 + web/i18n/pl-PL/custom.ts | 33 - web/i18n/pl-PL/dataset-creation.json | 185 + web/i18n/pl-PL/dataset-creation.ts | 233 - web/i18n/pl-PL/dataset-documents.json | 332 ++ web/i18n/pl-PL/dataset-documents.ts | 409 -- web/i18n/pl-PL/dataset-hit-testing.json | 28 + web/i18n/pl-PL/dataset-hit-testing.ts | 40 - web/i18n/pl-PL/dataset-pipeline.json | 95 + web/i18n/pl-PL/dataset-pipeline.ts | 163 - web/i18n/pl-PL/dataset-settings.json | 44 + web/i18n/pl-PL/dataset-settings.ts | 57 - web/i18n/pl-PL/dataset.json | 185 + web/i18n/pl-PL/dataset.ts | 257 - web/i18n/pl-PL/education.json | 44 + web/i18n/pl-PL/education.ts | 76 - web/i18n/pl-PL/explore.json | 29 + web/i18n/pl-PL/explore.ts | 45 - web/i18n/pl-PL/layout.json | 4 + web/i18n/pl-PL/layout.ts | 8 - web/i18n/pl-PL/login.json | 115 + web/i18n/pl-PL/login.ts | 131 - web/i18n/pl-PL/oauth.json | 19 + web/i18n/pl-PL/oauth.ts | 27 - web/i18n/pl-PL/pipeline.json | 24 + web/i18n/pl-PL/pipeline.ts | 40 - web/i18n/pl-PL/plugin-tags.json | 22 + web/i18n/pl-PL/plugin-tags.ts | 26 - web/i18n/pl-PL/plugin-trigger.json | 118 + web/i18n/pl-PL/plugin-trigger.ts | 186 - web/i18n/pl-PL/plugin.json | 251 + web/i18n/pl-PL/plugin.ts | 325 -- web/i18n/pl-PL/register.json | 1 + web/i18n/pl-PL/register.ts | 4 - web/i18n/pl-PL/run-log.json | 23 + web/i18n/pl-PL/run-log.ts | 31 - web/i18n/pl-PL/share.json | 62 + web/i18n/pl-PL/share.ts | 87 - web/i18n/pl-PL/time.json | 32 + web/i18n/pl-PL/time.ts | 44 - web/i18n/pl-PL/tools.json | 211 + web/i18n/pl-PL/tools.ts | 268 - web/i18n/pl-PL/workflow.json | 1051 ++++ web/i18n/pl-PL/workflow.ts | 1296 ----- web/i18n/pt-BR/app-annotation.json | 70 + web/i18n/pt-BR/app-annotation.ts | 98 - web/i18n/pt-BR/app-api.json | 72 + web/i18n/pt-BR/app-api.ts | 85 - web/i18n/pt-BR/app-debug.json | 391 ++ web/i18n/pt-BR/app-debug.ts | 577 --- web/i18n/pt-BR/app-log.json | 84 + web/i18n/pt-BR/app-log.ts | 114 - web/i18n/pt-BR/app-overview.json | 121 + 
web/i18n/pt-BR/app-overview.ts | 189 - web/i18n/pt-BR/app.json | 283 ++ web/i18n/pt-BR/app.ts | 351 -- web/i18n/pt-BR/billing.json | 185 + web/i18n/pt-BR/billing.ts | 221 - web/i18n/pt-BR/common.json | 622 +++ web/i18n/pt-BR/common.ts | 791 --- web/i18n/pt-BR/custom.json | 22 + web/i18n/pt-BR/custom.ts | 32 - web/i18n/pt-BR/dataset-creation.json | 185 + web/i18n/pt-BR/dataset-creation.ts | 217 - web/i18n/pt-BR/dataset-documents.json | 332 ++ web/i18n/pt-BR/dataset-documents.ts | 408 -- web/i18n/pt-BR/dataset-hit-testing.json | 28 + web/i18n/pt-BR/dataset-hit-testing.ts | 40 - web/i18n/pt-BR/dataset-pipeline.json | 95 + web/i18n/pt-BR/dataset-pipeline.ts | 163 - web/i18n/pt-BR/dataset-settings.json | 44 + web/i18n/pt-BR/dataset-settings.ts | 52 - web/i18n/pt-BR/dataset.json | 185 + web/i18n/pt-BR/dataset.ts | 253 - web/i18n/pt-BR/education.json | 44 + web/i18n/pt-BR/education.ts | 76 - web/i18n/pt-BR/explore.json | 29 + web/i18n/pt-BR/explore.ts | 44 - web/i18n/pt-BR/layout.json | 4 + web/i18n/pt-BR/layout.ts | 8 - web/i18n/pt-BR/login.json | 115 + web/i18n/pt-BR/login.ts | 126 - web/i18n/pt-BR/oauth.json | 19 + web/i18n/pt-BR/oauth.ts | 27 - web/i18n/pt-BR/pipeline.json | 24 + web/i18n/pt-BR/pipeline.ts | 40 - web/i18n/pt-BR/plugin-tags.json | 22 + web/i18n/pt-BR/plugin-tags.ts | 26 - web/i18n/pt-BR/plugin-trigger.json | 118 + web/i18n/pt-BR/plugin-trigger.ts | 186 - web/i18n/pt-BR/plugin.json | 251 + web/i18n/pt-BR/plugin.ts | 325 -- web/i18n/pt-BR/register.json | 1 + web/i18n/pt-BR/register.ts | 4 - web/i18n/pt-BR/run-log.json | 23 + web/i18n/pt-BR/run-log.ts | 31 - web/i18n/pt-BR/share.json | 62 + web/i18n/pt-BR/share.ts | 86 - web/i18n/pt-BR/time.json | 32 + web/i18n/pt-BR/time.ts | 44 - web/i18n/pt-BR/tools.json | 211 + web/i18n/pt-BR/tools.ts | 264 - web/i18n/pt-BR/workflow.json | 1051 ++++ web/i18n/pt-BR/workflow.ts | 1296 ----- web/i18n/ro-RO/app-annotation.json | 70 + web/i18n/ro-RO/app-annotation.ts | 98 - web/i18n/ro-RO/app-api.json | 72 + web/i18n/ro-RO/app-api.ts | 85 - web/i18n/ro-RO/app-debug.json | 391 ++ web/i18n/ro-RO/app-debug.ts | 577 --- web/i18n/ro-RO/app-log.json | 84 + web/i18n/ro-RO/app-log.ts | 112 - web/i18n/ro-RO/app-overview.json | 121 + web/i18n/ro-RO/app-overview.ts | 189 - web/i18n/ro-RO/app.json | 283 ++ web/i18n/ro-RO/app.ts | 351 -- web/i18n/ro-RO/billing.json | 185 + web/i18n/ro-RO/billing.ts | 221 - web/i18n/ro-RO/common.json | 622 +++ web/i18n/ro-RO/common.ts | 791 --- web/i18n/ro-RO/custom.json | 22 + web/i18n/ro-RO/custom.ts | 32 - web/i18n/ro-RO/dataset-creation.json | 185 + web/i18n/ro-RO/dataset-creation.ts | 217 - web/i18n/ro-RO/dataset-documents.json | 332 ++ web/i18n/ro-RO/dataset-documents.ts | 408 -- web/i18n/ro-RO/dataset-hit-testing.json | 28 + web/i18n/ro-RO/dataset-hit-testing.ts | 40 - web/i18n/ro-RO/dataset-pipeline.json | 95 + web/i18n/ro-RO/dataset-pipeline.ts | 163 - web/i18n/ro-RO/dataset-settings.json | 44 + web/i18n/ro-RO/dataset-settings.ts | 52 - web/i18n/ro-RO/dataset.json | 185 + web/i18n/ro-RO/dataset.ts | 253 - web/i18n/ro-RO/education.json | 44 + web/i18n/ro-RO/education.ts | 76 - web/i18n/ro-RO/explore.json | 29 + web/i18n/ro-RO/explore.ts | 44 - web/i18n/ro-RO/layout.json | 4 + web/i18n/ro-RO/layout.ts | 8 - web/i18n/ro-RO/login.json | 115 + web/i18n/ro-RO/login.ts | 126 - web/i18n/ro-RO/oauth.json | 19 + web/i18n/ro-RO/oauth.ts | 27 - web/i18n/ro-RO/pipeline.json | 24 + web/i18n/ro-RO/pipeline.ts | 40 - web/i18n/ro-RO/plugin-tags.json | 22 + web/i18n/ro-RO/plugin-tags.ts | 26 - web/i18n/ro-RO/plugin-trigger.json | 118 + 
web/i18n/ro-RO/plugin-trigger.ts | 186 - web/i18n/ro-RO/plugin.json | 251 + web/i18n/ro-RO/plugin.ts | 325 -- web/i18n/ro-RO/register.json | 1 + web/i18n/ro-RO/register.ts | 4 - web/i18n/ro-RO/run-log.json | 23 + web/i18n/ro-RO/run-log.ts | 31 - web/i18n/ro-RO/share.json | 62 + web/i18n/ro-RO/share.ts | 86 - web/i18n/ro-RO/time.json | 32 + web/i18n/ro-RO/time.ts | 44 - web/i18n/ro-RO/tools.json | 211 + web/i18n/ro-RO/tools.ts | 264 - web/i18n/ro-RO/workflow.json | 1051 ++++ web/i18n/ro-RO/workflow.ts | 1296 ----- web/i18n/ru-RU/app-annotation.json | 70 + web/i18n/ru-RU/app-annotation.ts | 98 - web/i18n/ru-RU/app-api.json | 72 + web/i18n/ru-RU/app-api.ts | 85 - web/i18n/ru-RU/app-debug.json | 391 ++ web/i18n/ru-RU/app-debug.ts | 575 --- web/i18n/ru-RU/app-log.json | 84 + web/i18n/ru-RU/app-log.ts | 112 - web/i18n/ru-RU/app-overview.json | 121 + web/i18n/ru-RU/app-overview.ts | 189 - web/i18n/ru-RU/app.json | 283 ++ web/i18n/ru-RU/app.ts | 351 -- web/i18n/ru-RU/billing.json | 185 + web/i18n/ru-RU/billing.ts | 221 - web/i18n/ru-RU/common.json | 622 +++ web/i18n/ru-RU/common.ts | 791 --- web/i18n/ru-RU/custom.json | 22 + web/i18n/ru-RU/custom.ts | 32 - web/i18n/ru-RU/dataset-creation.json | 185 + web/i18n/ru-RU/dataset-creation.ts | 217 - web/i18n/ru-RU/dataset-documents.json | 332 ++ web/i18n/ru-RU/dataset-documents.ts | 408 -- web/i18n/ru-RU/dataset-hit-testing.json | 28 + web/i18n/ru-RU/dataset-hit-testing.ts | 40 - web/i18n/ru-RU/dataset-pipeline.json | 95 + web/i18n/ru-RU/dataset-pipeline.ts | 163 - web/i18n/ru-RU/dataset-settings.json | 44 + web/i18n/ru-RU/dataset-settings.ts | 52 - web/i18n/ru-RU/dataset.json | 185 + web/i18n/ru-RU/dataset.ts | 253 - web/i18n/ru-RU/education.json | 44 + web/i18n/ru-RU/education.ts | 76 - web/i18n/ru-RU/explore.json | 29 + web/i18n/ru-RU/explore.ts | 44 - web/i18n/ru-RU/layout.json | 4 + web/i18n/ru-RU/layout.ts | 8 - web/i18n/ru-RU/login.json | 115 + web/i18n/ru-RU/login.ts | 126 - web/i18n/ru-RU/oauth.json | 19 + web/i18n/ru-RU/oauth.ts | 27 - web/i18n/ru-RU/pipeline.json | 24 + web/i18n/ru-RU/pipeline.ts | 40 - web/i18n/ru-RU/plugin-tags.json | 22 + web/i18n/ru-RU/plugin-tags.ts | 26 - web/i18n/ru-RU/plugin-trigger.json | 118 + web/i18n/ru-RU/plugin-trigger.ts | 186 - web/i18n/ru-RU/plugin.json | 251 + web/i18n/ru-RU/plugin.ts | 325 -- web/i18n/ru-RU/register.json | 1 + web/i18n/ru-RU/register.ts | 4 - web/i18n/ru-RU/run-log.json | 23 + web/i18n/ru-RU/run-log.ts | 31 - web/i18n/ru-RU/share.json | 62 + web/i18n/ru-RU/share.ts | 86 - web/i18n/ru-RU/time.json | 32 + web/i18n/ru-RU/time.ts | 44 - web/i18n/ru-RU/tools.json | 211 + web/i18n/ru-RU/tools.ts | 264 - web/i18n/ru-RU/workflow.json | 1051 ++++ web/i18n/ru-RU/workflow.ts | 1296 ----- web/i18n/sl-SI/app-annotation.json | 70 + web/i18n/sl-SI/app-annotation.ts | 98 - web/i18n/sl-SI/app-api.json | 72 + web/i18n/sl-SI/app-api.ts | 85 - web/i18n/sl-SI/app-debug.json | 391 ++ web/i18n/sl-SI/app-debug.ts | 586 --- web/i18n/sl-SI/app-log.json | 84 + web/i18n/sl-SI/app-log.ts | 112 - web/i18n/sl-SI/app-overview.json | 121 + web/i18n/sl-SI/app-overview.ts | 189 - web/i18n/sl-SI/app.json | 283 ++ web/i18n/sl-SI/app.ts | 351 -- web/i18n/sl-SI/billing.json | 185 + web/i18n/sl-SI/billing.ts | 221 - web/i18n/sl-SI/common.json | 622 +++ web/i18n/sl-SI/common.ts | 791 --- web/i18n/sl-SI/custom.json | 22 + web/i18n/sl-SI/custom.ts | 32 - web/i18n/sl-SI/dataset-creation.json | 185 + web/i18n/sl-SI/dataset-creation.ts | 217 - web/i18n/sl-SI/dataset-documents.json | 332 ++ web/i18n/sl-SI/dataset-documents.ts | 408 -- 
web/i18n/sl-SI/dataset-hit-testing.json | 28 + web/i18n/sl-SI/dataset-hit-testing.ts | 40 - web/i18n/sl-SI/dataset-pipeline.json | 95 + web/i18n/sl-SI/dataset-pipeline.ts | 163 - web/i18n/sl-SI/dataset-settings.json | 44 + web/i18n/sl-SI/dataset-settings.ts | 52 - web/i18n/sl-SI/dataset.json | 185 + web/i18n/sl-SI/dataset.ts | 253 - web/i18n/sl-SI/education.json | 44 + web/i18n/sl-SI/education.ts | 76 - web/i18n/sl-SI/explore.json | 29 + web/i18n/sl-SI/explore.ts | 44 - web/i18n/sl-SI/layout.json | 4 + web/i18n/sl-SI/layout.ts | 8 - web/i18n/sl-SI/login.json | 115 + web/i18n/sl-SI/login.ts | 126 - web/i18n/sl-SI/oauth.json | 19 + web/i18n/sl-SI/oauth.ts | 27 - web/i18n/sl-SI/pipeline.json | 24 + web/i18n/sl-SI/pipeline.ts | 40 - web/i18n/sl-SI/plugin-tags.json | 22 + web/i18n/sl-SI/plugin-tags.ts | 26 - web/i18n/sl-SI/plugin-trigger.json | 118 + web/i18n/sl-SI/plugin-trigger.ts | 186 - web/i18n/sl-SI/plugin.json | 251 + web/i18n/sl-SI/plugin.ts | 325 -- web/i18n/sl-SI/register.json | 1 + web/i18n/sl-SI/register.ts | 4 - web/i18n/sl-SI/run-log.json | 23 + web/i18n/sl-SI/run-log.ts | 31 - web/i18n/sl-SI/share.json | 62 + web/i18n/sl-SI/share.ts | 83 - web/i18n/sl-SI/time.json | 32 + web/i18n/sl-SI/time.ts | 44 - web/i18n/sl-SI/tools.json | 211 + web/i18n/sl-SI/tools.ts | 264 - web/i18n/sl-SI/workflow.json | 1051 ++++ web/i18n/sl-SI/workflow.ts | 1296 ----- web/i18n/th-TH/app-annotation.json | 70 + web/i18n/th-TH/app-annotation.ts | 98 - web/i18n/th-TH/app-api.json | 72 + web/i18n/th-TH/app-api.ts | 85 - web/i18n/th-TH/app-debug.json | 391 ++ web/i18n/th-TH/app-debug.ts | 564 --- web/i18n/th-TH/app-log.json | 84 + web/i18n/th-TH/app-log.ts | 112 - web/i18n/th-TH/app-overview.json | 121 + web/i18n/th-TH/app-overview.ts | 189 - web/i18n/th-TH/app.json | 283 ++ web/i18n/th-TH/app.ts | 347 -- web/i18n/th-TH/billing.json | 185 + web/i18n/th-TH/billing.ts | 221 - web/i18n/th-TH/common.json | 622 +++ web/i18n/th-TH/common.ts | 786 --- web/i18n/th-TH/custom.json | 22 + web/i18n/th-TH/custom.ts | 32 - web/i18n/th-TH/dataset-creation.json | 185 + web/i18n/th-TH/dataset-creation.ts | 217 - web/i18n/th-TH/dataset-documents.json | 332 ++ web/i18n/th-TH/dataset-documents.ts | 407 -- web/i18n/th-TH/dataset-hit-testing.json | 28 + web/i18n/th-TH/dataset-hit-testing.ts | 40 - web/i18n/th-TH/dataset-pipeline.json | 95 + web/i18n/th-TH/dataset-pipeline.ts | 163 - web/i18n/th-TH/dataset-settings.json | 44 + web/i18n/th-TH/dataset-settings.ts | 52 - web/i18n/th-TH/dataset.json | 185 + web/i18n/th-TH/dataset.ts | 252 - web/i18n/th-TH/education.json | 44 + web/i18n/th-TH/education.ts | 76 - web/i18n/th-TH/explore.json | 29 + web/i18n/th-TH/explore.ts | 44 - web/i18n/th-TH/layout.json | 4 + web/i18n/th-TH/layout.ts | 8 - web/i18n/th-TH/login.json | 115 + web/i18n/th-TH/login.ts | 126 - web/i18n/th-TH/oauth.json | 19 + web/i18n/th-TH/oauth.ts | 27 - web/i18n/th-TH/pipeline.json | 24 + web/i18n/th-TH/pipeline.ts | 40 - web/i18n/th-TH/plugin-tags.json | 22 + web/i18n/th-TH/plugin-tags.ts | 26 - web/i18n/th-TH/plugin-trigger.json | 118 + web/i18n/th-TH/plugin-trigger.ts | 186 - web/i18n/th-TH/plugin.json | 251 + web/i18n/th-TH/plugin.ts | 325 -- web/i18n/th-TH/register.json | 1 + web/i18n/th-TH/register.ts | 3 - web/i18n/th-TH/run-log.json | 23 + web/i18n/th-TH/run-log.ts | 31 - web/i18n/th-TH/share.json | 62 + web/i18n/th-TH/share.ts | 82 - web/i18n/th-TH/time.json | 32 + web/i18n/th-TH/time.ts | 44 - web/i18n/th-TH/tools.json | 211 + web/i18n/th-TH/tools.ts | 264 - web/i18n/th-TH/workflow.json | 1051 ++++ 
web/i18n/th-TH/workflow.ts | 1296 ----- web/i18n/tr-TR/app-annotation.json | 70 + web/i18n/tr-TR/app-annotation.ts | 98 - web/i18n/tr-TR/app-api.json | 72 + web/i18n/tr-TR/app-api.ts | 85 - web/i18n/tr-TR/app-debug.json | 391 ++ web/i18n/tr-TR/app-debug.ts | 571 --- web/i18n/tr-TR/app-log.json | 84 + web/i18n/tr-TR/app-log.ts | 112 - web/i18n/tr-TR/app-overview.json | 121 + web/i18n/tr-TR/app-overview.ts | 189 - web/i18n/tr-TR/app.json | 283 ++ web/i18n/tr-TR/app.ts | 347 -- web/i18n/tr-TR/billing.json | 185 + web/i18n/tr-TR/billing.ts | 221 - web/i18n/tr-TR/common.json | 622 +++ web/i18n/tr-TR/common.ts | 791 --- web/i18n/tr-TR/custom.json | 22 + web/i18n/tr-TR/custom.ts | 32 - web/i18n/tr-TR/dataset-creation.json | 185 + web/i18n/tr-TR/dataset-creation.ts | 217 - web/i18n/tr-TR/dataset-documents.json | 332 ++ web/i18n/tr-TR/dataset-documents.ts | 407 -- web/i18n/tr-TR/dataset-hit-testing.json | 28 + web/i18n/tr-TR/dataset-hit-testing.ts | 40 - web/i18n/tr-TR/dataset-pipeline.json | 95 + web/i18n/tr-TR/dataset-pipeline.ts | 163 - web/i18n/tr-TR/dataset-settings.json | 44 + web/i18n/tr-TR/dataset-settings.ts | 52 - web/i18n/tr-TR/dataset.json | 185 + web/i18n/tr-TR/dataset.ts | 253 - web/i18n/tr-TR/education.json | 44 + web/i18n/tr-TR/education.ts | 76 - web/i18n/tr-TR/explore.json | 29 + web/i18n/tr-TR/explore.ts | 44 - web/i18n/tr-TR/layout.json | 4 + web/i18n/tr-TR/layout.ts | 8 - web/i18n/tr-TR/login.json | 115 + web/i18n/tr-TR/login.ts | 126 - web/i18n/tr-TR/oauth.json | 19 + web/i18n/tr-TR/oauth.ts | 27 - web/i18n/tr-TR/pipeline.json | 24 + web/i18n/tr-TR/pipeline.ts | 40 - web/i18n/tr-TR/plugin-tags.json | 22 + web/i18n/tr-TR/plugin-tags.ts | 26 - web/i18n/tr-TR/plugin-trigger.json | 118 + web/i18n/tr-TR/plugin-trigger.ts | 186 - web/i18n/tr-TR/plugin.json | 251 + web/i18n/tr-TR/plugin.ts | 325 -- web/i18n/tr-TR/register.json | 1 + web/i18n/tr-TR/register.ts | 3 - web/i18n/tr-TR/run-log.json | 23 + web/i18n/tr-TR/run-log.ts | 31 - web/i18n/tr-TR/share.json | 62 + web/i18n/tr-TR/share.ts | 82 - web/i18n/tr-TR/time.json | 32 + web/i18n/tr-TR/time.ts | 44 - web/i18n/tr-TR/tools.json | 211 + web/i18n/tr-TR/tools.ts | 264 - web/i18n/tr-TR/workflow.json | 1051 ++++ web/i18n/tr-TR/workflow.ts | 1296 ----- web/i18n/uk-UA/app-annotation.json | 70 + web/i18n/uk-UA/app-annotation.ts | 98 - web/i18n/uk-UA/app-api.json | 72 + web/i18n/uk-UA/app-api.ts | 85 - web/i18n/uk-UA/app-debug.json | 391 ++ web/i18n/uk-UA/app-debug.ts | 599 --- web/i18n/uk-UA/app-log.json | 84 + web/i18n/uk-UA/app-log.ts | 112 - web/i18n/uk-UA/app-overview.json | 121 + web/i18n/uk-UA/app-overview.ts | 189 - web/i18n/uk-UA/app.json | 283 ++ web/i18n/uk-UA/app.ts | 351 -- web/i18n/uk-UA/billing.json | 185 + web/i18n/uk-UA/billing.ts | 221 - web/i18n/uk-UA/common.json | 622 +++ web/i18n/uk-UA/common.ts | 792 --- web/i18n/uk-UA/custom.json | 22 + web/i18n/uk-UA/custom.ts | 32 - web/i18n/uk-UA/dataset-creation.json | 185 + web/i18n/uk-UA/dataset-creation.ts | 217 - web/i18n/uk-UA/dataset-documents.json | 332 ++ web/i18n/uk-UA/dataset-documents.ts | 407 -- web/i18n/uk-UA/dataset-hit-testing.json | 28 + web/i18n/uk-UA/dataset-hit-testing.ts | 40 - web/i18n/uk-UA/dataset-pipeline.json | 95 + web/i18n/uk-UA/dataset-pipeline.ts | 163 - web/i18n/uk-UA/dataset-settings.json | 44 + web/i18n/uk-UA/dataset-settings.ts | 52 - web/i18n/uk-UA/dataset.json | 185 + web/i18n/uk-UA/dataset.ts | 254 - web/i18n/uk-UA/education.json | 44 + web/i18n/uk-UA/education.ts | 76 - web/i18n/uk-UA/explore.json | 29 + web/i18n/uk-UA/explore.ts | 44 - 
web/i18n/uk-UA/layout.json | 4 + web/i18n/uk-UA/layout.ts | 8 - web/i18n/uk-UA/login.json | 115 + web/i18n/uk-UA/login.ts | 126 - web/i18n/uk-UA/oauth.json | 19 + web/i18n/uk-UA/oauth.ts | 27 - web/i18n/uk-UA/pipeline.json | 24 + web/i18n/uk-UA/pipeline.ts | 40 - web/i18n/uk-UA/plugin-tags.json | 22 + web/i18n/uk-UA/plugin-tags.ts | 26 - web/i18n/uk-UA/plugin-trigger.json | 118 + web/i18n/uk-UA/plugin-trigger.ts | 186 - web/i18n/uk-UA/plugin.json | 251 + web/i18n/uk-UA/plugin.ts | 325 -- web/i18n/uk-UA/register.json | 1 + web/i18n/uk-UA/register.ts | 4 - web/i18n/uk-UA/run-log.json | 23 + web/i18n/uk-UA/run-log.ts | 31 - web/i18n/uk-UA/share.json | 62 + web/i18n/uk-UA/share.ts | 82 - web/i18n/uk-UA/time.json | 32 + web/i18n/uk-UA/time.ts | 44 - web/i18n/uk-UA/tools.json | 211 + web/i18n/uk-UA/tools.ts | 264 - web/i18n/uk-UA/workflow.json | 1051 ++++ web/i18n/uk-UA/workflow.ts | 1296 ----- web/i18n/vi-VN/app-annotation.json | 70 + web/i18n/vi-VN/app-annotation.ts | 98 - web/i18n/vi-VN/app-api.json | 72 + web/i18n/vi-VN/app-api.ts | 85 - web/i18n/vi-VN/app-debug.json | 391 ++ web/i18n/vi-VN/app-debug.ts | 568 --- web/i18n/vi-VN/app-log.json | 84 + web/i18n/vi-VN/app-log.ts | 112 - web/i18n/vi-VN/app-overview.json | 121 + web/i18n/vi-VN/app-overview.ts | 189 - web/i18n/vi-VN/app.json | 283 ++ web/i18n/vi-VN/app.ts | 351 -- web/i18n/vi-VN/billing.json | 185 + web/i18n/vi-VN/billing.ts | 221 - web/i18n/vi-VN/common.json | 622 +++ web/i18n/vi-VN/common.ts | 791 --- web/i18n/vi-VN/custom.json | 22 + web/i18n/vi-VN/custom.ts | 32 - web/i18n/vi-VN/dataset-creation.json | 185 + web/i18n/vi-VN/dataset-creation.ts | 217 - web/i18n/vi-VN/dataset-documents.json | 332 ++ web/i18n/vi-VN/dataset-documents.ts | 407 -- web/i18n/vi-VN/dataset-hit-testing.json | 28 + web/i18n/vi-VN/dataset-hit-testing.ts | 40 - web/i18n/vi-VN/dataset-pipeline.json | 95 + web/i18n/vi-VN/dataset-pipeline.ts | 163 - web/i18n/vi-VN/dataset-settings.json | 44 + web/i18n/vi-VN/dataset-settings.ts | 52 - web/i18n/vi-VN/dataset.json | 185 + web/i18n/vi-VN/dataset.ts | 253 - web/i18n/vi-VN/education.json | 44 + web/i18n/vi-VN/education.ts | 76 - web/i18n/vi-VN/explore.json | 29 + web/i18n/vi-VN/explore.ts | 44 - web/i18n/vi-VN/layout.json | 4 + web/i18n/vi-VN/layout.ts | 8 - web/i18n/vi-VN/login.json | 115 + web/i18n/vi-VN/login.ts | 126 - web/i18n/vi-VN/oauth.json | 19 + web/i18n/vi-VN/oauth.ts | 27 - web/i18n/vi-VN/pipeline.json | 24 + web/i18n/vi-VN/pipeline.ts | 40 - web/i18n/vi-VN/plugin-tags.json | 22 + web/i18n/vi-VN/plugin-tags.ts | 26 - web/i18n/vi-VN/plugin-trigger.json | 118 + web/i18n/vi-VN/plugin-trigger.ts | 186 - web/i18n/vi-VN/plugin.json | 251 + web/i18n/vi-VN/plugin.ts | 325 -- web/i18n/vi-VN/register.json | 1 + web/i18n/vi-VN/register.ts | 4 - web/i18n/vi-VN/run-log.json | 23 + web/i18n/vi-VN/run-log.ts | 31 - web/i18n/vi-VN/share.json | 62 + web/i18n/vi-VN/share.ts | 82 - web/i18n/vi-VN/time.json | 32 + web/i18n/vi-VN/time.ts | 44 - web/i18n/vi-VN/tools.json | 211 + web/i18n/vi-VN/tools.ts | 264 - web/i18n/vi-VN/workflow.json | 1051 ++++ web/i18n/vi-VN/workflow.ts | 1296 ----- web/i18n/zh-Hans/app-annotation.json | 70 + web/i18n/zh-Hans/app-annotation.ts | 98 - web/i18n/zh-Hans/app-api.json | 72 + web/i18n/zh-Hans/app-api.ts | 85 - web/i18n/zh-Hans/app-debug.json | 391 ++ web/i18n/zh-Hans/app-debug.ts | 569 --- web/i18n/zh-Hans/app-log.json | 84 + web/i18n/zh-Hans/app-log.ts | 112 - web/i18n/zh-Hans/app-overview.json | 121 + web/i18n/zh-Hans/app-overview.ts | 189 - web/i18n/zh-Hans/app.json | 283 ++ 
web/i18n/zh-Hans/app.ts | 350 -- web/i18n/zh-Hans/billing.json | 185 + web/i18n/zh-Hans/billing.ts | 221 - web/i18n/zh-Hans/common.json | 622 +++ web/i18n/zh-Hans/common.ts | 791 --- web/i18n/zh-Hans/custom.json | 22 + web/i18n/zh-Hans/custom.ts | 32 - web/i18n/zh-Hans/dataset-creation.json | 185 + web/i18n/zh-Hans/dataset-creation.ts | 217 - web/i18n/zh-Hans/dataset-documents.json | 332 ++ web/i18n/zh-Hans/dataset-documents.ts | 407 -- web/i18n/zh-Hans/dataset-hit-testing.json | 28 + web/i18n/zh-Hans/dataset-hit-testing.ts | 40 - web/i18n/zh-Hans/dataset-pipeline.json | 95 + web/i18n/zh-Hans/dataset-pipeline.ts | 163 - web/i18n/zh-Hans/dataset-settings.json | 44 + web/i18n/zh-Hans/dataset-settings.ts | 52 - web/i18n/zh-Hans/dataset.json | 185 + web/i18n/zh-Hans/dataset.ts | 251 - web/i18n/zh-Hans/education.json | 44 + web/i18n/zh-Hans/education.ts | 76 - web/i18n/zh-Hans/explore.json | 29 + web/i18n/zh-Hans/explore.ts | 44 - web/i18n/zh-Hans/layout.json | 4 + web/i18n/zh-Hans/layout.ts | 8 - web/i18n/zh-Hans/login.json | 115 + web/i18n/zh-Hans/login.ts | 126 - web/i18n/zh-Hans/oauth.json | 19 + web/i18n/zh-Hans/oauth.ts | 27 - web/i18n/zh-Hans/pipeline.json | 24 + web/i18n/zh-Hans/pipeline.ts | 40 - web/i18n/zh-Hans/plugin-tags.json | 22 + web/i18n/zh-Hans/plugin-tags.ts | 26 - web/i18n/zh-Hans/plugin-trigger.json | 118 + web/i18n/zh-Hans/plugin-trigger.ts | 186 - web/i18n/zh-Hans/plugin.json | 251 + web/i18n/zh-Hans/plugin.ts | 325 -- web/i18n/zh-Hans/register.json | 1 + web/i18n/zh-Hans/register.ts | 4 - web/i18n/zh-Hans/run-log.json | 23 + web/i18n/zh-Hans/run-log.ts | 31 - web/i18n/zh-Hans/share.json | 62 + web/i18n/zh-Hans/share.ts | 82 - web/i18n/zh-Hans/time.json | 32 + web/i18n/zh-Hans/time.ts | 45 - web/i18n/zh-Hans/tools.json | 211 + web/i18n/zh-Hans/tools.ts | 264 - web/i18n/zh-Hans/workflow.json | 1051 ++++ web/i18n/zh-Hans/workflow.ts | 1300 ----- web/i18n/zh-Hant/app-annotation.json | 70 + web/i18n/zh-Hant/app-annotation.ts | 98 - web/i18n/zh-Hant/app-api.json | 72 + web/i18n/zh-Hant/app-api.ts | 85 - web/i18n/zh-Hant/app-debug.json | 391 ++ web/i18n/zh-Hant/app-debug.ts | 569 --- web/i18n/zh-Hant/app-log.json | 84 + web/i18n/zh-Hant/app-log.ts | 112 - web/i18n/zh-Hant/app-overview.json | 121 + web/i18n/zh-Hant/app-overview.ts | 189 - web/i18n/zh-Hant/app.json | 283 ++ web/i18n/zh-Hant/app.ts | 350 -- web/i18n/zh-Hant/billing.json | 185 + web/i18n/zh-Hant/billing.ts | 221 - web/i18n/zh-Hant/common.json | 622 +++ web/i18n/zh-Hant/common.ts | 791 --- web/i18n/zh-Hant/custom.json | 22 + web/i18n/zh-Hant/custom.ts | 32 - web/i18n/zh-Hant/dataset-creation.json | 185 + web/i18n/zh-Hant/dataset-creation.ts | 217 - web/i18n/zh-Hant/dataset-documents.json | 332 ++ web/i18n/zh-Hant/dataset-documents.ts | 407 -- web/i18n/zh-Hant/dataset-hit-testing.json | 28 + web/i18n/zh-Hant/dataset-hit-testing.ts | 40 - web/i18n/zh-Hant/dataset-pipeline.json | 95 + web/i18n/zh-Hant/dataset-pipeline.ts | 163 - web/i18n/zh-Hant/dataset-settings.json | 44 + web/i18n/zh-Hant/dataset-settings.ts | 52 - web/i18n/zh-Hant/dataset.json | 185 + web/i18n/zh-Hant/dataset.ts | 253 - web/i18n/zh-Hant/education.json | 44 + web/i18n/zh-Hant/education.ts | 76 - web/i18n/zh-Hant/explore.json | 29 + web/i18n/zh-Hant/explore.ts | 44 - web/i18n/zh-Hant/layout.json | 4 + web/i18n/zh-Hant/layout.ts | 8 - web/i18n/zh-Hant/login.json | 115 + web/i18n/zh-Hant/login.ts | 126 - web/i18n/zh-Hant/oauth.json | 19 + web/i18n/zh-Hant/oauth.ts | 27 - web/i18n/zh-Hant/pipeline.json | 24 + web/i18n/zh-Hant/pipeline.ts | 40 - 
web/i18n/zh-Hant/plugin-tags.json | 22 + web/i18n/zh-Hant/plugin-tags.ts | 26 - web/i18n/zh-Hant/plugin-trigger.json | 118 + web/i18n/zh-Hant/plugin-trigger.ts | 186 - web/i18n/zh-Hant/plugin.json | 251 + web/i18n/zh-Hant/plugin.ts | 325 -- web/i18n/zh-Hant/register.json | 1 + web/i18n/zh-Hant/register.ts | 4 - web/i18n/zh-Hant/run-log.json | 23 + web/i18n/zh-Hant/run-log.ts | 31 - web/i18n/zh-Hant/share.json | 62 + web/i18n/zh-Hant/share.ts | 82 - web/i18n/zh-Hant/time.json | 32 + web/i18n/zh-Hant/time.ts | 44 - web/i18n/zh-Hant/tools.json | 211 + web/i18n/zh-Hant/tools.ts | 264 - web/i18n/zh-Hant/workflow.json | 1051 ++++ web/i18n/zh-Hant/workflow.ts | 1296 ----- web/jest.config.ts | 214 - web/jest.setup.ts | 25 - web/knip.config.ts | 13 +- web/models/access-control.ts | 4 +- web/models/app.ts | 12 +- web/models/common.ts | 2 +- web/models/datasets.ts | 19 +- web/models/debug.ts | 11 +- web/models/explore.ts | 1 + web/models/log.ts | 21 +- web/models/pipeline.ts | 10 +- web/next.config.js | 55 +- web/package.json | 130 +- web/pnpm-lock.yaml | 4309 ++++++++--------- web/postcss.config.js | 2 +- web/{testing => scripts}/analyze-component.js | 504 +- web/scripts/analyze-i18n-diff.ts | 406 ++ web/{i18n-config => scripts}/auto-gen-i18n.js | 176 +- web/{i18n-config => scripts}/check-i18n.js | 236 +- web/scripts/component-analyzer.js | 484 ++ web/scripts/copy-and-start.mjs | 2 +- web/scripts/generate-icons.js | 50 +- web/scripts/optimize-standalone.js | 132 +- web/scripts/refactor-component.js | 415 ++ web/service/_tools_util.spec.ts | 8 +- web/service/access-control.ts | 12 +- web/service/annotation.ts | 15 +- web/service/apps.ts | 67 +- web/service/base.ts | 30 +- web/service/billing.ts | 2 +- web/service/common.ts | 255 +- web/service/datasets.ts | 50 +- web/service/debug.ts | 10 +- web/service/demo/index.tsx | 12 +- web/service/explore.ts | 16 +- web/service/fetch.ts | 10 +- web/service/knowledge/use-create-dataset.ts | 10 +- web/service/knowledge/use-dataset.ts | 24 +- web/service/knowledge/use-document.ts | 28 +- web/service/knowledge/use-hit-testing.ts | 4 +- web/service/knowledge/use-import.ts | 2 +- web/service/knowledge/use-metadata.spec.tsx | 10 +- web/service/knowledge/use-metadata.ts | 8 +- web/service/knowledge/use-segment.ts | 28 +- web/service/log.ts | 56 +- web/service/plugins.ts | 18 +- web/service/share.ts | 27 +- web/service/sso.ts | 4 +- web/service/tag.ts | 2 +- web/service/tools.ts | 4 +- web/service/use-apps.ts | 6 +- web/service/use-base.ts | 3 +- web/service/use-billing.ts | 2 +- web/service/use-common.ts | 259 +- web/service/use-datasource.ts | 11 +- web/service/use-education.ts | 6 +- web/service/use-endpoints.ts | 5 +- web/service/use-explore.ts | 25 +- web/service/use-flow.ts | 2 +- web/service/use-log.ts | 89 + web/service/use-models.ts | 12 +- web/service/use-oauth.ts | 2 +- web/service/use-pipeline.ts | 24 +- web/service/use-plugins-auth.ts | 13 +- web/service/use-plugins.ts | 66 +- web/service/use-share.spec.tsx | 231 + web/service/use-share.ts | 115 +- web/service/use-strategy.ts | 4 +- web/service/use-tools.ts | 23 +- web/service/use-triggers.ts | 113 +- web/service/use-workflow.ts | 20 +- web/service/utils.spec.ts | 2 +- web/service/workflow-payload.ts | 6 +- web/service/workflow.ts | 37 +- web/tailwind-common-config.ts | 8 +- web/tailwind.config.js | 1 + web/testing/testing.md | 72 +- web/themes/dark.css | 12 + web/tsconfig.json | 36 +- web/types/app.ts | 23 +- web/types/feature.ts | 5 +- web/types/i18n.d.ts | 125 +- web/types/react-18-input-autosize.d.ts | 
2 +- web/types/workflow.ts | 36 +- web/typography.js | 2 +- web/utils/app-redirection.spec.ts | 28 +- web/utils/classnames.spec.ts | 22 +- web/utils/classnames.ts | 9 +- web/utils/clipboard.spec.ts | 22 +- web/utils/completion-params.spec.ts | 42 +- web/utils/completion-params.ts | 4 +- web/utils/context.spec.ts | 14 +- web/utils/context.ts | 6 +- web/utils/emoji.spec.ts | 21 +- web/utils/emoji.ts | 2 +- web/utils/encryption.ts | 6 +- web/utils/format.spec.ts | 52 +- web/utils/format.ts | 34 +- web/utils/get-icon.spec.ts | 36 +- web/utils/index.spec.ts | 162 +- web/utils/index.ts | 11 +- web/utils/mcp.spec.ts | 28 +- web/utils/model-config.spec.ts | 4 +- web/utils/model-config.ts | 4 +- web/utils/navigation.spec.ts | 74 +- web/utils/permission.spec.ts | 18 +- web/utils/plugin-version-feature.spec.ts | 2 +- web/utils/time.spec.ts | 26 +- web/utils/time.ts | 5 +- web/utils/timezone.json | 2544 +++++----- web/utils/tool-call.spec.ts | 20 +- web/utils/tool-call.ts | 3 +- web/utils/urlValidation.ts | 3 +- web/utils/var.spec.ts | 2 +- web/utils/var.ts | 21 +- web/utils/zod.spec.ts | 6 +- web/vitest.config.ts | 16 + web/vitest.setup.ts | 157 + 4921 files changed, 308906 insertions(+), 230467 deletions(-) delete mode 100644 web/.oxlintrc.json delete mode 100644 web/__mocks__/mime.js delete mode 100644 web/__mocks__/react-i18next.ts rename web/app/components/{swr-initializer.tsx => app-initializer.tsx} (81%) create mode 100644 web/app/components/app-sidebar/dataset-info/index.spec.tsx create mode 100644 web/app/components/app/annotation/batch-action.spec.tsx create mode 100644 web/app/components/app/annotation/batch-add-annotation-modal/csv-downloader.spec.tsx create mode 100644 web/app/components/app/annotation/batch-add-annotation-modal/index.spec.tsx create mode 100644 web/app/components/app/annotation/empty-element.spec.tsx create mode 100644 web/app/components/app/annotation/filter.spec.tsx create mode 100644 web/app/components/app/annotation/header-opts/index.spec.tsx create mode 100644 web/app/components/app/annotation/index.spec.tsx create mode 100644 web/app/components/app/annotation/list.spec.tsx create mode 100644 web/app/components/app/annotation/view-annotation-modal/index.spec.tsx create mode 100644 web/app/components/app/app-access-control/access-control.spec.tsx create mode 100644 web/app/components/app/configuration/base/feature-panel/index.spec.tsx create mode 100644 web/app/components/app/configuration/config-var/index.spec.tsx create mode 100644 web/app/components/app/configuration/config-vision/index.spec.tsx create mode 100644 web/app/components/app/configuration/config/agent-setting-button.spec.tsx create mode 100644 web/app/components/app/configuration/config/agent/agent-setting/index.spec.tsx create mode 100644 web/app/components/app/configuration/config/agent/agent-setting/item-panel.spec.tsx create mode 100644 web/app/components/app/configuration/config/agent/agent-tools/index.spec.tsx create mode 100644 web/app/components/app/configuration/config/agent/agent-tools/setting-built-in-tool.spec.tsx create mode 100644 web/app/components/app/configuration/config/assistant-type-picker/index.spec.tsx create mode 100644 web/app/components/app/configuration/config/config-audio.spec.tsx create mode 100644 web/app/components/app/configuration/config/config-document.spec.tsx create mode 100644 web/app/components/app/configuration/config/index.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/index.spec.tsx create mode 100644 
web/app/components/app/configuration/dataset-config/params-config/config-content.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/params-config/index.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/params-config/weighted-score.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/select-dataset/index.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/settings-modal/index.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/settings-modal/retrieval-section.spec.tsx create mode 100644 web/app/components/app/configuration/dataset-config/settings-modal/retrieval-section.tsx create mode 100644 web/app/components/app/configuration/debug/debug-with-single-model/index.spec.tsx create mode 100644 web/app/components/app/configuration/prompt-value-panel/index.spec.tsx create mode 100644 web/app/components/app/configuration/prompt-value-panel/utils.spec.ts create mode 100644 web/app/components/app/create-app-dialog/app-list/index.spec.tsx create mode 100644 web/app/components/app/create-app-dialog/app-list/sidebar.spec.tsx create mode 100644 web/app/components/app/create-app-modal/index.spec.tsx create mode 100644 web/app/components/app/duplicate-modal/index.spec.tsx create mode 100644 web/app/components/app/log-annotation/index.spec.tsx create mode 100644 web/app/components/app/overview/embedded/index.spec.tsx create mode 100644 web/app/components/app/overview/settings/index.spec.tsx create mode 100644 web/app/components/app/switch-app-modal/index.spec.tsx create mode 100644 web/app/components/app/text-generate/saved-items/index.spec.tsx create mode 100644 web/app/components/app/text-generate/saved-items/no-data/index.spec.tsx create mode 100644 web/app/components/app/type-selector/index.spec.tsx delete mode 100644 web/app/components/apps/hooks/use-apps-query-state.spec.ts create mode 100644 web/app/components/apps/hooks/use-apps-query-state.spec.tsx create mode 100644 web/app/components/base/avatar/index.spec.tsx create mode 100644 web/app/components/base/badge/index.spec.tsx create mode 100644 web/app/components/base/chat/chat-with-history/hooks.spec.tsx create mode 100644 web/app/components/base/chat/embedded-chatbot/hooks.spec.tsx create mode 100644 web/app/components/base/chip/index.spec.tsx create mode 100644 web/app/components/billing/apps-full-in-dialog/index.spec.tsx create mode 100644 web/app/components/billing/billing-page/index.spec.tsx create mode 100644 web/app/components/billing/header-billing-btn/index.spec.tsx create mode 100644 web/app/components/billing/partner-stack/index.spec.tsx create mode 100644 web/app/components/billing/partner-stack/use-ps-info.spec.tsx create mode 100644 web/app/components/billing/plan/index.spec.tsx create mode 100644 web/app/components/billing/pricing/assets/index.spec.tsx create mode 100644 web/app/components/billing/pricing/footer.spec.tsx create mode 100644 web/app/components/billing/pricing/header.spec.tsx create mode 100644 web/app/components/billing/pricing/index.spec.tsx create mode 100644 web/app/components/billing/pricing/plan-switcher/index.spec.tsx create mode 100644 web/app/components/billing/pricing/plan-switcher/plan-range-switcher.spec.tsx create mode 100644 web/app/components/billing/pricing/plan-switcher/tab.spec.tsx create mode 100644 web/app/components/billing/priority-label/index.spec.tsx create mode 100644 web/app/components/billing/progress-bar/index.spec.tsx create mode 100644 
web/app/components/billing/trigger-events-limit-modal/index.spec.tsx create mode 100644 web/app/components/billing/upgrade-btn/index.spec.tsx create mode 100644 web/app/components/billing/usage-info/apps-info.spec.tsx create mode 100644 web/app/components/billing/usage-info/index.spec.tsx create mode 100644 web/app/components/billing/vector-space-full/index.spec.tsx create mode 100644 web/app/components/browser-initializer.spec.ts create mode 100644 web/app/components/custom/custom-web-app-brand/index.spec.tsx create mode 100644 web/app/components/datasets/create/empty-dataset-creation-modal/index.spec.tsx create mode 100644 web/app/components/datasets/create/file-preview/index.spec.tsx create mode 100644 web/app/components/datasets/create/index.spec.tsx create mode 100644 web/app/components/datasets/create/notion-page-preview/index.spec.tsx create mode 100644 web/app/components/datasets/create/step-three/index.spec.tsx create mode 100644 web/app/components/datasets/create/step-two/language-select/index.spec.tsx create mode 100644 web/app/components/datasets/create/step-two/preview-item/index.spec.tsx create mode 100644 web/app/components/datasets/create/stepper/index.spec.tsx create mode 100644 web/app/components/datasets/create/stop-embedding-modal/index.spec.tsx create mode 100644 web/app/components/datasets/create/top-bar/index.spec.tsx create mode 100644 web/app/components/datasets/create/website/base.spec.tsx create mode 100644 web/app/components/datasets/create/website/jina-reader/base.spec.tsx create mode 100644 web/app/components/datasets/create/website/jina-reader/index.spec.tsx create mode 100644 web/app/components/datasets/create/website/watercrawl/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/actions/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source-options/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/base/credential-selector/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/base/header.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-documents/page-selector/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/connect/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/dropdown/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/breadcrumbs/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/header/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/file-list/list/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/online-drive/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/website-crawl/base/index.spec.tsx create mode 100644 
web/app/components/datasets/documents/create-from-pipeline/data-source/website-crawl/base/options/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/data-source/website-crawl/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/preview/chunk-preview.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/preview/file-preview.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/preview/online-document-preview.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/preview/web-preview.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/process-documents/components.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/process-documents/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/index.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/processing/embedding-process/rule-detail.spec.tsx create mode 100644 web/app/components/datasets/documents/create-from-pipeline/processing/index.spec.tsx delete mode 100644 web/app/components/datasets/hit-testing/style.module.css create mode 100644 web/app/components/devtools/react-scan/loader.tsx rename web/app/components/{react-scan.tsx => devtools/react-scan/scan.tsx} (100%) create mode 100644 web/app/components/devtools/tanstack/devtools.tsx create mode 100644 web/app/components/devtools/tanstack/loader.tsx create mode 100644 web/app/components/explore/app-list/index.spec.tsx create mode 100644 web/app/components/explore/category.spec.tsx create mode 100644 web/app/components/explore/create-app-modal/index.spec.tsx create mode 100644 web/app/components/explore/index.spec.tsx create mode 100644 web/app/components/explore/installed-app/index.spec.tsx create mode 100644 web/app/components/explore/item-operation/index.spec.tsx create mode 100644 web/app/components/explore/sidebar/app-nav-item/index.spec.tsx create mode 100644 web/app/components/explore/sidebar/index.spec.tsx create mode 100644 web/app/components/goto-anything/command-selector.spec.tsx create mode 100644 web/app/components/goto-anything/context.spec.tsx create mode 100644 web/app/components/goto-anything/index.spec.tsx create mode 100644 web/app/components/header/account-setting/members-page/operation/index.spec.tsx create mode 100644 web/app/components/plugins/card/index.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-bundle/index.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-github/index.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-github/steps/loaded.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-github/steps/selectPackage.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-github/steps/setURL.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-local-package/index.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-local-package/ready-to-install.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-local-package/steps/install.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-local-package/steps/uploading.spec.tsx create mode 100644 
web/app/components/plugins/install-plugin/install-from-marketplace/index.spec.tsx create mode 100644 web/app/components/plugins/install-plugin/install-from-marketplace/steps/install.spec.tsx create mode 100644 web/app/components/plugins/marketplace/description/index.spec.tsx create mode 100644 web/app/components/plugins/marketplace/empty/index.spec.tsx create mode 100644 web/app/components/plugins/marketplace/index.spec.tsx create mode 100644 web/app/components/plugins/marketplace/list/index.spec.tsx create mode 100644 web/app/components/plugins/marketplace/search-box/index.spec.tsx create mode 100644 web/app/components/plugins/marketplace/sort-dropdown/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-auth/authorize/authorize-components.spec.tsx create mode 100644 web/app/components/plugins/plugin-auth/authorize/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-auth/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/model-selector/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/model-selector/llm-params-panel.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/model-selector/tts-params-panel.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/multiple-tool-selector/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/create/common-modal.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/create/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/create/oauth-client.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/delete-confirm.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/apikey-edit-modal.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/apikey-edit-modal.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/index.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/manual-edit-modal.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/manual-edit-modal.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/oauth-edit-modal.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/edit/oauth-edit-modal.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/list-view.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/log-viewer.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/selector-entry.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/selector-view.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/subscription-card.spec.tsx create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/types.ts create mode 100644 web/app/components/plugins/plugin-detail-panel/subscription-list/use-subscription-list.spec.ts create mode 100644 
web/app/components/plugins/plugin-item/action.spec.tsx create mode 100644 web/app/components/plugins/plugin-item/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-mutation-model/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-page/empty/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-page/filter-management/index.spec.tsx create mode 100644 web/app/components/plugins/plugin-page/list/index.spec.tsx create mode 100644 web/app/components/plugins/readme-panel/index.spec.tsx create mode 100644 web/app/components/plugins/reference-setting-modal/auto-update-setting/index.spec.tsx create mode 100644 web/app/components/plugins/reference-setting-modal/index.spec.tsx rename web/app/components/plugins/reference-setting-modal/{modal.tsx => index.tsx} (66%) create mode 100644 web/app/components/plugins/update-plugin/index.spec.tsx create mode 100644 web/app/components/share/text-generation/run-once/index.spec.tsx create mode 100644 web/app/components/tools/edit-custom-collection-modal/config-credentials.spec.tsx create mode 100644 web/app/components/tools/edit-custom-collection-modal/get-schema.spec.tsx create mode 100644 web/app/components/tools/edit-custom-collection-modal/index.spec.tsx create mode 100644 web/app/components/tools/edit-custom-collection-modal/test-api.spec.tsx create mode 100644 web/app/components/tools/marketplace/index.spec.tsx create mode 100644 web/app/components/tools/workflow-tool/utils.test.ts create mode 100644 web/app/components/tools/workflow-tool/utils.ts create mode 100644 web/app/components/workflow-app/components/workflow-header/chat-variable-trigger.spec.tsx create mode 100644 web/app/components/workflow-app/components/workflow-header/features-trigger.spec.tsx create mode 100644 web/app/components/workflow-app/components/workflow-header/index.spec.tsx delete mode 100644 web/app/components/workflow/nodes/llm/components/tools-config.tsx delete mode 100644 web/app/components/workflow/nodes/llm/constants.ts create mode 100644 web/app/components/workflow/panel/version-history-panel/index.spec.tsx delete mode 100644 web/app/dev-preview/page.tsx create mode 100644 web/app/styles/monaco-sticky-fix.css create mode 100644 web/eslint-rules/index.js create mode 100644 web/eslint-rules/namespaces.js create mode 100644 web/eslint-rules/rules/no-as-any-in-t.js create mode 100644 web/eslint-rules/rules/no-legacy-namespace-prefix.js create mode 100644 web/eslint-rules/rules/require-ns-option.js create mode 100644 web/hooks/use-query-params.spec.tsx create mode 100644 web/hooks/use-query-params.ts delete mode 100644 web/hooks/use-tab-searchparams.spec.ts delete mode 100644 web/hooks/use-tab-searchparams.ts delete mode 100644 web/i18n-config/check-i18n-sync.js delete mode 100644 web/i18n-config/generate-i18n-types.js delete mode 100644 web/i18n-config/languages.json create mode 100644 web/i18n-config/languages.ts create mode 100644 web/i18n/ar-TN/app-annotation.json delete mode 100644 web/i18n/ar-TN/app-annotation.ts create mode 100644 web/i18n/ar-TN/app-api.json delete mode 100644 web/i18n/ar-TN/app-api.ts create mode 100644 web/i18n/ar-TN/app-debug.json delete mode 100644 web/i18n/ar-TN/app-debug.ts create mode 100644 web/i18n/ar-TN/app-log.json delete mode 100644 web/i18n/ar-TN/app-log.ts create mode 100644 web/i18n/ar-TN/app-overview.json delete mode 100644 web/i18n/ar-TN/app-overview.ts create mode 100644 web/i18n/ar-TN/app.json delete mode 100644 web/i18n/ar-TN/app.ts create mode 100644 web/i18n/ar-TN/billing.json delete mode 
100644 web/i18n/ar-TN/billing.ts create mode 100644 web/i18n/ar-TN/common.json delete mode 100644 web/i18n/ar-TN/common.ts create mode 100644 web/i18n/ar-TN/custom.json delete mode 100644 web/i18n/ar-TN/custom.ts create mode 100644 web/i18n/ar-TN/dataset-creation.json delete mode 100644 web/i18n/ar-TN/dataset-creation.ts create mode 100644 web/i18n/ar-TN/dataset-documents.json delete mode 100644 web/i18n/ar-TN/dataset-documents.ts create mode 100644 web/i18n/ar-TN/dataset-hit-testing.json delete mode 100644 web/i18n/ar-TN/dataset-hit-testing.ts create mode 100644 web/i18n/ar-TN/dataset-pipeline.json delete mode 100644 web/i18n/ar-TN/dataset-pipeline.ts create mode 100644 web/i18n/ar-TN/dataset-settings.json delete mode 100644 web/i18n/ar-TN/dataset-settings.ts create mode 100644 web/i18n/ar-TN/dataset.json delete mode 100644 web/i18n/ar-TN/dataset.ts create mode 100644 web/i18n/ar-TN/education.json delete mode 100644 web/i18n/ar-TN/education.ts create mode 100644 web/i18n/ar-TN/explore.json delete mode 100644 web/i18n/ar-TN/explore.ts create mode 100644 web/i18n/ar-TN/layout.json delete mode 100644 web/i18n/ar-TN/layout.ts create mode 100644 web/i18n/ar-TN/login.json delete mode 100644 web/i18n/ar-TN/login.ts create mode 100644 web/i18n/ar-TN/oauth.json delete mode 100644 web/i18n/ar-TN/oauth.ts create mode 100644 web/i18n/ar-TN/pipeline.json delete mode 100644 web/i18n/ar-TN/pipeline.ts create mode 100644 web/i18n/ar-TN/plugin-tags.json delete mode 100644 web/i18n/ar-TN/plugin-tags.ts create mode 100644 web/i18n/ar-TN/plugin-trigger.json delete mode 100644 web/i18n/ar-TN/plugin-trigger.ts create mode 100644 web/i18n/ar-TN/plugin.json delete mode 100644 web/i18n/ar-TN/plugin.ts create mode 100644 web/i18n/ar-TN/register.json delete mode 100644 web/i18n/ar-TN/register.ts create mode 100644 web/i18n/ar-TN/run-log.json delete mode 100644 web/i18n/ar-TN/run-log.ts create mode 100644 web/i18n/ar-TN/share.json delete mode 100644 web/i18n/ar-TN/share.ts create mode 100644 web/i18n/ar-TN/time.json delete mode 100644 web/i18n/ar-TN/time.ts create mode 100644 web/i18n/ar-TN/tools.json delete mode 100644 web/i18n/ar-TN/tools.ts create mode 100644 web/i18n/ar-TN/workflow.json delete mode 100644 web/i18n/ar-TN/workflow.ts create mode 100644 web/i18n/de-DE/app-annotation.json delete mode 100644 web/i18n/de-DE/app-annotation.ts create mode 100644 web/i18n/de-DE/app-api.json delete mode 100644 web/i18n/de-DE/app-api.ts create mode 100644 web/i18n/de-DE/app-debug.json delete mode 100644 web/i18n/de-DE/app-debug.ts create mode 100644 web/i18n/de-DE/app-log.json delete mode 100644 web/i18n/de-DE/app-log.ts create mode 100644 web/i18n/de-DE/app-overview.json delete mode 100644 web/i18n/de-DE/app-overview.ts create mode 100644 web/i18n/de-DE/app.json delete mode 100644 web/i18n/de-DE/app.ts create mode 100644 web/i18n/de-DE/billing.json delete mode 100644 web/i18n/de-DE/billing.ts create mode 100644 web/i18n/de-DE/common.json delete mode 100644 web/i18n/de-DE/common.ts create mode 100644 web/i18n/de-DE/custom.json delete mode 100644 web/i18n/de-DE/custom.ts create mode 100644 web/i18n/de-DE/dataset-creation.json delete mode 100644 web/i18n/de-DE/dataset-creation.ts create mode 100644 web/i18n/de-DE/dataset-documents.json delete mode 100644 web/i18n/de-DE/dataset-documents.ts create mode 100644 web/i18n/de-DE/dataset-hit-testing.json delete mode 100644 web/i18n/de-DE/dataset-hit-testing.ts create mode 100644 web/i18n/de-DE/dataset-pipeline.json delete mode 100644 web/i18n/de-DE/dataset-pipeline.ts create mode 
100644 web/i18n/de-DE/dataset-settings.json delete mode 100644 web/i18n/de-DE/dataset-settings.ts create mode 100644 web/i18n/de-DE/dataset.json delete mode 100644 web/i18n/de-DE/dataset.ts create mode 100644 web/i18n/de-DE/education.json delete mode 100644 web/i18n/de-DE/education.ts create mode 100644 web/i18n/de-DE/explore.json delete mode 100644 web/i18n/de-DE/explore.ts create mode 100644 web/i18n/de-DE/layout.json delete mode 100644 web/i18n/de-DE/layout.ts create mode 100644 web/i18n/de-DE/login.json delete mode 100644 web/i18n/de-DE/login.ts create mode 100644 web/i18n/de-DE/oauth.json delete mode 100644 web/i18n/de-DE/oauth.ts create mode 100644 web/i18n/de-DE/pipeline.json delete mode 100644 web/i18n/de-DE/pipeline.ts create mode 100644 web/i18n/de-DE/plugin-tags.json delete mode 100644 web/i18n/de-DE/plugin-tags.ts create mode 100644 web/i18n/de-DE/plugin-trigger.json delete mode 100644 web/i18n/de-DE/plugin-trigger.ts create mode 100644 web/i18n/de-DE/plugin.json delete mode 100644 web/i18n/de-DE/plugin.ts create mode 100644 web/i18n/de-DE/register.json delete mode 100644 web/i18n/de-DE/register.ts create mode 100644 web/i18n/de-DE/run-log.json delete mode 100644 web/i18n/de-DE/run-log.ts create mode 100644 web/i18n/de-DE/share.json delete mode 100644 web/i18n/de-DE/share.ts create mode 100644 web/i18n/de-DE/time.json delete mode 100644 web/i18n/de-DE/time.ts create mode 100644 web/i18n/de-DE/tools.json delete mode 100644 web/i18n/de-DE/tools.ts create mode 100644 web/i18n/de-DE/workflow.json delete mode 100644 web/i18n/de-DE/workflow.ts create mode 100644 web/i18n/en-US/app-annotation.json delete mode 100644 web/i18n/en-US/app-annotation.ts create mode 100644 web/i18n/en-US/app-api.json delete mode 100644 web/i18n/en-US/app-api.ts create mode 100644 web/i18n/en-US/app-debug.json delete mode 100644 web/i18n/en-US/app-debug.ts create mode 100644 web/i18n/en-US/app-log.json delete mode 100644 web/i18n/en-US/app-log.ts create mode 100644 web/i18n/en-US/app-overview.json delete mode 100644 web/i18n/en-US/app-overview.ts create mode 100644 web/i18n/en-US/app.json delete mode 100644 web/i18n/en-US/app.ts create mode 100644 web/i18n/en-US/billing.json delete mode 100644 web/i18n/en-US/billing.ts create mode 100644 web/i18n/en-US/common.json delete mode 100644 web/i18n/en-US/common.ts create mode 100644 web/i18n/en-US/custom.json delete mode 100644 web/i18n/en-US/custom.ts create mode 100644 web/i18n/en-US/dataset-creation.json delete mode 100644 web/i18n/en-US/dataset-creation.ts create mode 100644 web/i18n/en-US/dataset-documents.json delete mode 100644 web/i18n/en-US/dataset-documents.ts create mode 100644 web/i18n/en-US/dataset-hit-testing.json delete mode 100644 web/i18n/en-US/dataset-hit-testing.ts create mode 100644 web/i18n/en-US/dataset-pipeline.json delete mode 100644 web/i18n/en-US/dataset-pipeline.ts create mode 100644 web/i18n/en-US/dataset-settings.json delete mode 100644 web/i18n/en-US/dataset-settings.ts create mode 100644 web/i18n/en-US/dataset.json delete mode 100644 web/i18n/en-US/dataset.ts create mode 100644 web/i18n/en-US/education.json delete mode 100644 web/i18n/en-US/education.ts create mode 100644 web/i18n/en-US/explore.json delete mode 100644 web/i18n/en-US/explore.ts create mode 100644 web/i18n/en-US/layout.json delete mode 100644 web/i18n/en-US/layout.ts create mode 100644 web/i18n/en-US/login.json delete mode 100644 web/i18n/en-US/login.ts create mode 100644 web/i18n/en-US/oauth.json delete mode 100644 web/i18n/en-US/oauth.ts create mode 100644 
web/i18n/en-US/pipeline.json delete mode 100644 web/i18n/en-US/pipeline.ts create mode 100644 web/i18n/en-US/plugin-tags.json delete mode 100644 web/i18n/en-US/plugin-tags.ts create mode 100644 web/i18n/en-US/plugin-trigger.json delete mode 100644 web/i18n/en-US/plugin-trigger.ts create mode 100644 web/i18n/en-US/plugin.json delete mode 100644 web/i18n/en-US/plugin.ts create mode 100644 web/i18n/en-US/register.json delete mode 100644 web/i18n/en-US/register.ts create mode 100644 web/i18n/en-US/run-log.json delete mode 100644 web/i18n/en-US/run-log.ts create mode 100644 web/i18n/en-US/share.json delete mode 100644 web/i18n/en-US/share.ts create mode 100644 web/i18n/en-US/time.json delete mode 100644 web/i18n/en-US/time.ts create mode 100644 web/i18n/en-US/tools.json delete mode 100644 web/i18n/en-US/tools.ts create mode 100644 web/i18n/en-US/workflow.json delete mode 100644 web/i18n/en-US/workflow.ts create mode 100644 web/i18n/es-ES/app-annotation.json delete mode 100644 web/i18n/es-ES/app-annotation.ts create mode 100644 web/i18n/es-ES/app-api.json delete mode 100644 web/i18n/es-ES/app-api.ts create mode 100644 web/i18n/es-ES/app-debug.json delete mode 100644 web/i18n/es-ES/app-debug.ts create mode 100644 web/i18n/es-ES/app-log.json delete mode 100644 web/i18n/es-ES/app-log.ts create mode 100644 web/i18n/es-ES/app-overview.json delete mode 100644 web/i18n/es-ES/app-overview.ts create mode 100644 web/i18n/es-ES/app.json delete mode 100644 web/i18n/es-ES/app.ts create mode 100644 web/i18n/es-ES/billing.json delete mode 100644 web/i18n/es-ES/billing.ts create mode 100644 web/i18n/es-ES/common.json delete mode 100644 web/i18n/es-ES/common.ts create mode 100644 web/i18n/es-ES/custom.json delete mode 100644 web/i18n/es-ES/custom.ts create mode 100644 web/i18n/es-ES/dataset-creation.json delete mode 100644 web/i18n/es-ES/dataset-creation.ts create mode 100644 web/i18n/es-ES/dataset-documents.json delete mode 100644 web/i18n/es-ES/dataset-documents.ts create mode 100644 web/i18n/es-ES/dataset-hit-testing.json delete mode 100644 web/i18n/es-ES/dataset-hit-testing.ts create mode 100644 web/i18n/es-ES/dataset-pipeline.json delete mode 100644 web/i18n/es-ES/dataset-pipeline.ts create mode 100644 web/i18n/es-ES/dataset-settings.json delete mode 100644 web/i18n/es-ES/dataset-settings.ts create mode 100644 web/i18n/es-ES/dataset.json delete mode 100644 web/i18n/es-ES/dataset.ts create mode 100644 web/i18n/es-ES/education.json delete mode 100644 web/i18n/es-ES/education.ts create mode 100644 web/i18n/es-ES/explore.json delete mode 100644 web/i18n/es-ES/explore.ts create mode 100644 web/i18n/es-ES/layout.json delete mode 100644 web/i18n/es-ES/layout.ts create mode 100644 web/i18n/es-ES/login.json delete mode 100644 web/i18n/es-ES/login.ts create mode 100644 web/i18n/es-ES/oauth.json delete mode 100644 web/i18n/es-ES/oauth.ts create mode 100644 web/i18n/es-ES/pipeline.json delete mode 100644 web/i18n/es-ES/pipeline.ts create mode 100644 web/i18n/es-ES/plugin-tags.json delete mode 100644 web/i18n/es-ES/plugin-tags.ts create mode 100644 web/i18n/es-ES/plugin-trigger.json delete mode 100644 web/i18n/es-ES/plugin-trigger.ts create mode 100644 web/i18n/es-ES/plugin.json delete mode 100644 web/i18n/es-ES/plugin.ts create mode 100644 web/i18n/es-ES/register.json delete mode 100644 web/i18n/es-ES/register.ts create mode 100644 web/i18n/es-ES/run-log.json delete mode 100644 web/i18n/es-ES/run-log.ts create mode 100644 web/i18n/es-ES/share.json delete mode 100644 web/i18n/es-ES/share.ts create mode 100644 
web/i18n/es-ES/time.json delete mode 100644 web/i18n/es-ES/time.ts create mode 100644 web/i18n/es-ES/tools.json delete mode 100644 web/i18n/es-ES/tools.ts create mode 100644 web/i18n/es-ES/workflow.json delete mode 100644 web/i18n/es-ES/workflow.ts create mode 100644 web/i18n/fa-IR/app-annotation.json delete mode 100644 web/i18n/fa-IR/app-annotation.ts create mode 100644 web/i18n/fa-IR/app-api.json delete mode 100644 web/i18n/fa-IR/app-api.ts create mode 100644 web/i18n/fa-IR/app-debug.json delete mode 100644 web/i18n/fa-IR/app-debug.ts create mode 100644 web/i18n/fa-IR/app-log.json delete mode 100644 web/i18n/fa-IR/app-log.ts create mode 100644 web/i18n/fa-IR/app-overview.json delete mode 100644 web/i18n/fa-IR/app-overview.ts create mode 100644 web/i18n/fa-IR/app.json delete mode 100644 web/i18n/fa-IR/app.ts create mode 100644 web/i18n/fa-IR/billing.json delete mode 100644 web/i18n/fa-IR/billing.ts create mode 100644 web/i18n/fa-IR/common.json delete mode 100644 web/i18n/fa-IR/common.ts create mode 100644 web/i18n/fa-IR/custom.json delete mode 100644 web/i18n/fa-IR/custom.ts create mode 100644 web/i18n/fa-IR/dataset-creation.json delete mode 100644 web/i18n/fa-IR/dataset-creation.ts create mode 100644 web/i18n/fa-IR/dataset-documents.json delete mode 100644 web/i18n/fa-IR/dataset-documents.ts create mode 100644 web/i18n/fa-IR/dataset-hit-testing.json delete mode 100644 web/i18n/fa-IR/dataset-hit-testing.ts create mode 100644 web/i18n/fa-IR/dataset-pipeline.json delete mode 100644 web/i18n/fa-IR/dataset-pipeline.ts create mode 100644 web/i18n/fa-IR/dataset-settings.json delete mode 100644 web/i18n/fa-IR/dataset-settings.ts create mode 100644 web/i18n/fa-IR/dataset.json delete mode 100644 web/i18n/fa-IR/dataset.ts create mode 100644 web/i18n/fa-IR/education.json delete mode 100644 web/i18n/fa-IR/education.ts create mode 100644 web/i18n/fa-IR/explore.json delete mode 100644 web/i18n/fa-IR/explore.ts create mode 100644 web/i18n/fa-IR/layout.json delete mode 100644 web/i18n/fa-IR/layout.ts create mode 100644 web/i18n/fa-IR/login.json delete mode 100644 web/i18n/fa-IR/login.ts create mode 100644 web/i18n/fa-IR/oauth.json delete mode 100644 web/i18n/fa-IR/oauth.ts create mode 100644 web/i18n/fa-IR/pipeline.json delete mode 100644 web/i18n/fa-IR/pipeline.ts create mode 100644 web/i18n/fa-IR/plugin-tags.json delete mode 100644 web/i18n/fa-IR/plugin-tags.ts create mode 100644 web/i18n/fa-IR/plugin-trigger.json delete mode 100644 web/i18n/fa-IR/plugin-trigger.ts create mode 100644 web/i18n/fa-IR/plugin.json delete mode 100644 web/i18n/fa-IR/plugin.ts create mode 100644 web/i18n/fa-IR/register.json delete mode 100644 web/i18n/fa-IR/register.ts create mode 100644 web/i18n/fa-IR/run-log.json delete mode 100644 web/i18n/fa-IR/run-log.ts create mode 100644 web/i18n/fa-IR/share.json delete mode 100644 web/i18n/fa-IR/share.ts create mode 100644 web/i18n/fa-IR/time.json delete mode 100644 web/i18n/fa-IR/time.ts create mode 100644 web/i18n/fa-IR/tools.json delete mode 100644 web/i18n/fa-IR/tools.ts create mode 100644 web/i18n/fa-IR/workflow.json delete mode 100644 web/i18n/fa-IR/workflow.ts create mode 100644 web/i18n/fr-FR/app-annotation.json delete mode 100644 web/i18n/fr-FR/app-annotation.ts create mode 100644 web/i18n/fr-FR/app-api.json delete mode 100644 web/i18n/fr-FR/app-api.ts create mode 100644 web/i18n/fr-FR/app-debug.json delete mode 100644 web/i18n/fr-FR/app-debug.ts create mode 100644 web/i18n/fr-FR/app-log.json delete mode 100644 web/i18n/fr-FR/app-log.ts create mode 100644 
web/i18n/fr-FR/app-overview.json delete mode 100644 web/i18n/fr-FR/app-overview.ts create mode 100644 web/i18n/fr-FR/app.json delete mode 100644 web/i18n/fr-FR/app.ts create mode 100644 web/i18n/fr-FR/billing.json delete mode 100644 web/i18n/fr-FR/billing.ts create mode 100644 web/i18n/fr-FR/common.json delete mode 100644 web/i18n/fr-FR/common.ts create mode 100644 web/i18n/fr-FR/custom.json delete mode 100644 web/i18n/fr-FR/custom.ts create mode 100644 web/i18n/fr-FR/dataset-creation.json delete mode 100644 web/i18n/fr-FR/dataset-creation.ts create mode 100644 web/i18n/fr-FR/dataset-documents.json delete mode 100644 web/i18n/fr-FR/dataset-documents.ts create mode 100644 web/i18n/fr-FR/dataset-hit-testing.json delete mode 100644 web/i18n/fr-FR/dataset-hit-testing.ts create mode 100644 web/i18n/fr-FR/dataset-pipeline.json delete mode 100644 web/i18n/fr-FR/dataset-pipeline.ts create mode 100644 web/i18n/fr-FR/dataset-settings.json delete mode 100644 web/i18n/fr-FR/dataset-settings.ts create mode 100644 web/i18n/fr-FR/dataset.json delete mode 100644 web/i18n/fr-FR/dataset.ts create mode 100644 web/i18n/fr-FR/education.json delete mode 100644 web/i18n/fr-FR/education.ts create mode 100644 web/i18n/fr-FR/explore.json delete mode 100644 web/i18n/fr-FR/explore.ts create mode 100644 web/i18n/fr-FR/layout.json delete mode 100644 web/i18n/fr-FR/layout.ts create mode 100644 web/i18n/fr-FR/login.json delete mode 100644 web/i18n/fr-FR/login.ts create mode 100644 web/i18n/fr-FR/oauth.json delete mode 100644 web/i18n/fr-FR/oauth.ts create mode 100644 web/i18n/fr-FR/pipeline.json delete mode 100644 web/i18n/fr-FR/pipeline.ts create mode 100644 web/i18n/fr-FR/plugin-tags.json delete mode 100644 web/i18n/fr-FR/plugin-tags.ts create mode 100644 web/i18n/fr-FR/plugin-trigger.json delete mode 100644 web/i18n/fr-FR/plugin-trigger.ts create mode 100644 web/i18n/fr-FR/plugin.json delete mode 100644 web/i18n/fr-FR/plugin.ts create mode 100644 web/i18n/fr-FR/register.json delete mode 100644 web/i18n/fr-FR/register.ts create mode 100644 web/i18n/fr-FR/run-log.json delete mode 100644 web/i18n/fr-FR/run-log.ts create mode 100644 web/i18n/fr-FR/share.json delete mode 100644 web/i18n/fr-FR/share.ts create mode 100644 web/i18n/fr-FR/time.json delete mode 100644 web/i18n/fr-FR/time.ts create mode 100644 web/i18n/fr-FR/tools.json delete mode 100644 web/i18n/fr-FR/tools.ts create mode 100644 web/i18n/fr-FR/workflow.json delete mode 100644 web/i18n/fr-FR/workflow.ts create mode 100644 web/i18n/hi-IN/app-annotation.json delete mode 100644 web/i18n/hi-IN/app-annotation.ts create mode 100644 web/i18n/hi-IN/app-api.json delete mode 100644 web/i18n/hi-IN/app-api.ts create mode 100644 web/i18n/hi-IN/app-debug.json delete mode 100644 web/i18n/hi-IN/app-debug.ts create mode 100644 web/i18n/hi-IN/app-log.json delete mode 100644 web/i18n/hi-IN/app-log.ts create mode 100644 web/i18n/hi-IN/app-overview.json delete mode 100644 web/i18n/hi-IN/app-overview.ts create mode 100644 web/i18n/hi-IN/app.json delete mode 100644 web/i18n/hi-IN/app.ts create mode 100644 web/i18n/hi-IN/billing.json delete mode 100644 web/i18n/hi-IN/billing.ts create mode 100644 web/i18n/hi-IN/common.json delete mode 100644 web/i18n/hi-IN/common.ts create mode 100644 web/i18n/hi-IN/custom.json delete mode 100644 web/i18n/hi-IN/custom.ts create mode 100644 web/i18n/hi-IN/dataset-creation.json delete mode 100644 web/i18n/hi-IN/dataset-creation.ts create mode 100644 web/i18n/hi-IN/dataset-documents.json delete mode 100644 web/i18n/hi-IN/dataset-documents.ts create mode 
100644 web/i18n/hi-IN/dataset-hit-testing.json delete mode 100644 web/i18n/hi-IN/dataset-hit-testing.ts create mode 100644 web/i18n/hi-IN/dataset-pipeline.json delete mode 100644 web/i18n/hi-IN/dataset-pipeline.ts create mode 100644 web/i18n/hi-IN/dataset-settings.json delete mode 100644 web/i18n/hi-IN/dataset-settings.ts create mode 100644 web/i18n/hi-IN/dataset.json delete mode 100644 web/i18n/hi-IN/dataset.ts create mode 100644 web/i18n/hi-IN/education.json delete mode 100644 web/i18n/hi-IN/education.ts create mode 100644 web/i18n/hi-IN/explore.json delete mode 100644 web/i18n/hi-IN/explore.ts create mode 100644 web/i18n/hi-IN/layout.json delete mode 100644 web/i18n/hi-IN/layout.ts create mode 100644 web/i18n/hi-IN/login.json delete mode 100644 web/i18n/hi-IN/login.ts create mode 100644 web/i18n/hi-IN/oauth.json delete mode 100644 web/i18n/hi-IN/oauth.ts create mode 100644 web/i18n/hi-IN/pipeline.json delete mode 100644 web/i18n/hi-IN/pipeline.ts create mode 100644 web/i18n/hi-IN/plugin-tags.json delete mode 100644 web/i18n/hi-IN/plugin-tags.ts create mode 100644 web/i18n/hi-IN/plugin-trigger.json delete mode 100644 web/i18n/hi-IN/plugin-trigger.ts create mode 100644 web/i18n/hi-IN/plugin.json delete mode 100644 web/i18n/hi-IN/plugin.ts create mode 100644 web/i18n/hi-IN/register.json delete mode 100644 web/i18n/hi-IN/register.ts create mode 100644 web/i18n/hi-IN/run-log.json delete mode 100644 web/i18n/hi-IN/run-log.ts create mode 100644 web/i18n/hi-IN/share.json delete mode 100644 web/i18n/hi-IN/share.ts create mode 100644 web/i18n/hi-IN/time.json delete mode 100644 web/i18n/hi-IN/time.ts create mode 100644 web/i18n/hi-IN/tools.json delete mode 100644 web/i18n/hi-IN/tools.ts create mode 100644 web/i18n/hi-IN/workflow.json delete mode 100644 web/i18n/hi-IN/workflow.ts create mode 100644 web/i18n/id-ID/app-annotation.json delete mode 100644 web/i18n/id-ID/app-annotation.ts create mode 100644 web/i18n/id-ID/app-api.json delete mode 100644 web/i18n/id-ID/app-api.ts create mode 100644 web/i18n/id-ID/app-debug.json delete mode 100644 web/i18n/id-ID/app-debug.ts create mode 100644 web/i18n/id-ID/app-log.json delete mode 100644 web/i18n/id-ID/app-log.ts create mode 100644 web/i18n/id-ID/app-overview.json delete mode 100644 web/i18n/id-ID/app-overview.ts create mode 100644 web/i18n/id-ID/app.json delete mode 100644 web/i18n/id-ID/app.ts create mode 100644 web/i18n/id-ID/billing.json delete mode 100644 web/i18n/id-ID/billing.ts create mode 100644 web/i18n/id-ID/common.json delete mode 100644 web/i18n/id-ID/common.ts create mode 100644 web/i18n/id-ID/custom.json delete mode 100644 web/i18n/id-ID/custom.ts create mode 100644 web/i18n/id-ID/dataset-creation.json delete mode 100644 web/i18n/id-ID/dataset-creation.ts create mode 100644 web/i18n/id-ID/dataset-documents.json delete mode 100644 web/i18n/id-ID/dataset-documents.ts create mode 100644 web/i18n/id-ID/dataset-hit-testing.json delete mode 100644 web/i18n/id-ID/dataset-hit-testing.ts create mode 100644 web/i18n/id-ID/dataset-pipeline.json delete mode 100644 web/i18n/id-ID/dataset-pipeline.ts create mode 100644 web/i18n/id-ID/dataset-settings.json delete mode 100644 web/i18n/id-ID/dataset-settings.ts create mode 100644 web/i18n/id-ID/dataset.json delete mode 100644 web/i18n/id-ID/dataset.ts create mode 100644 web/i18n/id-ID/education.json delete mode 100644 web/i18n/id-ID/education.ts create mode 100644 web/i18n/id-ID/explore.json delete mode 100644 web/i18n/id-ID/explore.ts create mode 100644 web/i18n/id-ID/layout.json delete mode 100644 
web/i18n/id-ID/layout.ts create mode 100644 web/i18n/id-ID/login.json delete mode 100644 web/i18n/id-ID/login.ts create mode 100644 web/i18n/id-ID/oauth.json delete mode 100644 web/i18n/id-ID/oauth.ts create mode 100644 web/i18n/id-ID/pipeline.json delete mode 100644 web/i18n/id-ID/pipeline.ts create mode 100644 web/i18n/id-ID/plugin-tags.json delete mode 100644 web/i18n/id-ID/plugin-tags.ts create mode 100644 web/i18n/id-ID/plugin-trigger.json delete mode 100644 web/i18n/id-ID/plugin-trigger.ts create mode 100644 web/i18n/id-ID/plugin.json delete mode 100644 web/i18n/id-ID/plugin.ts create mode 100644 web/i18n/id-ID/register.json delete mode 100644 web/i18n/id-ID/register.ts create mode 100644 web/i18n/id-ID/run-log.json delete mode 100644 web/i18n/id-ID/run-log.ts create mode 100644 web/i18n/id-ID/share.json delete mode 100644 web/i18n/id-ID/share.ts create mode 100644 web/i18n/id-ID/time.json delete mode 100644 web/i18n/id-ID/time.ts create mode 100644 web/i18n/id-ID/tools.json delete mode 100644 web/i18n/id-ID/tools.ts create mode 100644 web/i18n/id-ID/workflow.json delete mode 100644 web/i18n/id-ID/workflow.ts create mode 100644 web/i18n/it-IT/app-annotation.json delete mode 100644 web/i18n/it-IT/app-annotation.ts create mode 100644 web/i18n/it-IT/app-api.json delete mode 100644 web/i18n/it-IT/app-api.ts create mode 100644 web/i18n/it-IT/app-debug.json delete mode 100644 web/i18n/it-IT/app-debug.ts create mode 100644 web/i18n/it-IT/app-log.json delete mode 100644 web/i18n/it-IT/app-log.ts create mode 100644 web/i18n/it-IT/app-overview.json delete mode 100644 web/i18n/it-IT/app-overview.ts create mode 100644 web/i18n/it-IT/app.json delete mode 100644 web/i18n/it-IT/app.ts create mode 100644 web/i18n/it-IT/billing.json delete mode 100644 web/i18n/it-IT/billing.ts create mode 100644 web/i18n/it-IT/common.json delete mode 100644 web/i18n/it-IT/common.ts create mode 100644 web/i18n/it-IT/custom.json delete mode 100644 web/i18n/it-IT/custom.ts create mode 100644 web/i18n/it-IT/dataset-creation.json delete mode 100644 web/i18n/it-IT/dataset-creation.ts create mode 100644 web/i18n/it-IT/dataset-documents.json delete mode 100644 web/i18n/it-IT/dataset-documents.ts create mode 100644 web/i18n/it-IT/dataset-hit-testing.json delete mode 100644 web/i18n/it-IT/dataset-hit-testing.ts create mode 100644 web/i18n/it-IT/dataset-pipeline.json delete mode 100644 web/i18n/it-IT/dataset-pipeline.ts create mode 100644 web/i18n/it-IT/dataset-settings.json delete mode 100644 web/i18n/it-IT/dataset-settings.ts create mode 100644 web/i18n/it-IT/dataset.json delete mode 100644 web/i18n/it-IT/dataset.ts create mode 100644 web/i18n/it-IT/education.json delete mode 100644 web/i18n/it-IT/education.ts create mode 100644 web/i18n/it-IT/explore.json delete mode 100644 web/i18n/it-IT/explore.ts create mode 100644 web/i18n/it-IT/layout.json delete mode 100644 web/i18n/it-IT/layout.ts create mode 100644 web/i18n/it-IT/login.json delete mode 100644 web/i18n/it-IT/login.ts create mode 100644 web/i18n/it-IT/oauth.json delete mode 100644 web/i18n/it-IT/oauth.ts create mode 100644 web/i18n/it-IT/pipeline.json delete mode 100644 web/i18n/it-IT/pipeline.ts create mode 100644 web/i18n/it-IT/plugin-tags.json delete mode 100644 web/i18n/it-IT/plugin-tags.ts create mode 100644 web/i18n/it-IT/plugin-trigger.json delete mode 100644 web/i18n/it-IT/plugin-trigger.ts create mode 100644 web/i18n/it-IT/plugin.json delete mode 100644 web/i18n/it-IT/plugin.ts create mode 100644 web/i18n/it-IT/register.json delete mode 100644 
web/i18n/it-IT/register.ts create mode 100644 web/i18n/it-IT/run-log.json delete mode 100644 web/i18n/it-IT/run-log.ts create mode 100644 web/i18n/it-IT/share.json delete mode 100644 web/i18n/it-IT/share.ts create mode 100644 web/i18n/it-IT/time.json delete mode 100644 web/i18n/it-IT/time.ts create mode 100644 web/i18n/it-IT/tools.json delete mode 100644 web/i18n/it-IT/tools.ts create mode 100644 web/i18n/it-IT/workflow.json delete mode 100644 web/i18n/it-IT/workflow.ts create mode 100644 web/i18n/ja-JP/app-annotation.json delete mode 100644 web/i18n/ja-JP/app-annotation.ts create mode 100644 web/i18n/ja-JP/app-api.json delete mode 100644 web/i18n/ja-JP/app-api.ts create mode 100644 web/i18n/ja-JP/app-debug.json delete mode 100644 web/i18n/ja-JP/app-debug.ts create mode 100644 web/i18n/ja-JP/app-log.json delete mode 100644 web/i18n/ja-JP/app-log.ts create mode 100644 web/i18n/ja-JP/app-overview.json delete mode 100644 web/i18n/ja-JP/app-overview.ts create mode 100644 web/i18n/ja-JP/app.json delete mode 100644 web/i18n/ja-JP/app.ts create mode 100644 web/i18n/ja-JP/billing.json delete mode 100644 web/i18n/ja-JP/billing.ts create mode 100644 web/i18n/ja-JP/common.json delete mode 100644 web/i18n/ja-JP/common.ts create mode 100644 web/i18n/ja-JP/custom.json delete mode 100644 web/i18n/ja-JP/custom.ts create mode 100644 web/i18n/ja-JP/dataset-creation.json delete mode 100644 web/i18n/ja-JP/dataset-creation.ts create mode 100644 web/i18n/ja-JP/dataset-documents.json delete mode 100644 web/i18n/ja-JP/dataset-documents.ts create mode 100644 web/i18n/ja-JP/dataset-hit-testing.json delete mode 100644 web/i18n/ja-JP/dataset-hit-testing.ts create mode 100644 web/i18n/ja-JP/dataset-pipeline.json delete mode 100644 web/i18n/ja-JP/dataset-pipeline.ts create mode 100644 web/i18n/ja-JP/dataset-settings.json delete mode 100644 web/i18n/ja-JP/dataset-settings.ts create mode 100644 web/i18n/ja-JP/dataset.json delete mode 100644 web/i18n/ja-JP/dataset.ts create mode 100644 web/i18n/ja-JP/education.json delete mode 100644 web/i18n/ja-JP/education.ts create mode 100644 web/i18n/ja-JP/explore.json delete mode 100644 web/i18n/ja-JP/explore.ts create mode 100644 web/i18n/ja-JP/layout.json delete mode 100644 web/i18n/ja-JP/layout.ts create mode 100644 web/i18n/ja-JP/login.json delete mode 100644 web/i18n/ja-JP/login.ts create mode 100644 web/i18n/ja-JP/oauth.json delete mode 100644 web/i18n/ja-JP/oauth.ts create mode 100644 web/i18n/ja-JP/pipeline.json delete mode 100644 web/i18n/ja-JP/pipeline.ts create mode 100644 web/i18n/ja-JP/plugin-tags.json delete mode 100644 web/i18n/ja-JP/plugin-tags.ts create mode 100644 web/i18n/ja-JP/plugin-trigger.json delete mode 100644 web/i18n/ja-JP/plugin-trigger.ts create mode 100644 web/i18n/ja-JP/plugin.json delete mode 100644 web/i18n/ja-JP/plugin.ts create mode 100644 web/i18n/ja-JP/register.json delete mode 100644 web/i18n/ja-JP/register.ts create mode 100644 web/i18n/ja-JP/run-log.json delete mode 100644 web/i18n/ja-JP/run-log.ts create mode 100644 web/i18n/ja-JP/share.json delete mode 100644 web/i18n/ja-JP/share.ts create mode 100644 web/i18n/ja-JP/time.json delete mode 100644 web/i18n/ja-JP/time.ts create mode 100644 web/i18n/ja-JP/tools.json delete mode 100644 web/i18n/ja-JP/tools.ts create mode 100644 web/i18n/ja-JP/workflow.json delete mode 100644 web/i18n/ja-JP/workflow.ts create mode 100644 web/i18n/ko-KR/app-annotation.json delete mode 100644 web/i18n/ko-KR/app-annotation.ts create mode 100644 web/i18n/ko-KR/app-api.json delete mode 100644 web/i18n/ko-KR/app-api.ts 
create mode 100644 web/i18n/ko-KR/app-debug.json delete mode 100644 web/i18n/ko-KR/app-debug.ts create mode 100644 web/i18n/ko-KR/app-log.json delete mode 100644 web/i18n/ko-KR/app-log.ts create mode 100644 web/i18n/ko-KR/app-overview.json delete mode 100644 web/i18n/ko-KR/app-overview.ts create mode 100644 web/i18n/ko-KR/app.json delete mode 100644 web/i18n/ko-KR/app.ts create mode 100644 web/i18n/ko-KR/billing.json delete mode 100644 web/i18n/ko-KR/billing.ts create mode 100644 web/i18n/ko-KR/common.json delete mode 100644 web/i18n/ko-KR/common.ts create mode 100644 web/i18n/ko-KR/custom.json delete mode 100644 web/i18n/ko-KR/custom.ts create mode 100644 web/i18n/ko-KR/dataset-creation.json delete mode 100644 web/i18n/ko-KR/dataset-creation.ts create mode 100644 web/i18n/ko-KR/dataset-documents.json delete mode 100644 web/i18n/ko-KR/dataset-documents.ts create mode 100644 web/i18n/ko-KR/dataset-hit-testing.json delete mode 100644 web/i18n/ko-KR/dataset-hit-testing.ts create mode 100644 web/i18n/ko-KR/dataset-pipeline.json delete mode 100644 web/i18n/ko-KR/dataset-pipeline.ts create mode 100644 web/i18n/ko-KR/dataset-settings.json delete mode 100644 web/i18n/ko-KR/dataset-settings.ts create mode 100644 web/i18n/ko-KR/dataset.json delete mode 100644 web/i18n/ko-KR/dataset.ts create mode 100644 web/i18n/ko-KR/education.json delete mode 100644 web/i18n/ko-KR/education.ts create mode 100644 web/i18n/ko-KR/explore.json delete mode 100644 web/i18n/ko-KR/explore.ts create mode 100644 web/i18n/ko-KR/layout.json delete mode 100644 web/i18n/ko-KR/layout.ts create mode 100644 web/i18n/ko-KR/login.json delete mode 100644 web/i18n/ko-KR/login.ts create mode 100644 web/i18n/ko-KR/oauth.json delete mode 100644 web/i18n/ko-KR/oauth.ts create mode 100644 web/i18n/ko-KR/pipeline.json delete mode 100644 web/i18n/ko-KR/pipeline.ts create mode 100644 web/i18n/ko-KR/plugin-tags.json delete mode 100644 web/i18n/ko-KR/plugin-tags.ts create mode 100644 web/i18n/ko-KR/plugin-trigger.json delete mode 100644 web/i18n/ko-KR/plugin-trigger.ts create mode 100644 web/i18n/ko-KR/plugin.json delete mode 100644 web/i18n/ko-KR/plugin.ts create mode 100644 web/i18n/ko-KR/register.json delete mode 100644 web/i18n/ko-KR/register.ts create mode 100644 web/i18n/ko-KR/run-log.json delete mode 100644 web/i18n/ko-KR/run-log.ts create mode 100644 web/i18n/ko-KR/share.json delete mode 100644 web/i18n/ko-KR/share.ts create mode 100644 web/i18n/ko-KR/time.json delete mode 100644 web/i18n/ko-KR/time.ts create mode 100644 web/i18n/ko-KR/tools.json delete mode 100644 web/i18n/ko-KR/tools.ts create mode 100644 web/i18n/ko-KR/workflow.json delete mode 100644 web/i18n/ko-KR/workflow.ts create mode 100644 web/i18n/pl-PL/app-annotation.json delete mode 100644 web/i18n/pl-PL/app-annotation.ts create mode 100644 web/i18n/pl-PL/app-api.json delete mode 100644 web/i18n/pl-PL/app-api.ts create mode 100644 web/i18n/pl-PL/app-debug.json delete mode 100644 web/i18n/pl-PL/app-debug.ts create mode 100644 web/i18n/pl-PL/app-log.json delete mode 100644 web/i18n/pl-PL/app-log.ts create mode 100644 web/i18n/pl-PL/app-overview.json delete mode 100644 web/i18n/pl-PL/app-overview.ts create mode 100644 web/i18n/pl-PL/app.json delete mode 100644 web/i18n/pl-PL/app.ts create mode 100644 web/i18n/pl-PL/billing.json delete mode 100644 web/i18n/pl-PL/billing.ts create mode 100644 web/i18n/pl-PL/common.json delete mode 100644 web/i18n/pl-PL/common.ts create mode 100644 web/i18n/pl-PL/custom.json delete mode 100644 web/i18n/pl-PL/custom.ts create mode 100644 
web/i18n/pl-PL/dataset-creation.json delete mode 100644 web/i18n/pl-PL/dataset-creation.ts create mode 100644 web/i18n/pl-PL/dataset-documents.json delete mode 100644 web/i18n/pl-PL/dataset-documents.ts create mode 100644 web/i18n/pl-PL/dataset-hit-testing.json delete mode 100644 web/i18n/pl-PL/dataset-hit-testing.ts create mode 100644 web/i18n/pl-PL/dataset-pipeline.json delete mode 100644 web/i18n/pl-PL/dataset-pipeline.ts create mode 100644 web/i18n/pl-PL/dataset-settings.json delete mode 100644 web/i18n/pl-PL/dataset-settings.ts create mode 100644 web/i18n/pl-PL/dataset.json delete mode 100644 web/i18n/pl-PL/dataset.ts create mode 100644 web/i18n/pl-PL/education.json delete mode 100644 web/i18n/pl-PL/education.ts create mode 100644 web/i18n/pl-PL/explore.json delete mode 100644 web/i18n/pl-PL/explore.ts create mode 100644 web/i18n/pl-PL/layout.json delete mode 100644 web/i18n/pl-PL/layout.ts create mode 100644 web/i18n/pl-PL/login.json delete mode 100644 web/i18n/pl-PL/login.ts create mode 100644 web/i18n/pl-PL/oauth.json delete mode 100644 web/i18n/pl-PL/oauth.ts create mode 100644 web/i18n/pl-PL/pipeline.json delete mode 100644 web/i18n/pl-PL/pipeline.ts create mode 100644 web/i18n/pl-PL/plugin-tags.json delete mode 100644 web/i18n/pl-PL/plugin-tags.ts create mode 100644 web/i18n/pl-PL/plugin-trigger.json delete mode 100644 web/i18n/pl-PL/plugin-trigger.ts create mode 100644 web/i18n/pl-PL/plugin.json delete mode 100644 web/i18n/pl-PL/plugin.ts create mode 100644 web/i18n/pl-PL/register.json delete mode 100644 web/i18n/pl-PL/register.ts create mode 100644 web/i18n/pl-PL/run-log.json delete mode 100644 web/i18n/pl-PL/run-log.ts create mode 100644 web/i18n/pl-PL/share.json delete mode 100644 web/i18n/pl-PL/share.ts create mode 100644 web/i18n/pl-PL/time.json delete mode 100644 web/i18n/pl-PL/time.ts create mode 100644 web/i18n/pl-PL/tools.json delete mode 100644 web/i18n/pl-PL/tools.ts create mode 100644 web/i18n/pl-PL/workflow.json delete mode 100644 web/i18n/pl-PL/workflow.ts create mode 100644 web/i18n/pt-BR/app-annotation.json delete mode 100644 web/i18n/pt-BR/app-annotation.ts create mode 100644 web/i18n/pt-BR/app-api.json delete mode 100644 web/i18n/pt-BR/app-api.ts create mode 100644 web/i18n/pt-BR/app-debug.json delete mode 100644 web/i18n/pt-BR/app-debug.ts create mode 100644 web/i18n/pt-BR/app-log.json delete mode 100644 web/i18n/pt-BR/app-log.ts create mode 100644 web/i18n/pt-BR/app-overview.json delete mode 100644 web/i18n/pt-BR/app-overview.ts create mode 100644 web/i18n/pt-BR/app.json delete mode 100644 web/i18n/pt-BR/app.ts create mode 100644 web/i18n/pt-BR/billing.json delete mode 100644 web/i18n/pt-BR/billing.ts create mode 100644 web/i18n/pt-BR/common.json delete mode 100644 web/i18n/pt-BR/common.ts create mode 100644 web/i18n/pt-BR/custom.json delete mode 100644 web/i18n/pt-BR/custom.ts create mode 100644 web/i18n/pt-BR/dataset-creation.json delete mode 100644 web/i18n/pt-BR/dataset-creation.ts create mode 100644 web/i18n/pt-BR/dataset-documents.json delete mode 100644 web/i18n/pt-BR/dataset-documents.ts create mode 100644 web/i18n/pt-BR/dataset-hit-testing.json delete mode 100644 web/i18n/pt-BR/dataset-hit-testing.ts create mode 100644 web/i18n/pt-BR/dataset-pipeline.json delete mode 100644 web/i18n/pt-BR/dataset-pipeline.ts create mode 100644 web/i18n/pt-BR/dataset-settings.json delete mode 100644 web/i18n/pt-BR/dataset-settings.ts create mode 100644 web/i18n/pt-BR/dataset.json delete mode 100644 web/i18n/pt-BR/dataset.ts create mode 100644 
web/i18n/pt-BR/{education,explore,layout,login,oauth,pipeline,plugin-tags,plugin-trigger,plugin,register,run-log,share,time,tools,workflow}.json
 delete mode 100644 web/i18n/pt-BR/{education,explore,layout,login,oauth,pipeline,plugin-tags,plugin-trigger,plugin,register,run-log,share,time,tools,workflow}.ts
 create mode 100644 web/i18n/{ro-RO,ru-RU,sl-SI,th-TH,tr-TR,uk-UA,vi-VN,zh-Hans}/{app-annotation,app-api,app-debug,app-log,app-overview,app,billing,common,custom,dataset-creation,dataset-documents,dataset-hit-testing,dataset-pipeline,dataset-settings,dataset,education,explore,layout,login,oauth,pipeline,plugin-tags,plugin-trigger,plugin,register,run-log,share,time,tools,workflow}.json
 delete mode 100644 web/i18n/{ro-RO,ru-RU,sl-SI,th-TH,tr-TR,uk-UA,vi-VN,zh-Hans}/{app-annotation,app-api,app-debug,app-log,app-overview,app,billing,common,custom,dataset-creation,dataset-documents,dataset-hit-testing,dataset-pipeline,dataset-settings,dataset,education,explore,layout,login,oauth,pipeline,plugin-tags,plugin-trigger,plugin,register,run-log,share,time,tools,workflow}.ts
 create mode 100644 web/i18n/zh-Hant/{app-annotation,app-api,app-debug}.json
 delete mode 100644 web/i18n/zh-Hant/{app-annotation,app-api,app-debug}.ts
 create mode 100644 web/i18n/zh-Hant/{app-log,app-overview,app,billing,common,custom,dataset-creation,dataset-documents,dataset-hit-testing,dataset-pipeline,dataset-settings,dataset,education,explore,layout,login,oauth,pipeline,plugin-tags,plugin-trigger,plugin,register,run-log,share,time,tools,workflow}.json
 delete mode 100644 web/i18n/zh-Hant/{app-log,app-overview,app,billing,common,custom,dataset-creation,dataset-documents,dataset-hit-testing,dataset-pipeline,dataset-settings,dataset,education,explore,layout,login,oauth,pipeline,plugin-tags,plugin-trigger,plugin,register,run-log,share,time,tools,workflow}.ts
 delete mode 100644 web/jest.config.ts
 delete mode 100644 web/jest.setup.ts
 rename web/{testing => scripts}/analyze-component.js (60%)
 create mode 100644 web/scripts/analyze-i18n-diff.ts
 rename web/{i18n-config => scripts}/auto-gen-i18n.js (60%)
 rename web/{i18n-config => scripts}/check-i18n.js (53%)
 create mode 100644 web/scripts/component-analyzer.js
 create mode 100644 web/scripts/refactor-component.js
 create mode 100644 web/service/use-log.ts
 create mode 100644 web/service/use-share.spec.tsx
 create mode 100644 web/vitest.config.ts
 create mode 100644 web/vitest.setup.ts

diff --git a/web/.env.example b/web/.env.example
index b488c31057..c06a4fba87 100644
--- a/web/.env.example
+++ b/web/.env.example
@@ -73,3 +73,6 @@ NEXT_PUBLIC_MAX_TREE_DEPTH=50

 # The API key of amplitude
 NEXT_PUBLIC_AMPLITUDE_API_KEY=
+
+# number of concurrency
+NEXT_PUBLIC_BATCH_CONCURRENCY=5
diff --git a/web/.gitignore b/web/.gitignore
index 048c5f6485..9de3dc83f9 100644
--- a/web/.gitignore
+++ b/web/.gitignore
@@ -54,3 +54,13 @@ package-lock.json

 # mise
 mise.toml
+
+# PWA generated files
+public/sw.js
+public/sw.js.map
+public/workbox-*.js
+public/workbox-*.js.map
+public/fallback-*.js
+
+.vscode/settings.json
+.vscode/mcp.json
diff --git a/web/.oxlintrc.json b/web/.oxlintrc.json
deleted file mode 100644
index 57eddd34fb..0000000000
--- a/web/.oxlintrc.json
+++ /dev/null
@@ -1,144 +0,0 @@
-{
-  "plugins": [
-    "unicorn",
-    "typescript",
-    "oxc"
-  ],
-  "categories": {},
-  "rules": {
-    "for-direction": "error",
-    "no-async-promise-executor": "error",
-    "no-caller": "error",
-    "no-class-assign": "error",
-    "no-compare-neg-zero": "error",
-    "no-cond-assign": "warn",
-    "no-const-assign": "warn",
-    "no-constant-binary-expression": "error",
-    "no-constant-condition": "warn",
-    "no-control-regex": "warn",
-    "no-debugger": "warn",
-    "no-delete-var": "warn",
-    "no-dupe-class-members": "warn",
-    "no-dupe-else-if": "warn",
-    "no-dupe-keys": "warn",
-    "no-duplicate-case": "warn",
-    "no-empty-character-class": "warn",
-    "no-empty-pattern": "warn",
-    "no-empty-static-block": "warn",
-    "no-eval": "warn",
-    "no-ex-assign": "warn",
-    "no-extra-boolean-cast": "warn",
-    "no-func-assign": "warn",
-    "no-global-assign": "warn",
-    "no-import-assign": "warn",
-    "no-invalid-regexp": "warn",
-    "no-irregular-whitespace": "warn",
-    "no-loss-of-precision": "warn",
-    "no-new-native-nonconstructor": "warn",
-    "no-nonoctal-decimal-escape": "warn",
-    "no-obj-calls": "warn",
-    "no-self-assign": "warn",
-    "no-setter-return": "warn",
-    "no-shadow-restricted-names": "warn",
-    "no-sparse-arrays": "warn",
-    "no-this-before-super": "warn",
-    "no-unassigned-vars": "warn",
-    "no-unsafe-finally": "warn",
-    "no-unsafe-negation": "warn",
-    "no-unsafe-optional-chaining": "error",
-    "no-unused-labels": "warn",
-    "no-unused-private-class-members": "warn",
-    "no-unused-vars": "warn",
-    "no-useless-backreference": "warn",
-    "no-useless-catch": "error",
-    "no-useless-escape": "warn",
-    "no-useless-rename": "warn",
-    "no-with": "warn",
-    "require-yield": "warn",
-    "use-isnan": "warn",
-    "valid-typeof": "warn",
-    "oxc/bad-array-method-on-arguments": "warn",
-    "oxc/bad-char-at-comparison": "warn",
-    "oxc/bad-comparison-sequence": "warn",
-    "oxc/bad-min-max-func": "warn",
-    "oxc/bad-object-literal-comparison": "warn",
-    "oxc/bad-replace-all-arg": "warn",
-    "oxc/const-comparisons": "warn",
-    "oxc/double-comparisons": "warn",
-    "oxc/erasing-op": "warn",
-    "oxc/missing-throw": "warn",
-    "oxc/number-arg-out-of-range": "warn",
-    "oxc/only-used-in-recursion": "warn",
-    "oxc/uninvoked-array-callback": "warn",
-    "typescript/await-thenable": "warn",
-    "typescript/no-array-delete": "warn",
-    "typescript/no-base-to-string": "warn",
-    "typescript/no-confusing-void-expression": "warn",
-    "typescript/no-duplicate-enum-values": "warn",
-    "typescript/no-duplicate-type-constituents": "warn",
-    "typescript/no-extra-non-null-assertion": "warn",
-    "typescript/no-floating-promises": "warn",
-    "typescript/no-for-in-array": "warn",
-    "typescript/no-implied-eval": "warn",
-    "typescript/no-meaningless-void-operator": "warn",
-    "typescript/no-misused-new": "warn",
-    "typescript/no-misused-spread": "warn",
-    "typescript/no-non-null-asserted-optional-chain": "warn",
-    "typescript/no-redundant-type-constituents": "warn",
-    "typescript/no-this-alias": "warn",
-    "typescript/no-unnecessary-parameter-property-assignment": "warn",
-    "typescript/no-unsafe-declaration-merging": "warn",
-    "typescript/no-unsafe-unary-minus": "warn",
-    "typescript/no-useless-empty-export": "warn",
-    "typescript/no-wrapper-object-types": "warn",
-    "typescript/prefer-as-const": "warn",
-    "typescript/require-array-sort-compare": "warn",
-    "typescript/restrict-template-expressions": "warn",
-    "typescript/triple-slash-reference": "warn",
-    "typescript/unbound-method": "warn",
-    "unicorn/no-await-in-promise-methods": "warn",
-    "unicorn/no-empty-file": "warn",
-    "unicorn/no-invalid-fetch-options": "warn",
-    "unicorn/no-invalid-remove-event-listener": "warn",
-    "unicorn/no-new-array": "warn",
-    "unicorn/no-single-promise-in-promise-methods": "warn",
-    "unicorn/no-thenable": "warn",
-    "unicorn/no-unnecessary-await": "warn",
-    "unicorn/no-useless-fallback-in-spread": "warn",
-    "unicorn/no-useless-length-check": "warn",
-    "unicorn/no-useless-spread": "warn",
-    "unicorn/prefer-set-size": "warn",
-    "unicorn/prefer-string-starts-ends-with": "warn"
-  },
-  "settings": {
-    "jsx-a11y": {
-      "polymorphicPropName": null,
-      "components": {},
-      "attributes": {}
-    },
-    "next": {
-      "rootDir": []
-    },
-    "react": {
-      "formComponents": [],
-      "linkComponents": []
-    },
-    "jsdoc": {
-      "ignorePrivate": false,
-      "ignoreInternal": false,
-      "ignoreReplacesDocs": true,
-      "overrideReplacesDocs": true,
-      "augmentsExtendsReplacesDocs": false,
-      "implementsReplacesDocs": false,
-      "exemptDestructuredRootsFromChecks": false,
-      "tagNamePreference": {}
-    }
-  },
-  "env": {
-    "builtin": true
-  },
-  "globals": {},
-  "ignorePatterns": [
-    "**/*.js"
-  ]
-}
\ No newline at end of file
diff --git a/web/.storybook/preview.tsx b/web/.storybook/preview.tsx
index 1f5726de34..37c636cc75 100644
--- a/web/.storybook/preview.tsx
+++ b/web/.storybook/preview.tsx
@@ -1,8 +1,8 @@
 import type { Preview } from '@storybook/react'
 import { withThemeByDataAttribute } from '@storybook/addon-themes'
 import { QueryClient, QueryClientProvider } from '@tanstack/react-query'
-import I18N from '../app/components/i18n'
 import { ToastProvider } from '../app/components/base/toast'
+import I18N from '../app/components/i18n'
 import '../app/styles/globals.css'
 import '../app/styles/markdown.scss'

diff --git a/web/.storybook/utils/form-story-wrapper.tsx b/web/.storybook/utils/form-story-wrapper.tsx
index 689c3a20ff..90349a0325 100644
--- a/web/.storybook/utils/form-story-wrapper.tsx
+++ b/web/.storybook/utils/form-story-wrapper.tsx
@@ -1,6 +1,6 @@
-import { useState } from 'react'
 import type { ReactNode } from 'react'
 import { useStore } from '@tanstack/react-form'
+import { useState } from 'react'
 import { useAppForm } from '@/app/components/base/form'

 type UseAppFormOptions = Parameters<typeof useAppForm>[0]
@@ -49,7 +49,12 @@ export const FormStoryWrapper = ({