From 66232792a2eb6c23588cef796183c0b03b388037 Mon Sep 17 00:00:00 2001 From: ShuangLiu Date: Tue, 12 Aug 2025 12:01:51 +0800 Subject: [PATCH 01/27] fix: add MAX_TREE_DEPTH in env.service.web (#23785) Co-authored-by: crazywoola <427733928@qq.com> --- docker/docker-compose-template.yaml | 1 + docker/docker-compose.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/docker/docker-compose-template.yaml b/docker/docker-compose-template.yaml index 6494087a4a..1dbd9b3993 100644 --- a/docker/docker-compose-template.yaml +++ b/docker/docker-compose-template.yaml @@ -96,6 +96,7 @@ services: MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} + MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50} ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index d64a8566a0..779fbf382a 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -662,6 +662,7 @@ services: MAX_TOOLS_NUM: ${MAX_TOOLS_NUM:-10} MAX_PARALLEL_LIMIT: ${MAX_PARALLEL_LIMIT:-10} MAX_ITERATIONS_NUM: ${MAX_ITERATIONS_NUM:-99} + MAX_TREE_DEPTH: ${MAX_TREE_DEPTH:-50} ENABLE_WEBSITE_JINAREADER: ${ENABLE_WEBSITE_JINAREADER:-true} ENABLE_WEBSITE_FIRECRAWL: ${ENABLE_WEBSITE_FIRECRAWL:-true} ENABLE_WEBSITE_WATERCRAWL: ${ENABLE_WEBSITE_WATERCRAWL:-true} From d3eff9b1a3c8492147d5a08c345ba51775c8a95c Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 12 Aug 2025 12:03:04 +0800 Subject: [PATCH 02/27] fix: prevent X button flying to screen corners in dataset settings modal (#23788) --- web/app/components/datasets/rename-modal/index.tsx | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/web/app/components/datasets/rename-modal/index.tsx b/web/app/components/datasets/rename-modal/index.tsx index c497dbb957..0104b4ac2a 100644 --- a/web/app/components/datasets/rename-modal/index.tsx +++ b/web/app/components/datasets/rename-modal/index.tsx @@ -69,9 +69,11 @@ const RenameDatasetModal = ({ show, dataset, onSuccess, onClose }: RenameDataset isShow={show} onClose={noop} > -
-      <div …>{t('datasetSettings.title')}</div>
-      <div …>
-        <… />
+      <div …>
+        <div …>{t('datasetSettings.title')}</div>
+        <div …>
+          <… />
+        </div>
From 1ffe1905578ee6d87184c71440e8786cc638f63d Mon Sep 17 00:00:00 2001 From: Will Date: Tue, 12 Aug 2025 13:14:00 +0800 Subject: [PATCH 03/27] Feat/improved mcp timeout configs (#23605) Co-authored-by: crazywoola <427733928@qq.com> --- .../console/workspace/tool_providers.py | 10 ++++++ api/core/mcp/client/sse_client.py | 2 +- api/core/mcp/client/streamable_client.py | 26 +++++++-------- api/core/mcp/mcp_client.py | 28 ++++++++++------ api/core/mcp/session/base_session.py | 8 +---- api/core/mcp/session/client_session.py | 5 +-- api/core/tools/__base/tool_provider.py | 2 -- api/core/tools/mcp_tool/provider.py | 30 +++++++++++++---- api/core/tools/mcp_tool/tool.py | 27 +++++++++++++-- api/core/tools/tool_manager.py | 3 -- ...f407_add_timeout_for_tool_mcp_providers.py | 33 +++++++++++++++++++ api/models/tools.py | 2 ++ .../tools/mcp_tools_manage_service.py | 10 ++++++ web/app/components/tools/mcp/modal.tsx | 32 +++++++++++++++++- web/app/components/tools/types.ts | 2 ++ web/i18n/en-US/tools.ts | 2 ++ web/i18n/zh-Hans/tools.ts | 2 ++ web/service/use-tools.ts | 4 +++ 18 files changed, 180 insertions(+), 48 deletions(-) create mode 100644 api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py diff --git a/api/controllers/console/workspace/tool_providers.py b/api/controllers/console/workspace/tool_providers.py index c4d1ef70d8..8c8b73b45d 100644 --- a/api/controllers/console/workspace/tool_providers.py +++ b/api/controllers/console/workspace/tool_providers.py @@ -862,6 +862,10 @@ class ToolProviderMCPApi(Resource): parser.add_argument("icon_type", type=str, required=True, nullable=False, location="json") parser.add_argument("icon_background", type=str, required=False, nullable=True, location="json", default="") parser.add_argument("server_identifier", type=str, required=True, nullable=False, location="json") + parser.add_argument("timeout", type=float, required=False, nullable=False, location="json", default=30) + parser.add_argument( + "sse_read_timeout", type=float, required=False, nullable=False, location="json", default=300 + ) args = parser.parse_args() user = current_user if not is_valid_url(args["server_url"]): @@ -876,6 +880,8 @@ class ToolProviderMCPApi(Resource): icon_background=args["icon_background"], user_id=user.id, server_identifier=args["server_identifier"], + timeout=args["timeout"], + sse_read_timeout=args["sse_read_timeout"], ) ) @@ -891,6 +897,8 @@ class ToolProviderMCPApi(Resource): parser.add_argument("icon_background", type=str, required=False, nullable=True, location="json") parser.add_argument("provider_id", type=str, required=True, nullable=False, location="json") parser.add_argument("server_identifier", type=str, required=True, nullable=False, location="json") + parser.add_argument("timeout", type=float, required=False, nullable=True, location="json") + parser.add_argument("sse_read_timeout", type=float, required=False, nullable=True, location="json") args = parser.parse_args() if not is_valid_url(args["server_url"]): if "[__HIDDEN__]" in args["server_url"]: @@ -906,6 +914,8 @@ class ToolProviderMCPApi(Resource): icon_type=args["icon_type"], icon_background=args["icon_background"], server_identifier=args["server_identifier"], + timeout=args.get("timeout"), + sse_read_timeout=args.get("sse_read_timeout"), ) return {"result": "success"} diff --git a/api/core/mcp/client/sse_client.py b/api/core/mcp/client/sse_client.py index 4226e77f7e..2d3a3f5344 100644 --- a/api/core/mcp/client/sse_client.py +++ b/api/core/mcp/client/sse_client.py @@ 
-327,7 +327,7 @@ def send_message(http_client: httpx.Client, endpoint_url: str, session_message: ) response.raise_for_status() logger.debug("Client message sent successfully: %s", response.status_code) - except Exception as exc: + except Exception: logger.exception("Error sending message") raise diff --git a/api/core/mcp/client/streamable_client.py b/api/core/mcp/client/streamable_client.py index ca414ebb93..14e346c2f3 100644 --- a/api/core/mcp/client/streamable_client.py +++ b/api/core/mcp/client/streamable_client.py @@ -55,14 +55,10 @@ DEFAULT_QUEUE_READ_TIMEOUT = 3 class StreamableHTTPError(Exception): """Base exception for StreamableHTTP transport errors.""" - pass - class ResumptionError(StreamableHTTPError): """Raised when resumption request is invalid.""" - pass - @dataclass class RequestContext: @@ -74,7 +70,7 @@ class RequestContext: session_message: SessionMessage metadata: ClientMessageMetadata | None server_to_client_queue: ServerToClientQueue # Renamed for clarity - sse_read_timeout: timedelta + sse_read_timeout: float class StreamableHTTPTransport: @@ -84,8 +80,8 @@ class StreamableHTTPTransport: self, url: str, headers: dict[str, Any] | None = None, - timeout: timedelta = timedelta(seconds=30), - sse_read_timeout: timedelta = timedelta(seconds=60 * 5), + timeout: float | timedelta = 30, + sse_read_timeout: float | timedelta = 60 * 5, ) -> None: """Initialize the StreamableHTTP transport. @@ -97,8 +93,10 @@ class StreamableHTTPTransport: """ self.url = url self.headers = headers or {} - self.timeout = timeout - self.sse_read_timeout = sse_read_timeout + self.timeout = timeout.total_seconds() if isinstance(timeout, timedelta) else timeout + self.sse_read_timeout = ( + sse_read_timeout.total_seconds() if isinstance(sse_read_timeout, timedelta) else sse_read_timeout + ) self.session_id: str | None = None self.request_headers = { ACCEPT: f"{JSON}, {SSE}", @@ -186,7 +184,7 @@ class StreamableHTTPTransport: with ssrf_proxy_sse_connect( self.url, headers=headers, - timeout=httpx.Timeout(self.timeout.seconds, read=self.sse_read_timeout.seconds), + timeout=httpx.Timeout(self.timeout, read=self.sse_read_timeout), client=client, method="GET", ) as event_source: @@ -215,7 +213,7 @@ class StreamableHTTPTransport: with ssrf_proxy_sse_connect( self.url, headers=headers, - timeout=httpx.Timeout(self.timeout.seconds, read=ctx.sse_read_timeout.seconds), + timeout=httpx.Timeout(self.timeout, read=self.sse_read_timeout), client=ctx.client, method="GET", ) as event_source: @@ -402,8 +400,8 @@ class StreamableHTTPTransport: def streamablehttp_client( url: str, headers: dict[str, Any] | None = None, - timeout: timedelta = timedelta(seconds=30), - sse_read_timeout: timedelta = timedelta(seconds=60 * 5), + timeout: float | timedelta = 30, + sse_read_timeout: float | timedelta = 60 * 5, terminate_on_close: bool = True, ) -> Generator[ tuple[ @@ -436,7 +434,7 @@ def streamablehttp_client( try: with create_ssrf_proxy_mcp_http_client( headers=transport.request_headers, - timeout=httpx.Timeout(transport.timeout.seconds, read=transport.sse_read_timeout.seconds), + timeout=httpx.Timeout(transport.timeout, read=transport.sse_read_timeout), ) as client: # Define callbacks that need access to thread pool def start_get_stream() -> None: diff --git a/api/core/mcp/mcp_client.py b/api/core/mcp/mcp_client.py index 875d13de05..7d90d51956 100644 --- a/api/core/mcp/mcp_client.py +++ b/api/core/mcp/mcp_client.py @@ -23,12 +23,18 @@ class MCPClient: authed: bool = True, authorization_code: Optional[str] = None, 
for_list: bool = False, + headers: Optional[dict[str, str]] = None, + timeout: Optional[float] = None, + sse_read_timeout: Optional[float] = None, ): # Initialize info self.provider_id = provider_id self.tenant_id = tenant_id self.client_type = "streamable" self.server_url = server_url + self.headers = headers or {} + self.timeout = timeout + self.sse_read_timeout = sse_read_timeout # Authentication info self.authed = authed @@ -43,7 +49,7 @@ class MCPClient: self._session: Optional[ClientSession] = None self._streams_context: Optional[AbstractContextManager[Any]] = None self._session_context: Optional[ClientSession] = None - self.exit_stack = ExitStack() + self._exit_stack = ExitStack() # Whether the client has been initialized self._initialized = False @@ -90,21 +96,26 @@ class MCPClient: headers = ( {"Authorization": f"{self.token.token_type.capitalize()} {self.token.access_token}"} if self.authed and self.token - else {} + else self.headers + ) + self._streams_context = client_factory( + url=self.server_url, + headers=headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) - self._streams_context = client_factory(url=self.server_url, headers=headers) if not self._streams_context: raise MCPConnectionError("Failed to create connection context") # Use exit_stack to manage context managers properly if method_name == "mcp": - read_stream, write_stream, _ = self.exit_stack.enter_context(self._streams_context) + read_stream, write_stream, _ = self._exit_stack.enter_context(self._streams_context) streams = (read_stream, write_stream) else: # sse_client - streams = self.exit_stack.enter_context(self._streams_context) + streams = self._exit_stack.enter_context(self._streams_context) self._session_context = ClientSession(*streams) - self._session = self.exit_stack.enter_context(self._session_context) + self._session = self._exit_stack.enter_context(self._session_context) session = cast(ClientSession, self._session) session.initialize() return @@ -120,9 +131,6 @@ class MCPClient: if first_try: return self.connect_server(client_factory, method_name, first_try=False) - except MCPConnectionError: - raise - def list_tools(self) -> list[Tool]: """Connect to an MCP server running with SSE transport""" # List available tools to verify connection @@ -142,7 +150,7 @@ class MCPClient: """Clean up resources""" try: # ExitStack will handle proper cleanup of all managed context managers - self.exit_stack.close() + self._exit_stack.close() except Exception as e: logging.exception("Error during cleanup") raise ValueError(f"Error during cleanup: {e}") diff --git a/api/core/mcp/session/base_session.py b/api/core/mcp/session/base_session.py index 3b6c9a7424..3f98aa94ae 100644 --- a/api/core/mcp/session/base_session.py +++ b/api/core/mcp/session/base_session.py @@ -2,7 +2,6 @@ import logging import queue from collections.abc import Callable from concurrent.futures import Future, ThreadPoolExecutor, TimeoutError -from contextlib import ExitStack from datetime import timedelta from types import TracebackType from typing import Any, Generic, Self, TypeVar @@ -170,7 +169,6 @@ class BaseSession( self._receive_notification_type = receive_notification_type self._session_read_timeout_seconds = read_timeout_seconds self._in_flight = {} - self._exit_stack = ExitStack() # Initialize executor and future to None for proper cleanup checks self._executor: ThreadPoolExecutor | None = None self._receiver_future: Future | None = None @@ -377,7 +375,7 @@ class BaseSession( 
self._handle_incoming(RuntimeError(f"Server Error: {message}")) except queue.Empty: continue - except Exception as e: + except Exception: logging.exception("Error in message processing loop") raise @@ -389,14 +387,12 @@ class BaseSession( If the request is responded to within this method, it will not be forwarded on to the message stream. """ - pass def _received_notification(self, notification: ReceiveNotificationT) -> None: """ Can be overridden by subclasses to handle a notification without needing to listen on the message stream. """ - pass def send_progress_notification( self, progress_token: str | int, progress: float, total: float | None = None @@ -405,11 +401,9 @@ class BaseSession( Sends a progress notification for a request that is currently being processed. """ - pass def _handle_incoming( self, req: RequestResponder[ReceiveRequestT, SendResultT] | ReceiveNotificationT | Exception, ) -> None: """A generic handler for incoming messages. Overwritten by subclasses.""" - pass diff --git a/api/core/mcp/session/client_session.py b/api/core/mcp/session/client_session.py index ed2ad508ab..1bccf1d031 100644 --- a/api/core/mcp/session/client_session.py +++ b/api/core/mcp/session/client_session.py @@ -1,3 +1,4 @@ +import queue from datetime import timedelta from typing import Any, Protocol @@ -85,8 +86,8 @@ class ClientSession( ): def __init__( self, - read_stream, - write_stream, + read_stream: queue.Queue, + write_stream: queue.Queue, read_timeout_seconds: timedelta | None = None, sampling_callback: SamplingFnT | None = None, list_roots_callback: ListRootsFnT | None = None, diff --git a/api/core/tools/__base/tool_provider.py b/api/core/tools/__base/tool_provider.py index d096fc7df7..d1d7976cc3 100644 --- a/api/core/tools/__base/tool_provider.py +++ b/api/core/tools/__base/tool_provider.py @@ -12,8 +12,6 @@ from core.tools.errors import ToolProviderCredentialValidationError class ToolProviderController(ABC): - entity: ToolProviderEntity - def __init__(self, entity: ToolProviderEntity) -> None: self.entity = entity diff --git a/api/core/tools/mcp_tool/provider.py b/api/core/tools/mcp_tool/provider.py index 93f003effe..24ee981a1b 100644 --- a/api/core/tools/mcp_tool/provider.py +++ b/api/core/tools/mcp_tool/provider.py @@ -1,5 +1,5 @@ import json -from typing import Any +from typing import Any, Optional from core.mcp.types import Tool as RemoteMCPTool from core.tools.__base.tool_provider import ToolProviderController @@ -19,15 +19,24 @@ from services.tools.tools_transform_service import ToolTransformService class MCPToolProviderController(ToolProviderController): - provider_id: str - entity: ToolProviderEntityWithPlugin - - def __init__(self, entity: ToolProviderEntityWithPlugin, provider_id: str, tenant_id: str, server_url: str) -> None: + def __init__( + self, + entity: ToolProviderEntityWithPlugin, + provider_id: str, + tenant_id: str, + server_url: str, + headers: Optional[dict[str, str]] = None, + timeout: Optional[float] = None, + sse_read_timeout: Optional[float] = None, + ) -> None: super().__init__(entity) - self.entity = entity + self.entity: ToolProviderEntityWithPlugin = entity self.tenant_id = tenant_id self.provider_id = provider_id self.server_url = server_url + self.headers = headers or {} + self.timeout = timeout + self.sse_read_timeout = sse_read_timeout @property def provider_type(self) -> ToolProviderType: @@ -85,6 +94,9 @@ class MCPToolProviderController(ToolProviderController): provider_id=db_provider.server_identifier or "", tenant_id=db_provider.tenant_id or "", 
server_url=db_provider.decrypted_server_url, + headers={}, # TODO: get headers from db provider + timeout=db_provider.timeout, + sse_read_timeout=db_provider.sse_read_timeout, ) def _validate_credentials(self, user_id: str, credentials: dict[str, Any]) -> None: @@ -111,6 +123,9 @@ class MCPToolProviderController(ToolProviderController): icon=self.entity.identity.icon, server_url=self.server_url, provider_id=self.provider_id, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) def get_tools(self) -> list[MCPTool]: # type: ignore @@ -125,6 +140,9 @@ class MCPToolProviderController(ToolProviderController): icon=self.entity.identity.icon, server_url=self.server_url, provider_id=self.provider_id, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) for tool_entity in self.entity.tools ] diff --git a/api/core/tools/mcp_tool/tool.py b/api/core/tools/mcp_tool/tool.py index 8ebbb6b0fe..26789b23ce 100644 --- a/api/core/tools/mcp_tool/tool.py +++ b/api/core/tools/mcp_tool/tool.py @@ -13,13 +13,25 @@ from core.tools.entities.tool_entities import ToolEntity, ToolInvokeMessage, Too class MCPTool(Tool): def __init__( - self, entity: ToolEntity, runtime: ToolRuntime, tenant_id: str, icon: str, server_url: str, provider_id: str + self, + entity: ToolEntity, + runtime: ToolRuntime, + tenant_id: str, + icon: str, + server_url: str, + provider_id: str, + headers: Optional[dict[str, str]] = None, + timeout: Optional[float] = None, + sse_read_timeout: Optional[float] = None, ) -> None: super().__init__(entity, runtime) self.tenant_id = tenant_id self.icon = icon self.server_url = server_url self.provider_id = provider_id + self.headers = headers or {} + self.timeout = timeout + self.sse_read_timeout = sse_read_timeout def tool_provider_type(self) -> ToolProviderType: return ToolProviderType.MCP @@ -35,7 +47,15 @@ class MCPTool(Tool): from core.tools.errors import ToolInvokeError try: - with MCPClient(self.server_url, self.provider_id, self.tenant_id, authed=True) as mcp_client: + with MCPClient( + self.server_url, + self.provider_id, + self.tenant_id, + authed=True, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, + ) as mcp_client: tool_parameters = self._handle_none_parameter(tool_parameters) result = mcp_client.invoke_tool(tool_name=self.entity.identity.name, tool_args=tool_parameters) except MCPAuthError as e: @@ -72,6 +92,9 @@ class MCPTool(Tool): icon=self.icon, server_url=self.server_url, provider_id=self.provider_id, + headers=self.headers, + timeout=self.timeout, + sse_read_timeout=self.sse_read_timeout, ) def _handle_none_parameter(self, parameter: dict[str, Any]) -> dict[str, Any]: diff --git a/api/core/tools/tool_manager.py b/api/core/tools/tool_manager.py index 2737bcfb16..7472f4f605 100644 --- a/api/core/tools/tool_manager.py +++ b/api/core/tools/tool_manager.py @@ -789,9 +789,6 @@ class ToolManager: """ get api provider """ - """ - get tool provider - """ provider_name = provider provider_obj: ApiToolProvider | None = ( db.session.query(ApiToolProvider) diff --git a/api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py b/api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py new file mode 100644 index 0000000000..383e21cd28 --- /dev/null +++ b/api/migrations/versions/2025_08_07_1115-fa8b0fa6f407_add_timeout_for_tool_mcp_providers.py @@ -0,0 +1,33 @@ +"""add timeout for tool_mcp_providers + +Revision ID: 
fa8b0fa6f407 +Revises: 532b3f888abf +Create Date: 2025-08-07 11:15:31.517985 + +""" +from alembic import op +import sqlalchemy as sa + + +# revision identifiers, used by Alembic. +revision = 'fa8b0fa6f407' +down_revision = '532b3f888abf' +branch_labels = None +depends_on = None + + +def upgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('tool_mcp_providers', schema=None) as batch_op: + batch_op.add_column(sa.Column('timeout', sa.Float(), server_default=sa.text('30'), nullable=False)) + batch_op.add_column(sa.Column('sse_read_timeout', sa.Float(), server_default=sa.text('300'), nullable=False)) + # ### end Alembic commands ### + + +def downgrade(): + # ### commands auto generated by Alembic - please adjust! ### + with op.batch_alter_table('tool_mcp_providers', schema=None) as batch_op: + batch_op.drop_column('sse_read_timeout') + batch_op.drop_column('timeout') + + # ### end Alembic commands ### diff --git a/api/models/tools.py b/api/models/tools.py index 408c1371c2..e0c9fa6ffc 100644 --- a/api/models/tools.py +++ b/api/models/tools.py @@ -278,6 +278,8 @@ class MCPToolProvider(Base): updated_at: Mapped[datetime] = mapped_column( sa.DateTime, nullable=False, server_default=sa.text("CURRENT_TIMESTAMP(0)") ) + timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("30")) + sse_read_timeout: Mapped[float] = mapped_column(sa.Float, nullable=False, server_default=sa.text("300")) def load_user(self) -> Account | None: return db.session.query(Account).where(Account.id == self.user_id).first() diff --git a/api/services/tools/mcp_tools_manage_service.py b/api/services/tools/mcp_tools_manage_service.py index 23be449a5a..f45c931768 100644 --- a/api/services/tools/mcp_tools_manage_service.py +++ b/api/services/tools/mcp_tools_manage_service.py @@ -59,6 +59,8 @@ class MCPToolManageService: icon_type: str, icon_background: str, server_identifier: str, + timeout: float, + sse_read_timeout: float, ) -> ToolProviderApiEntity: server_url_hash = hashlib.sha256(server_url.encode()).hexdigest() existing_provider = ( @@ -91,6 +93,8 @@ class MCPToolManageService: tools="[]", icon=json.dumps({"content": icon, "background": icon_background}) if icon_type == "emoji" else icon, server_identifier=server_identifier, + timeout=timeout, + sse_read_timeout=sse_read_timeout, ) db.session.add(mcp_tool) db.session.commit() @@ -166,6 +170,8 @@ class MCPToolManageService: icon_type: str, icon_background: str, server_identifier: str, + timeout: float | None = None, + sse_read_timeout: float | None = None, ): mcp_provider = cls.get_mcp_provider_by_provider_id(provider_id, tenant_id) @@ -197,6 +203,10 @@ class MCPToolManageService: mcp_provider.tools = reconnect_result["tools"] mcp_provider.encrypted_credentials = reconnect_result["encrypted_credentials"] + if timeout is not None: + mcp_provider.timeout = timeout + if sse_read_timeout is not None: + mcp_provider.sse_read_timeout = sse_read_timeout db.session.commit() except IntegrityError as e: db.session.rollback() diff --git a/web/app/components/tools/mcp/modal.tsx b/web/app/components/tools/mcp/modal.tsx index b7202f5242..2df8349a91 100644 --- a/web/app/components/tools/mcp/modal.tsx +++ b/web/app/components/tools/mcp/modal.tsx @@ -27,6 +27,8 @@ export type DuplicateAppModalProps = { icon: string icon_background?: string | null server_identifier: string + timeout: number + sse_read_timeout: number }) => void onHide: () => void } @@ -64,6 +66,8 @@ const MCPModal = ({ const [appIcon, setAppIcon] 
= useState(getIcon(data)) const [showAppIconPicker, setShowAppIconPicker] = useState(false) const [serverIdentifier, setServerIdentifier] = React.useState(data?.server_identifier || '') + const [timeout, setMcpTimeout] = React.useState(30) + const [sseReadTimeout, setSseReadTimeout] = React.useState(300) const [isFetchingIcon, setIsFetchingIcon] = useState(false) const appIconRef = useRef(null) const isHovering = useHover(appIconRef) @@ -73,7 +77,7 @@ const MCPModal = ({ const urlPattern = /^(https?:\/\/)((([a-z\d]([a-z\d-]*[a-z\d])*)\.)+[a-z]{2,}|((\d{1,3}\.){3}\d{1,3})|localhost)(\:\d+)?(\/[-a-z\d%_.~+]*)*(\?[;&a-z\d%_.~+=-]*)?/i return urlPattern.test(string) } - catch (e) { + catch { return false } } @@ -123,6 +127,8 @@ const MCPModal = ({ icon: appIcon.type === 'emoji' ? appIcon.icon : appIcon.fileId, icon_background: appIcon.type === 'emoji' ? appIcon.background : undefined, server_identifier: serverIdentifier.trim(), + timeout: timeout || 30, + sse_read_timeout: sseReadTimeout || 300, }) if(isCreate) onHide() @@ -201,6 +207,30 @@ const MCPModal = ({
)}
+
+
+ {t('tools.mcp.modal.timeout')} +
+ setMcpTimeout(Number(e.target.value))} + onBlur={e => handleBlur(e.target.value.trim())} + placeholder={t('tools.mcp.modal.timeoutPlaceholder')} + /> +
+
+
+ {t('tools.mcp.modal.sseReadTimeout')} +
+ setSseReadTimeout(Number(e.target.value))} + onBlur={e => handleBlur(e.target.value.trim())} + placeholder={t('tools.mcp.modal.timeoutPlaceholder')} + /> +
diff --git a/web/app/components/tools/types.ts b/web/app/components/tools/types.ts index b83919ad18..01f436dedc 100644 --- a/web/app/components/tools/types.ts +++ b/web/app/components/tools/types.ts @@ -57,6 +57,8 @@ export type Collection = { server_url?: string updated_at?: number server_identifier?: string + timeout?: number + sse_read_timeout?: number } export type ToolParameter = { diff --git a/web/i18n/en-US/tools.ts b/web/i18n/en-US/tools.ts index 4e1ce1308a..dfbfb82d8b 100644 --- a/web/i18n/en-US/tools.ts +++ b/web/i18n/en-US/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: 'Cancel', save: 'Save', confirm: 'Add & Authorize', + timeout: 'Timeout', + sseReadTimeout: 'SSE Read Timeout', }, delete: 'Remove MCP Server', deleteConfirmTitle: 'Would you like to remove {{mcp}}?', diff --git a/web/i18n/zh-Hans/tools.ts b/web/i18n/zh-Hans/tools.ts index 5c1eb13236..82be1c9bb0 100644 --- a/web/i18n/zh-Hans/tools.ts +++ b/web/i18n/zh-Hans/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: '取消', save: '保存', confirm: '添加并授权', + timeout: '超时时间', + sseReadTimeout: 'SSE 读取超时时间', }, delete: '删除 MCP 服务', deleteConfirmTitle: '你想要删除 {{mcp}} 吗?', diff --git a/web/service/use-tools.ts b/web/service/use-tools.ts index 6b457be759..4db6039ed4 100644 --- a/web/service/use-tools.ts +++ b/web/service/use-tools.ts @@ -85,6 +85,8 @@ export const useCreateMCP = () => { icon_type: AppIconType icon: string icon_background?: string | null + timeout?: number + sse_read_timeout?: number }) => { return post('workspaces/current/tool-provider/mcp', { body: { @@ -109,6 +111,8 @@ export const useUpdateMCP = ({ icon: string icon_background?: string | null provider_id: string + timeout?: number + sse_read_timeout?: number }) => { return put('workspaces/current/tool-provider/mcp', { body: { From 02f7677d925c66a7b0775bb7de99d86eff40850a Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 13:29:33 +0800 Subject: [PATCH 04/27] chore: translate i18n files (#23789) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- web/i18n/de-DE/tools.ts | 2 ++ web/i18n/es-ES/tools.ts | 2 ++ web/i18n/fa-IR/tools.ts | 2 ++ web/i18n/fr-FR/tools.ts | 2 ++ web/i18n/hi-IN/tools.ts | 2 ++ web/i18n/it-IT/tools.ts | 2 ++ web/i18n/ja-JP/tools.ts | 2 ++ web/i18n/ko-KR/tools.ts | 2 ++ web/i18n/pl-PL/tools.ts | 2 ++ web/i18n/pt-BR/tools.ts | 2 ++ web/i18n/ro-RO/tools.ts | 2 ++ web/i18n/ru-RU/tools.ts | 2 ++ web/i18n/sl-SI/tools.ts | 2 ++ web/i18n/th-TH/tools.ts | 2 ++ web/i18n/tr-TR/tools.ts | 2 ++ web/i18n/uk-UA/tools.ts | 2 ++ web/i18n/vi-VN/tools.ts | 2 ++ web/i18n/zh-Hant/tools.ts | 2 ++ 18 files changed, 36 insertions(+) diff --git a/web/i18n/de-DE/tools.ts b/web/i18n/de-DE/tools.ts index d684e3bd77..377eb2d1f7 100644 --- a/web/i18n/de-DE/tools.ts +++ b/web/i18n/de-DE/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: 'Abbrechen', save: 'Speichern', confirm: 'Hinzufügen & Autorisieren', + sseReadTimeout: 'SSE-Lesezeitüberschreitung', + timeout: 'Zeitüberschreitung', }, delete: 'MCP-Server entfernen', deleteConfirmTitle: 'Möchten Sie {{mcp}} entfernen?', diff --git a/web/i18n/es-ES/tools.ts b/web/i18n/es-ES/tools.ts index afb6dfa1e3..045cc57a3c 100644 --- a/web/i18n/es-ES/tools.ts +++ b/web/i18n/es-ES/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: 'Cancelar', save: 'Guardar', confirm: 'Añadir y Autorizar', + sseReadTimeout: 'Tiempo de espera de lectura SSE', + timeout: 'Tiempo de espera', }, delete: 'Eliminar 
servidor MCP', deleteConfirmTitle: '¿Eliminar {{mcp}}?', diff --git a/web/i18n/fa-IR/tools.ts b/web/i18n/fa-IR/tools.ts index 6e7d941e7c..6b1a829e50 100644 --- a/web/i18n/fa-IR/tools.ts +++ b/web/i18n/fa-IR/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'لغو', save: 'ذخیره', confirm: 'افزودن و مجوزدهی', + timeout: 'مهلت', + sseReadTimeout: 'زمان.out خواندن SSE', }, delete: 'حذف سرور MCP', deleteConfirmTitle: 'آیا مایل به حذف {mcp} هستید؟', diff --git a/web/i18n/fr-FR/tools.ts b/web/i18n/fr-FR/tools.ts index b6dc4f6307..a176382bdb 100644 --- a/web/i18n/fr-FR/tools.ts +++ b/web/i18n/fr-FR/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'Annuler', save: 'Enregistrer', confirm: 'Ajouter & Authoriser', + sseReadTimeout: 'Délai d\'attente de lecture SSE', + timeout: 'Délai d\'attente', }, delete: 'Supprimer le Serveur MCP', deleteConfirmTitle: 'Souhaitez-vous supprimer {mcp}?', diff --git a/web/i18n/hi-IN/tools.ts b/web/i18n/hi-IN/tools.ts index b3d5a343f1..7e52959498 100644 --- a/web/i18n/hi-IN/tools.ts +++ b/web/i18n/hi-IN/tools.ts @@ -197,6 +197,8 @@ const translation = { cancel: 'रद्द करें', save: 'सहेजें', confirm: 'जोड़ें और अधिकृत करें', + timeout: 'टाइमआउट', + sseReadTimeout: 'एसएसई पढ़ने का टाइमआउट', }, delete: 'MCP सर्वर हटाएँ', deleteConfirmTitle: '{mcp} हटाना चाहते हैं?', diff --git a/web/i18n/it-IT/tools.ts b/web/i18n/it-IT/tools.ts index 8d64061104..f592d77b78 100644 --- a/web/i18n/it-IT/tools.ts +++ b/web/i18n/it-IT/tools.ts @@ -202,6 +202,8 @@ const translation = { cancel: 'Annulla', save: 'Salva', confirm: 'Aggiungi & Autorizza', + timeout: 'Tempo scaduto', + sseReadTimeout: 'Timeout di lettura SSE', }, delete: 'Rimuovi Server MCP', deleteConfirmTitle: 'Vuoi rimuovere {mcp}?', diff --git a/web/i18n/ja-JP/tools.ts b/web/i18n/ja-JP/tools.ts index 5eebc54fc0..f7c0055260 100644 --- a/web/i18n/ja-JP/tools.ts +++ b/web/i18n/ja-JP/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: 'キャンセル', save: '保存', confirm: '追加して承認', + timeout: 'タイムアウト', + sseReadTimeout: 'SSE 読み取りタイムアウト', }, delete: 'MCPサーバーを削除', deleteConfirmTitle: '{{mcp}} を削除しますか?', diff --git a/web/i18n/ko-KR/tools.ts b/web/i18n/ko-KR/tools.ts index d1a1d709c0..2598b4490a 100644 --- a/web/i18n/ko-KR/tools.ts +++ b/web/i18n/ko-KR/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: '취소', save: '저장', confirm: '추가 및 승인', + timeout: '타임아웃', + sseReadTimeout: 'SSE 읽기 타임아웃', }, delete: 'MCP 서버 제거', deleteConfirmTitle: '{mcp}를 제거하시겠습니까?', diff --git a/web/i18n/pl-PL/tools.ts b/web/i18n/pl-PL/tools.ts index d72f9cafe5..f27a0c1e34 100644 --- a/web/i18n/pl-PL/tools.ts +++ b/web/i18n/pl-PL/tools.ts @@ -196,6 +196,8 @@ const translation = { cancel: 'Anuluj', save: 'Zapisz', confirm: 'Dodaj i autoryzuj', + timeout: 'Limit czasu', + sseReadTimeout: 'Przekroczenie czasu oczekiwania na odczyt SSE', }, delete: 'Usuń serwer MCP', deleteConfirmTitle: 'Usunąć {mcp}?', diff --git a/web/i18n/pt-BR/tools.ts b/web/i18n/pt-BR/tools.ts index aa9df17c69..dafe45b3e7 100644 --- a/web/i18n/pt-BR/tools.ts +++ b/web/i18n/pt-BR/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'Cancelar', save: 'Salvar', confirm: 'Adicionar e Autorizar', + sseReadTimeout: 'Tempo limite de leitura SSE', + timeout: 'Tempo esgotado', }, delete: 'Remover Servidor MCP', deleteConfirmTitle: 'Você gostaria de remover {{mcp}}?', diff --git a/web/i18n/ro-RO/tools.ts b/web/i18n/ro-RO/tools.ts index 9add6aae46..e5eb5bbcd6 100644 --- a/web/i18n/ro-RO/tools.ts +++ b/web/i18n/ro-RO/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 
'Anulare', save: 'Salvare', confirm: 'Adăugare și Autorizare', + timeout: 'Timp de așteptare', + sseReadTimeout: 'Timp de așteptare pentru citirea SSE', }, delete: 'Eliminare Server MCP', deleteConfirmTitle: 'Ștergeți {mcp}?', diff --git a/web/i18n/ru-RU/tools.ts b/web/i18n/ru-RU/tools.ts index e20e5664d8..047a845ef3 100644 --- a/web/i18n/ru-RU/tools.ts +++ b/web/i18n/ru-RU/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'Отмена', save: 'Сохранить', confirm: 'Добавить и авторизовать', + timeout: 'Тайм-аут', + sseReadTimeout: 'Таймаут чтения SSE', }, delete: 'Удалить MCP сервер', deleteConfirmTitle: 'Вы действительно хотите удалить {mcp}?', diff --git a/web/i18n/sl-SI/tools.ts b/web/i18n/sl-SI/tools.ts index 4bf3b607cc..bcca2753fb 100644 --- a/web/i18n/sl-SI/tools.ts +++ b/web/i18n/sl-SI/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'Prekliči', save: 'Shrani', confirm: 'Dodaj in avtoriziraj', + timeout: 'Časovna omejitev', + sseReadTimeout: 'SSE časovna omejitev branja', }, delete: 'Odstrani strežnik MCP', deleteConfirmTitle: 'Odstraniti {mcp}?', diff --git a/web/i18n/th-TH/tools.ts b/web/i18n/th-TH/tools.ts index 6406865201..3a32234bf4 100644 --- a/web/i18n/th-TH/tools.ts +++ b/web/i18n/th-TH/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'ยกเลิก', save: 'บันทึก', confirm: 'เพิ่มและอนุญาต', + timeout: 'หมดเวลา', + sseReadTimeout: 'หมดเวลาการอ่าน SSE', }, delete: 'ลบเซิร์ฟเวอร์ MCP', deleteConfirmTitle: 'คุณต้องการลบ {mcp} หรือไม่?', diff --git a/web/i18n/tr-TR/tools.ts b/web/i18n/tr-TR/tools.ts index 5ae37c474f..16d01ea44d 100644 --- a/web/i18n/tr-TR/tools.ts +++ b/web/i18n/tr-TR/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'İptal', save: 'Kaydet', confirm: 'Ekle ve Yetkilendir', + timeout: 'Zaman aşımı', + sseReadTimeout: 'SSE Okuma Zaman Aşımı', }, delete: 'MCP Sunucusunu Kaldır', deleteConfirmTitle: '{mcp} kaldırılsın mı?', diff --git a/web/i18n/uk-UA/tools.ts b/web/i18n/uk-UA/tools.ts index 476e4f14eb..5c00ecebd3 100644 --- a/web/i18n/uk-UA/tools.ts +++ b/web/i18n/uk-UA/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'Скасувати', save: 'Зберегти', confirm: 'Додати та Авторизувати', + timeout: 'Час вичерпано', + sseReadTimeout: 'Тайм-аут читання SSE', }, delete: 'Видалити сервер MCP', deleteConfirmTitle: 'Видалити {mcp}?', diff --git a/web/i18n/vi-VN/tools.ts b/web/i18n/vi-VN/tools.ts index 896b83da03..a4f95cbd9d 100644 --- a/web/i18n/vi-VN/tools.ts +++ b/web/i18n/vi-VN/tools.ts @@ -192,6 +192,8 @@ const translation = { cancel: 'Hủy', save: 'Lưu', confirm: 'Thêm & Ủy quyền', + sseReadTimeout: 'Thời gian chờ Đọc SSE', + timeout: 'Thời gian chờ', }, delete: 'Xóa Máy chủ MCP', deleteConfirmTitle: 'Xóa {mcp}?', diff --git a/web/i18n/zh-Hant/tools.ts b/web/i18n/zh-Hant/tools.ts index 9dad3a74cf..821e90a084 100644 --- a/web/i18n/zh-Hant/tools.ts +++ b/web/i18n/zh-Hant/tools.ts @@ -191,6 +191,8 @@ const translation = { cancel: '取消', save: '儲存', confirm: '新增並授權', + sseReadTimeout: 'SSE 讀取超時', + timeout: '超時', }, delete: '刪除 MCP 伺服器', deleteConfirmTitle: '您確定要刪除 {{mcp}} 嗎?', From a62371940f98fe0fee4e8b85403736eac96c41fc Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 12 Aug 2025 13:29:51 +0800 Subject: [PATCH 05/27] fix: remove misleading clear buttons and improve SimpleSelect UX (#23791) --- web/app/components/base/select/index.tsx | 23 ++++++++++++++----- .../account-setting/language-page/index.tsx | 2 ++ 2 files changed, 19 insertions(+), 6 deletions(-) diff --git 
a/web/app/components/base/select/index.tsx b/web/app/components/base/select/index.tsx index d9285c1061..1f29d5bd6a 100644 --- a/web/app/components/base/select/index.tsx +++ b/web/app/components/base/select/index.tsx @@ -192,6 +192,7 @@ const SimpleSelect: FC = ({ const localPlaceholder = placeholder || t('common.placeholder.select') const [selectedItem, setSelectedItem] = useState(null) + const [open, setOpen] = useState(false) useEffect(() => { let defaultSelect = null @@ -220,8 +221,11 @@ const SimpleSelect: FC = ({ { // get data-open, use setTimeout to ensure the attribute is set setTimeout(() => { - if (listboxRef.current) - onOpenChange?.(listboxRef.current.getAttribute('data-open') !== null) + if (listboxRef.current) { + const isOpen = listboxRef.current.getAttribute('data-open') !== null + setOpen(isOpen) + onOpenChange?.(isOpen) + } }) }} className={classNames(`flex h-full w-full items-center rounded-lg border-0 bg-components-input-bg-normal pl-3 pr-10 focus-visible:bg-state-base-hover-alt focus-visible:outline-none group-hover/simple-select:bg-state-base-hover-alt sm:text-sm sm:leading-6 ${disabled ? 'cursor-not-allowed' : 'cursor-pointer'}`, className)}> {selectedItem?.name ?? localPlaceholder} @@ -240,10 +244,17 @@ const SimpleSelect: FC = ({ /> ) : ( - diff --git a/web/app/components/header/account-setting/language-page/index.tsx b/web/app/components/header/account-setting/language-page/index.tsx index bf3537b5df..f51b265412 100644 --- a/web/app/components/header/account-setting/language-page/index.tsx +++ b/web/app/components/header/account-setting/language-page/index.tsx @@ -70,6 +70,7 @@ export default function LanguagePage() { items={languages.filter(item => item.supported)} onSelect={item => handleSelectLanguage(item)} disabled={editing} + notClearable={true} />
@@ -79,6 +80,7 @@ export default function LanguagePage() {
           <SimpleSelect
             …
             items={timezones}
             onSelect={item => handleSelectTimezone(item)}
             disabled={editing}
+            notClearable={true}
           />
From a09935d9b92aaa0ef9af68b38bcdee180601e725 Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Tue, 12 Aug 2025 14:45:56 +0800 Subject: [PATCH 06/27] chore: restore @mdx-js dependencies in package.json and pnpm-lock.yaml (#23792) --- web/package.json | 6 +++--- web/pnpm-lock.yaml | 18 +++++++++--------- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/web/package.json b/web/package.json index 742f674ee9..a492104906 100644 --- a/web/package.json +++ b/web/package.json @@ -56,10 +56,7 @@ "@lexical/selection": "^0.30.0", "@lexical/text": "^0.30.0", "@lexical/utils": "^0.30.0", - "@mdx-js/loader": "^3.1.0", - "@mdx-js/react": "^3.1.0", "@monaco-editor/react": "^4.6.0", - "@next/mdx": "~15.3.5", "@octokit/core": "^6.1.2", "@octokit/request-error": "^6.1.5", "@remixicon/react": "^4.5.0", @@ -160,8 +157,11 @@ "@eslint/js": "^9.20.0", "@faker-js/faker": "^9.0.3", "@happy-dom/jest-environment": "^17.4.4", + "@mdx-js/loader": "^3.1.0", + "@mdx-js/react": "^3.1.0", "@next/bundle-analyzer": "^15.4.1", "@next/eslint-plugin-next": "~15.4.5", + "@next/mdx": "~15.3.5", "@rgrove/parse-xml": "^4.1.0", "@storybook/addon-essentials": "8.5.0", "@storybook/addon-interactions": "8.5.0", diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml index 2f03968bc1..5bce1418a2 100644 --- a/web/pnpm-lock.yaml +++ b/web/pnpm-lock.yaml @@ -96,18 +96,9 @@ importers: '@lexical/utils': specifier: ^0.30.0 version: 0.30.0 - '@mdx-js/loader': - specifier: ^3.1.0 - version: 3.1.0(acorn@8.15.0)(webpack@5.100.2(esbuild@0.25.0)(uglify-js@3.19.3)) - '@mdx-js/react': - specifier: ^3.1.0 - version: 3.1.0(@types/react@19.1.8)(react@19.1.0) '@monaco-editor/react': specifier: ^4.6.0 version: 4.7.0(monaco-editor@0.52.2)(react-dom@19.1.0(react@19.1.0))(react@19.1.0) - '@next/mdx': - specifier: ~15.3.5 - version: 15.3.5(@mdx-js/loader@3.1.0(acorn@8.15.0)(webpack@5.100.2(esbuild@0.25.0)(uglify-js@3.19.3)))(@mdx-js/react@3.1.0(@types/react@19.1.8)(react@19.1.0)) '@octokit/core': specifier: ^6.1.2 version: 6.1.6 @@ -403,12 +394,21 @@ importers: '@happy-dom/jest-environment': specifier: ^17.4.4 version: 17.6.3 + '@mdx-js/loader': + specifier: ^3.1.0 + version: 3.1.0(acorn@8.15.0)(webpack@5.100.2(esbuild@0.25.0)(uglify-js@3.19.3)) + '@mdx-js/react': + specifier: ^3.1.0 + version: 3.1.0(@types/react@19.1.8)(react@19.1.0) '@next/bundle-analyzer': specifier: ^15.4.1 version: 15.4.1 '@next/eslint-plugin-next': specifier: ~15.4.5 version: 15.4.5 + '@next/mdx': + specifier: ~15.3.5 + version: 15.3.5(@mdx-js/loader@3.1.0(acorn@8.15.0)(webpack@5.100.2(esbuild@0.25.0)(uglify-js@3.19.3)))(@mdx-js/react@3.1.0(@types/react@19.1.8)(react@19.1.0)) '@rgrove/parse-xml': specifier: ^4.1.0 version: 4.2.0 From de0dae9d9b98e5d39d672024c5bb43c721a7fcd6 Mon Sep 17 00:00:00 2001 From: GuanMu Date: Tue, 12 Aug 2025 14:48:35 +0800 Subject: [PATCH 07/27] Fix node search (#23795) --- .../workflow/hooks/use-shortcuts.ts | 1 - .../workflow/hooks/use-workflow-search.tsx | 28 ++++++++++++++++++- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/web/app/components/workflow/hooks/use-shortcuts.ts b/web/app/components/workflow/hooks/use-shortcuts.ts index def4eef9ce..b2d71555d7 100644 --- a/web/app/components/workflow/hooks/use-shortcuts.ts +++ b/web/app/components/workflow/hooks/use-shortcuts.ts @@ -218,7 +218,6 @@ export const useShortcuts = (): void => { useKeyPress( 'shift', (e) => { - console.log('Shift down', e) if (shouldHandleShortcut(e)) dimOtherNodes() }, diff --git 
a/web/app/components/workflow/hooks/use-workflow-search.tsx b/web/app/components/workflow/hooks/use-workflow-search.tsx index b512d3d140..a659c8c204 100644 --- a/web/app/components/workflow/hooks/use-workflow-search.tsx +++ b/web/app/components/workflow/hooks/use-workflow-search.tsx @@ -7,6 +7,11 @@ import type { CommonNodeType } from '../types' import { workflowNodesAction } from '@/app/components/goto-anything/actions/workflow-nodes' import BlockIcon from '@/app/components/workflow/block-icon' import { setupNodeSelectionListener } from '../utils/node-navigation' +import { BlockEnum } from '../types' +import { useStore } from '../store' +import type { Emoji } from '@/app/components/tools/types' +import { CollectionType } from '@/app/components/tools/types' +import { canFindTool } from '@/utils' /** * Hook to register workflow nodes search functionality @@ -16,6 +21,11 @@ export const useWorkflowSearch = () => { const { handleNodeSelect } = useNodesInteractions() // Filter and process nodes for search + const buildInTools = useStore(s => s.buildInTools) + const customTools = useStore(s => s.customTools) + const workflowTools = useStore(s => s.workflowTools) + const mcpTools = useStore(s => s.mcpTools) + const searchableNodes = useMemo(() => { const filteredNodes = nodes.filter((node) => { if (!node.id || !node.data || node.type === 'sticky') return false @@ -31,6 +41,20 @@ export const useWorkflowSearch = () => { .map((node) => { const nodeData = node.data as CommonNodeType + // compute tool icon if node is a Tool + let toolIcon: string | Emoji | undefined + if (nodeData?.type === BlockEnum.Tool) { + let targetTools = workflowTools + if (nodeData.provider_type === CollectionType.builtIn) + targetTools = buildInTools + else if (nodeData.provider_type === CollectionType.custom) + targetTools = customTools + else if (nodeData.provider_type === CollectionType.mcp) + targetTools = mcpTools + + toolIcon = targetTools.find(toolWithProvider => canFindTool(toolWithProvider.id, nodeData.provider_id))?.icon + } + return { id: node.id, title: nodeData?.title || nodeData?.type || 'Untitled', @@ -38,11 +62,12 @@ export const useWorkflowSearch = () => { desc: nodeData?.desc || '', blockType: nodeData?.type, nodeData, + toolIcon, } }) return result - }, [nodes]) + }, [nodes, buildInTools, customTools, workflowTools, mcpTools]) // Create search function for workflow nodes const searchWorkflowNodes = useCallback((query: string) => { @@ -83,6 +108,7 @@ export const useWorkflowSearch = () => { type={node.blockType} className="shrink-0" size="sm" + toolIcon={node.toolIcon} /> ), metadata: { From 7820e31a92347b197ac22b7e8a682145996a8f6e Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Tue, 12 Aug 2025 18:14:57 +0800 Subject: [PATCH 08/27] fix: add missing translation keys for goto anything command selector (#23815) --- web/i18n/de-DE/app.ts | 2 ++ web/i18n/en-US/app.ts | 2 ++ web/i18n/es-ES/app.ts | 2 ++ web/i18n/fa-IR/app.ts | 2 ++ web/i18n/fa-IR/tools.ts | 1 - web/i18n/fr-FR/app.ts | 2 ++ web/i18n/fr-FR/tools.ts | 1 - web/i18n/hi-IN/app.ts | 2 ++ web/i18n/hi-IN/tools.ts | 1 - web/i18n/it-IT/app.ts | 2 ++ web/i18n/it-IT/tools.ts | 1 - web/i18n/ja-JP/app.ts | 2 ++ web/i18n/ko-KR/app.ts | 2 ++ web/i18n/pl-PL/app.ts | 2 ++ web/i18n/pl-PL/tools.ts | 1 - web/i18n/pt-BR/app.ts | 2 ++ web/i18n/pt-BR/tools.ts | 1 - web/i18n/ro-RO/app.ts | 2 ++ web/i18n/ro-RO/tools.ts | 1 - web/i18n/ru-RU/app.ts | 2 ++ web/i18n/ru-RU/tools.ts | 1 - web/i18n/sl-SI/app.ts | 2 ++ 
web/i18n/sl-SI/tools.ts | 1 - web/i18n/th-TH/app.ts | 2 ++ web/i18n/th-TH/tools.ts | 1 - web/i18n/tr-TR/app.ts | 2 ++ web/i18n/tr-TR/tools.ts | 1 - web/i18n/uk-UA/app.ts | 2 ++ web/i18n/uk-UA/tools.ts | 1 - web/i18n/vi-VN/app.ts | 2 ++ web/i18n/vi-VN/tools.ts | 1 - web/i18n/zh-Hans/app.ts | 2 ++ web/i18n/zh-Hant/app.ts | 2 ++ 33 files changed, 40 insertions(+), 13 deletions(-) diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts index 9eab16c694..bead4e68c8 100644 --- a/web/i18n/de-DE/app.ts +++ b/web/i18n/de-DE/app.ts @@ -296,6 +296,8 @@ const translation = { resultCount: '{{count}} Ergebnis', resultCount_other: '{{count}} Ergebnisse', inScope: 'in {{scope}}s', + noMatchingCommands: 'Keine übereinstimmenden Befehle gefunden', + tryDifferentSearch: 'Versuchen Sie es mit einem anderen Suchbegriff', }, } diff --git a/web/i18n/en-US/app.ts b/web/i18n/en-US/app.ts index eb4f3c404b..f7b75c75ac 100644 --- a/web/i18n/en-US/app.ts +++ b/web/i18n/en-US/app.ts @@ -294,6 +294,8 @@ const translation = { knowledgeBases: 'Knowledge Bases', workflowNodes: 'Workflow Nodes', }, + noMatchingCommands: 'No matching commands found', + tryDifferentSearch: 'Try a different search term', }, } diff --git a/web/i18n/es-ES/app.ts b/web/i18n/es-ES/app.ts index d08149da88..24fa7680f4 100644 --- a/web/i18n/es-ES/app.ts +++ b/web/i18n/es-ES/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} resultado', resultCount_other: '{{count}} resultados', inScope: 'en {{scope}}s', + tryDifferentSearch: 'Prueba con un término de búsqueda diferente', + noMatchingCommands: 'No se encontraron comandos coincidentes', }, } diff --git a/web/i18n/fa-IR/app.ts b/web/i18n/fa-IR/app.ts index b0fbf7ebd8..9c470273a6 100644 --- a/web/i18n/fa-IR/app.ts +++ b/web/i18n/fa-IR/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} نتیجه', resultCount_other: '{{count}} نتیجه', inScope: 'در {{scope}}s', + noMatchingCommands: 'هیچ دستوری منطبق یافت نشد', + tryDifferentSearch: 'عبارت جستجوی دیگری را امتحان کنید', }, } diff --git a/web/i18n/fa-IR/tools.ts b/web/i18n/fa-IR/tools.ts index 6b1a829e50..82f2767015 100644 --- a/web/i18n/fa-IR/tools.ts +++ b/web/i18n/fa-IR/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'کلید Http Header، می‌توانید آن را با "Authorization" ترک کنید اگر نمی‌دانید چیست یا آن را به یک مقدار سفارشی تنظیم کنید', types: { none: 'هیچ', - api_key: 'کلید API', apiKeyPlaceholder: 'نام هدر HTTP برای کلید API', apiValuePlaceholder: 'کلید API را وارد کنید', api_key_header: 'عنوان', diff --git a/web/i18n/fr-FR/app.ts b/web/i18n/fr-FR/app.ts index 6245a8534a..d0aab89918 100644 --- a/web/i18n/fr-FR/app.ts +++ b/web/i18n/fr-FR/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} résultat', resultCount_other: '{{count}} résultats', inScope: 'dans {{scope}}s', + noMatchingCommands: 'Aucune commande correspondante n’a été trouvée', + tryDifferentSearch: 'Essayez un autre terme de recherche', }, } diff --git a/web/i18n/fr-FR/tools.ts b/web/i18n/fr-FR/tools.ts index a176382bdb..9e1d5e50ba 100644 --- a/web/i18n/fr-FR/tools.ts +++ b/web/i18n/fr-FR/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'Clé de l\'en-tête HTTP. 
Vous pouvez la laisser telle quelle avec "Autorisation" si vous n\'avez aucune idée de ce que c\'est, ou la définir sur une valeur personnalisée.', types: { none: 'Aucun', - api_key: 'Clé API', apiKeyPlaceholder: 'Nom de l\'en-tête HTTP pour la clé API', apiValuePlaceholder: 'Entrez la clé API', api_key_query: 'Paramètre de requête', diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts index c365b691e2..fc799f335c 100644 --- a/web/i18n/hi-IN/app.ts +++ b/web/i18n/hi-IN/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} परिणाम', resultCount_other: '{{count}} परिणाम', inScope: '{{scope}}s में', + tryDifferentSearch: 'एक अलग खोज शब्द आजमाएँ', + noMatchingCommands: 'कोई मिलती-जुलती कमांड्स नहीं मिलीं', }, } diff --git a/web/i18n/hi-IN/tools.ts b/web/i18n/hi-IN/tools.ts index 7e52959498..a3479df6d6 100644 --- a/web/i18n/hi-IN/tools.ts +++ b/web/i18n/hi-IN/tools.ts @@ -86,7 +86,6 @@ const translation = { 'Http हैडर कुंजी, यदि आपको कुछ पता नहीं है तो "Authorization" के साथ छोड़ सकते हैं या इसे कस्टम मूल्य पर सेट कर सकते हैं', types: { none: 'कोई नहीं', - api_key: 'API कुंजी', apiKeyPlaceholder: 'API कुंजी के लिए HTTP हैडर नाम', apiValuePlaceholder: 'API कुंजी दर्ज करें', api_key_query: 'अनुक्रमणिका पैरामीटर', diff --git a/web/i18n/it-IT/app.ts b/web/i18n/it-IT/app.ts index 74ea6b7aa7..f4a2c6138c 100644 --- a/web/i18n/it-IT/app.ts +++ b/web/i18n/it-IT/app.ts @@ -300,6 +300,8 @@ const translation = { resultCount: '{{count}} risultato', resultCount_other: '{{count}} risultati', inScope: 'in {{scope}}s', + tryDifferentSearch: 'Prova un termine di ricerca diverso', + noMatchingCommands: 'Nessun comando corrispondente trovato', }, } diff --git a/web/i18n/it-IT/tools.ts b/web/i18n/it-IT/tools.ts index f592d77b78..db305118a4 100644 --- a/web/i18n/it-IT/tools.ts +++ b/web/i18n/it-IT/tools.ts @@ -86,7 +86,6 @@ const translation = { 'Http Header Key, Puoi lasciarlo come `Authorization` se non sai cos\'è o impostarlo su un valore personalizzato', types: { none: 'Nessuno', - api_key: 'API Key', apiKeyPlaceholder: 'Nome dell\'intestazione HTTP per API Key', apiValuePlaceholder: 'Inserisci API Key', api_key_query: 'Parametro di query', diff --git a/web/i18n/ja-JP/app.ts b/web/i18n/ja-JP/app.ts index ac30d91261..ae72f188b1 100644 --- a/web/i18n/ja-JP/app.ts +++ b/web/i18n/ja-JP/app.ts @@ -293,6 +293,8 @@ const translation = { knowledgeBases: 'ナレッジベース', workflowNodes: 'ワークフローノード', }, + noMatchingCommands: '一致するコマンドが見つかりません', + tryDifferentSearch: '別の検索語句をお試しください', }, } diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts index 6a75ab4021..4b246200a0 100644 --- a/web/i18n/ko-KR/app.ts +++ b/web/i18n/ko-KR/app.ts @@ -314,6 +314,8 @@ const translation = { resultCount: '{{count}} 개 결과', resultCount_other: '{{count}} 개 결과', inScope: '{{scope}}s 내에서', + tryDifferentSearch: '다른 검색어 사용해 보기', + noMatchingCommands: '일치하는 명령을 찾을 수 없습니다.', }, } diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts index dbf0d90d39..da8bee7ea7 100644 --- a/web/i18n/pl-PL/app.ts +++ b/web/i18n/pl-PL/app.ts @@ -295,6 +295,8 @@ const translation = { resultCount: '{{count}} wynik', resultCount_other: '{{count}} wyników', inScope: 'w {{scope}}s', + noMatchingCommands: 'Nie znaleziono pasujących poleceń', + tryDifferentSearch: 'Spróbuj użyć innego hasła', }, } diff --git a/web/i18n/pl-PL/tools.ts b/web/i18n/pl-PL/tools.ts index f27a0c1e34..dc05f6b239 100644 --- a/web/i18n/pl-PL/tools.ts +++ b/web/i18n/pl-PL/tools.ts @@ -56,7 +56,6 @@ const translation = { 'Klucz nagłówka HTTP, Możesz pozostawić go z 
"Autoryzacja" jeśli nie wiesz co to jest lub ustaw go na niestandardową wartość', types: { none: 'Brak', - api_key: 'Klucz API', apiKeyPlaceholder: 'Nazwa nagłówka HTTP dla Klucza API', apiValuePlaceholder: 'Wprowadź Klucz API', api_key_query: 'Parametr zapytania', diff --git a/web/i18n/pt-BR/app.ts b/web/i18n/pt-BR/app.ts index 6400669849..66e44f7916 100644 --- a/web/i18n/pt-BR/app.ts +++ b/web/i18n/pt-BR/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} resultado', resultCount_other: '{{count}} resultados', inScope: 'em {{scope}}s', + noMatchingCommands: 'Nenhum comando correspondente encontrado', + tryDifferentSearch: 'Tente um termo de pesquisa diferente', }, } diff --git a/web/i18n/pt-BR/tools.ts b/web/i18n/pt-BR/tools.ts index dafe45b3e7..4b12902b0c 100644 --- a/web/i18n/pt-BR/tools.ts +++ b/web/i18n/pt-BR/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'Chave do Cabeçalho HTTP, você pode deixar como "Authorization" se não tiver ideia do que é ou definir um valor personalizado', types: { none: 'Nenhum', - api_key: 'Chave de API', apiKeyPlaceholder: 'Nome do cabeçalho HTTP para a Chave de API', apiValuePlaceholder: 'Digite a Chave de API', api_key_query: 'Parâmetro de consulta', diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts index 56e493b43d..0d58b87ae5 100644 --- a/web/i18n/ro-RO/app.ts +++ b/web/i18n/ro-RO/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} rezultat', resultCount_other: '{{count}} rezultate', inScope: 'în {{scope}}s', + noMatchingCommands: 'Nu s-au găsit comenzi potrivite', + tryDifferentSearch: 'Încercați un alt termen de căutare', }, } diff --git a/web/i18n/ro-RO/tools.ts b/web/i18n/ro-RO/tools.ts index e5eb5bbcd6..71d9fa50f7 100644 --- a/web/i18n/ro-RO/tools.ts +++ b/web/i18n/ro-RO/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'Cheie antet HTTP, puteți lăsa "Autorizare" dacă nu știți ce este sau setați-o la o valoare personalizată', types: { none: 'Niciuna', - api_key: 'Cheie API', apiKeyPlaceholder: 'Nume antet HTTP pentru cheia API', apiValuePlaceholder: 'Introduceți cheia API', api_key_header: 'Antet', diff --git a/web/i18n/ru-RU/app.ts b/web/i18n/ru-RU/app.ts index 7f5f53a668..c00b805c00 100644 --- a/web/i18n/ru-RU/app.ts +++ b/web/i18n/ru-RU/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} результат', resultCount_other: '{{count}} результатов', inScope: 'в {{scope}}s', + noMatchingCommands: 'Соответствующие команды не найдены', + tryDifferentSearch: 'Попробуйте использовать другой поисковый запрос', }, } diff --git a/web/i18n/ru-RU/tools.ts b/web/i18n/ru-RU/tools.ts index 047a845ef3..b02663d86b 100644 --- a/web/i18n/ru-RU/tools.ts +++ b/web/i18n/ru-RU/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'Ключ заголовка HTTP, вы можете оставить его как "Authorization", если не знаете, что это такое, или установить его на пользовательское значение', types: { none: 'Нет', - api_key: 'Ключ API', apiKeyPlaceholder: 'Название заголовка HTTP для ключа API', apiValuePlaceholder: 'Введите ключ API', api_key_header: 'Заголовок', diff --git a/web/i18n/sl-SI/app.ts b/web/i18n/sl-SI/app.ts index 1031c2a32e..49effb6b68 100644 --- a/web/i18n/sl-SI/app.ts +++ b/web/i18n/sl-SI/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} rezultat', resultCount_other: '{{count}} rezultatov', inScope: 'v {{scope}}s', + tryDifferentSearch: 'Poskusite uporabiti drug iskalni izraz', + noMatchingCommands: 'Ujemajoči se ukazi niso našli', }, } diff --git 
a/web/i18n/sl-SI/tools.ts b/web/i18n/sl-SI/tools.ts index bcca2753fb..6a9b4b92bd 100644 --- a/web/i18n/sl-SI/tools.ts +++ b/web/i18n/sl-SI/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'Ključ HTTP glave, pustite kot "Authorization", če ne veste, kaj je to, ali pa nastavite na vrednost po meri', types: { none: 'Brez', - api_key: 'API ključ', apiKeyPlaceholder: 'Ime HTTP glave za API ključ', apiValuePlaceholder: 'Vnesite API ključ', api_key_query: 'Vprašanje Param', diff --git a/web/i18n/th-TH/app.ts b/web/i18n/th-TH/app.ts index b21fff0399..fd7fc15f19 100644 --- a/web/i18n/th-TH/app.ts +++ b/web/i18n/th-TH/app.ts @@ -290,6 +290,8 @@ const translation = { resultCount: '{{count}} ผลลัพธ์', resultCount_other: '{{count}} ผลลัพธ์', inScope: 'ใน {{scope}}s', + noMatchingCommands: 'ไม่พบคําสั่งที่ตรงกัน', + tryDifferentSearch: 'ลองใช้ข้อความค้นหาอื่น', }, } diff --git a/web/i18n/th-TH/tools.ts b/web/i18n/th-TH/tools.ts index 3a32234bf4..54cf5ccd11 100644 --- a/web/i18n/th-TH/tools.ts +++ b/web/i18n/th-TH/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'Http Header Key คุณสามารถปล่อยให้เป็น "การอนุญาต" ได้หากคุณไม่รู้ว่ามันคืออะไรหรือตั้งค่าเป็นค่าที่กําหนดเอง', types: { none: 'ไม่มีใคร', - api_key: 'คีย์ API', apiKeyPlaceholder: 'ชื่อส่วนหัว HTTP สําหรับคีย์ API', apiValuePlaceholder: 'ป้อนคีย์ API', api_key_header: 'หัวเรื่อง', diff --git a/web/i18n/tr-TR/app.ts b/web/i18n/tr-TR/app.ts index 023112b961..b961054728 100644 --- a/web/i18n/tr-TR/app.ts +++ b/web/i18n/tr-TR/app.ts @@ -290,6 +290,8 @@ const translation = { resultCount: '{{count}} sonuç', resultCount_other: '{{count}} sonuç', inScope: '{{scope}}s içinde', + tryDifferentSearch: 'Farklı bir arama terimi deneyin', + noMatchingCommands: 'Eşleşen komut bulunamadı', }, } diff --git a/web/i18n/tr-TR/tools.ts b/web/i18n/tr-TR/tools.ts index 16d01ea44d..890af6e9f2 100644 --- a/web/i18n/tr-TR/tools.ts +++ b/web/i18n/tr-TR/tools.ts @@ -82,7 +82,6 @@ const translation = { keyTooltip: 'Http Başlığı Anahtarı, ne olduğunu bilmiyorsanız "Authorization" olarak bırakabilirsiniz veya özel bir değere ayarlayabilirsiniz', types: { none: 'Yok', - api_key: 'API Anahtarı', apiKeyPlaceholder: 'API Anahtarı için HTTP başlık adı', apiValuePlaceholder: 'API Anahtarını girin', api_key_header: 'Başlık', diff --git a/web/i18n/uk-UA/app.ts b/web/i18n/uk-UA/app.ts index c785b55b42..9686f58d4d 100644 --- a/web/i18n/uk-UA/app.ts +++ b/web/i18n/uk-UA/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} результат', resultCount_other: '{{count}} результатів', inScope: 'у {{scope}}s', + noMatchingCommands: 'Відповідних команд не знайдено', + tryDifferentSearch: 'Спробуйте інший пошуковий термін', }, } diff --git a/web/i18n/uk-UA/tools.ts b/web/i18n/uk-UA/tools.ts index 5c00ecebd3..0b7dd2d1e8 100644 --- a/web/i18n/uk-UA/tools.ts +++ b/web/i18n/uk-UA/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'Ключ HTTP-заголовка. 
Якщо ви не знаєте, залиште його як "Authorization" або встановіть власне значення', types: { none: 'Відсутня', - api_key: 'API-ключ', apiKeyPlaceholder: 'Назва HTTP-заголовка для API-ключа', apiValuePlaceholder: 'Введіть API-ключ', api_key_header: 'Заголовок', diff --git a/web/i18n/vi-VN/app.ts b/web/i18n/vi-VN/app.ts index cca946dd01..be0e709c24 100644 --- a/web/i18n/vi-VN/app.ts +++ b/web/i18n/vi-VN/app.ts @@ -294,6 +294,8 @@ const translation = { resultCount: '{{count}} kết quả', resultCount_other: '{{count}} kết quả', inScope: 'trong {{scope}}s', + tryDifferentSearch: 'Thử một cụm từ tìm kiếm khác', + noMatchingCommands: 'Không tìm thấy lệnh phù hợp', }, } diff --git a/web/i18n/vi-VN/tools.ts b/web/i18n/vi-VN/tools.ts index a4f95cbd9d..afd6683c72 100644 --- a/web/i18n/vi-VN/tools.ts +++ b/web/i18n/vi-VN/tools.ts @@ -54,7 +54,6 @@ const translation = { keyTooltip: 'Khóa tiêu đề HTTP, bạn có thể để trống nếu không biết hoặc đặt một giá trị tùy chỉnh', types: { none: 'Không', - api_key: 'Khóa API', apiKeyPlaceholder: 'Tên tiêu đề HTTP cho Khóa API', apiValuePlaceholder: 'Nhập Khóa API', api_key_query: 'Tham số truy vấn', diff --git a/web/i18n/zh-Hans/app.ts b/web/i18n/zh-Hans/app.ts index 6bb4837a65..89791f073b 100644 --- a/web/i18n/zh-Hans/app.ts +++ b/web/i18n/zh-Hans/app.ts @@ -293,6 +293,8 @@ const translation = { knowledgeBases: '知识库', workflowNodes: '工作流节点', }, + noMatchingCommands: '未找到匹配的命令', + tryDifferentSearch: '请尝试不同的搜索词', }, } diff --git a/web/i18n/zh-Hant/app.ts b/web/i18n/zh-Hant/app.ts index 13c6570d20..4080cde1b4 100644 --- a/web/i18n/zh-Hant/app.ts +++ b/web/i18n/zh-Hant/app.ts @@ -293,6 +293,8 @@ const translation = { resultCount: '{{count}} 個結果', resultCount_other: '{{count}} 個結果', inScope: '在 {{scope}}s 中', + noMatchingCommands: '未找到匹配的命令', + tryDifferentSearch: '嘗試其他搜尋字詞', }, } From cb46726fa4668510f007dca3d10815c714b8dc1a Mon Sep 17 00:00:00 2001 From: NeatGuyCoding <15627489+NeatGuyCoding@users.noreply.github.com> Date: Tue, 12 Aug 2025 18:16:07 +0800 Subject: [PATCH 09/27] Add Test Containers Based Tests for File Service (#23771) --- .../services/test_file_service.py | 913 ++++++++++++++++++ 1 file changed, 913 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_file_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_file_service.py b/api/tests/test_containers_integration_tests/services/test_file_service.py new file mode 100644 index 0000000000..965c9c6242 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_file_service.py @@ -0,0 +1,913 @@ +import hashlib +from io import BytesIO +from unittest.mock import patch + +import pytest +from faker import Faker +from werkzeug.exceptions import NotFound + +from configs import dify_config +from models.account import Account, Tenant +from models.enums import CreatorUserRole +from models.model import EndUser, UploadFile +from services.errors.file import FileTooLargeError, UnsupportedFileTypeError +from services.file_service import FileService + + +class TestFileService: + """Integration tests for FileService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.file_service.storage") as mock_storage, + patch("services.file_service.file_helpers") as mock_file_helpers, + patch("services.file_service.ExtractProcessor") as mock_extract_processor, + ): + # Setup default mock returns + mock_storage.save.return_value = 
None + mock_storage.load.return_value = BytesIO(b"mock file content") + mock_file_helpers.get_signed_file_url.return_value = "https://example.com/signed-url" + mock_file_helpers.verify_image_signature.return_value = True + mock_file_helpers.verify_file_signature.return_value = True + mock_extract_processor.load_from_upload_file.return_value = "extracted text content" + + yield { + "storage": mock_storage, + "file_helpers": mock_file_helpers, + "extract_processor": mock_extract_processor, + } + + def _create_test_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test account for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + Account: Created account instance + """ + fake = Faker() + + # Create account + account = Account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + status="active", + ) + + from extensions.ext_database import db + + db.session.add(account) + db.session.commit() + + # Create tenant for the account + tenant = Tenant( + name=fake.company(), + status="normal", + ) + db.session.add(tenant) + db.session.commit() + + # Create tenant-account join + from models.account import TenantAccountJoin, TenantAccountRole + + join = TenantAccountJoin( + tenant_id=tenant.id, + account_id=account.id, + role=TenantAccountRole.OWNER.value, + current=True, + ) + db.session.add(join) + db.session.commit() + + # Set current tenant for account + account.current_tenant = tenant + + return account + + def _create_test_end_user(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test end user for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + EndUser: Created end user instance + """ + fake = Faker() + + end_user = EndUser( + tenant_id=str(fake.uuid4()), + type="web", + name=fake.name(), + is_anonymous=False, + session_id=fake.uuid4(), + ) + + from extensions.ext_database import db + + db.session.add(end_user) + db.session.commit() + + return end_user + + def _create_test_upload_file(self, db_session_with_containers, mock_external_service_dependencies, account): + """ + Helper method to create a test upload file for testing. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + account: Account instance + + Returns: + UploadFile: Created upload file instance + """ + fake = Faker() + + upload_file = UploadFile( + tenant_id=account.current_tenant_id if hasattr(account, "current_tenant_id") else str(fake.uuid4()), + storage_type="local", + key=f"upload_files/test/{fake.uuid4()}.txt", + name="test_file.txt", + size=1024, + extension="txt", + mime_type="text/plain", + created_by_role=CreatorUserRole.ACCOUNT, + created_by=account.id, + created_at=fake.date_time(), + used=False, + hash=hashlib.sha3_256(b"test content").hexdigest(), + source_url="", + ) + + from extensions.ext_database import db + + db.session.add(upload_file) + db.session.commit() + + return upload_file + + # Test upload_file method + def test_upload_file_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful file upload with valid parameters. 
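+
+        Verifies the returned UploadFile metadata (name, size, extension,
+        MIME type, creator and SHA3-256 content hash), that storage.save is
+        invoked exactly once, and that the record is persisted to the database.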
+ """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test_document.pdf" + content = b"test file content" + mimetype = "application/pdf" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + assert upload_file is not None + assert upload_file.name == filename + assert upload_file.size == len(content) + assert upload_file.extension == "pdf" + assert upload_file.mime_type == mimetype + assert upload_file.created_by == account.id + assert upload_file.created_by_role == CreatorUserRole.ACCOUNT.value + assert upload_file.used is False + assert upload_file.hash == hashlib.sha3_256(content).hexdigest() + + # Verify storage was called + mock_external_service_dependencies["storage"].save.assert_called_once() + + # Verify database state + from extensions.ext_database import db + + db.session.refresh(upload_file) + assert upload_file.id is not None + + def test_upload_file_with_end_user(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file upload with end user instead of account. + """ + fake = Faker() + end_user = self._create_test_end_user(db_session_with_containers, mock_external_service_dependencies) + + filename = "test_image.jpg" + content = b"test image content" + mimetype = "image/jpeg" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=end_user, + ) + + assert upload_file is not None + assert upload_file.created_by == end_user.id + assert upload_file.created_by_role == CreatorUserRole.END_USER.value + + def test_upload_file_with_datasets_source(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file upload with datasets source parameter. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test_document.pdf" + content = b"test file content" + mimetype = "application/pdf" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + source="datasets", + source_url="https://example.com/source", + ) + + assert upload_file is not None + assert upload_file.source_url == "https://example.com/source" + + def test_upload_file_invalid_filename_characters( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file upload with invalid filename characters. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test/file.txt" + content = b"test content" + mimetype = "text/plain" + + with pytest.raises(ValueError, match="Filename contains invalid characters"): + FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + def test_upload_file_filename_too_long(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file upload with filename that exceeds length limit. 
+ """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + # Create a filename longer than 200 characters + long_name = "a" * 250 + filename = f"{long_name}.txt" + content = b"test content" + mimetype = "text/plain" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + # Verify filename was truncated (the logic truncates the base name to 200 chars + extension) + # So the total length should be <= 200 + len(extension) + 1 (for the dot) + assert len(upload_file.name) <= 200 + len(upload_file.extension) + 1 + assert upload_file.name.endswith(".txt") + # Verify the base name was truncated + base_name = upload_file.name[:-4] # Remove .txt + assert len(base_name) <= 200 + + def test_upload_file_datasets_unsupported_type( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file upload for datasets with unsupported file type. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test_image.jpg" + content = b"test content" + mimetype = "image/jpeg" + + with pytest.raises(UnsupportedFileTypeError): + FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + source="datasets", + ) + + def test_upload_file_too_large(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file upload with file size exceeding limit. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "large_image.jpg" + # Create content larger than the limit + content = b"x" * (dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT * 1024 * 1024 + 1) + mimetype = "image/jpeg" + + with pytest.raises(FileTooLargeError): + FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + # Test is_file_size_within_limit method + def test_is_file_size_within_limit_image_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file size check for image files within limit. + """ + extension = "jpg" + file_size = dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT * 1024 * 1024 # Exactly at limit + + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + + assert result is True + + def test_is_file_size_within_limit_video_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file size check for video files within limit. + """ + extension = "mp4" + file_size = dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT * 1024 * 1024 # Exactly at limit + + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + + assert result is True + + def test_is_file_size_within_limit_audio_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file size check for audio files within limit. + """ + extension = "mp3" + file_size = dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT * 1024 * 1024 # Exactly at limit + + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + + assert result is True + + def test_is_file_size_within_limit_document_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file size check for document files within limit. 
+ """ + extension = "pdf" + file_size = dify_config.UPLOAD_FILE_SIZE_LIMIT * 1024 * 1024 # Exactly at limit + + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + + assert result is True + + def test_is_file_size_within_limit_image_exceeded( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file size check for image files exceeding limit. + """ + extension = "jpg" + file_size = dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT * 1024 * 1024 + 1 # Exceeds limit + + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + + assert result is False + + def test_is_file_size_within_limit_unknown_extension( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file size check for unknown file extension. + """ + extension = "xyz" + file_size = dify_config.UPLOAD_FILE_SIZE_LIMIT * 1024 * 1024 # Uses default limit + + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + + assert result is True + + # Test upload_text method + def test_upload_text_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful text upload. + """ + fake = Faker() + text = "This is a test text content" + text_name = "test_text.txt" + + # Mock current_user + with patch("services.file_service.current_user") as mock_current_user: + mock_current_user.current_tenant_id = str(fake.uuid4()) + mock_current_user.id = str(fake.uuid4()) + + upload_file = FileService.upload_text(text=text, text_name=text_name) + + assert upload_file is not None + assert upload_file.name == text_name + assert upload_file.size == len(text) + assert upload_file.extension == "txt" + assert upload_file.mime_type == "text/plain" + assert upload_file.used is True + assert upload_file.used_by == mock_current_user.id + + # Verify storage was called + mock_external_service_dependencies["storage"].save.assert_called_once() + + def test_upload_text_name_too_long(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test text upload with name that exceeds length limit. + """ + fake = Faker() + text = "test content" + long_name = "a" * 250 # Longer than 200 characters + + # Mock current_user + with patch("services.file_service.current_user") as mock_current_user: + mock_current_user.current_tenant_id = str(fake.uuid4()) + mock_current_user.id = str(fake.uuid4()) + + upload_file = FileService.upload_text(text=text, text_name=long_name) + + # Verify name was truncated + assert len(upload_file.name) <= 200 + assert upload_file.name == "a" * 200 + + # Test get_file_preview method + def test_get_file_preview_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful file preview generation. 
+ """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have document extension + upload_file.extension = "pdf" + from extensions.ext_database import db + + db.session.commit() + + result = FileService.get_file_preview(file_id=upload_file.id) + + assert result == "extracted text content" + mock_external_service_dependencies["extract_processor"].load_from_upload_file.assert_called_once() + + def test_get_file_preview_file_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file preview with non-existent file. + """ + fake = Faker() + non_existent_id = str(fake.uuid4()) + + with pytest.raises(NotFound, match="File not found"): + FileService.get_file_preview(file_id=non_existent_id) + + def test_get_file_preview_unsupported_file_type( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file preview with unsupported file type. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have non-document extension + upload_file.extension = "jpg" + from extensions.ext_database import db + + db.session.commit() + + with pytest.raises(UnsupportedFileTypeError): + FileService.get_file_preview(file_id=upload_file.id) + + def test_get_file_preview_text_truncation(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file preview with text that exceeds preview limit. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have document extension + upload_file.extension = "pdf" + from extensions.ext_database import db + + db.session.commit() + + # Mock long text content + long_text = "x" * 5000 # Longer than PREVIEW_WORDS_LIMIT + mock_external_service_dependencies["extract_processor"].load_from_upload_file.return_value = long_text + + result = FileService.get_file_preview(file_id=upload_file.id) + + assert len(result) == 3000 # PREVIEW_WORDS_LIMIT + assert result == "x" * 3000 + + # Test get_image_preview method + def test_get_image_preview_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful image preview generation. 
+ """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have image extension + upload_file.extension = "jpg" + from extensions.ext_database import db + + db.session.commit() + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "test_signature" + + generator, mime_type = FileService.get_image_preview( + file_id=upload_file.id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + assert generator is not None + assert mime_type == upload_file.mime_type + mock_external_service_dependencies["file_helpers"].verify_image_signature.assert_called_once() + + def test_get_image_preview_invalid_signature(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test image preview with invalid signature. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Mock invalid signature + mock_external_service_dependencies["file_helpers"].verify_image_signature.return_value = False + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "invalid_signature" + + with pytest.raises(NotFound, match="File not found or signature is invalid"): + FileService.get_image_preview( + file_id=upload_file.id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + def test_get_image_preview_file_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test image preview with non-existent file. + """ + fake = Faker() + non_existent_id = str(fake.uuid4()) + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "test_signature" + + with pytest.raises(NotFound, match="File not found or signature is invalid"): + FileService.get_image_preview( + file_id=non_existent_id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + def test_get_image_preview_unsupported_file_type( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test image preview with non-image file type. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have non-image extension + upload_file.extension = "pdf" + from extensions.ext_database import db + + db.session.commit() + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "test_signature" + + with pytest.raises(UnsupportedFileTypeError): + FileService.get_image_preview( + file_id=upload_file.id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + # Test get_file_generator_by_file_id method + def test_get_file_generator_by_file_id_success( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test successful file generator retrieval. 
+ """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "test_signature" + + generator, file_obj = FileService.get_file_generator_by_file_id( + file_id=upload_file.id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + assert generator is not None + assert file_obj == upload_file + mock_external_service_dependencies["file_helpers"].verify_file_signature.assert_called_once() + + def test_get_file_generator_by_file_id_invalid_signature( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file generator retrieval with invalid signature. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Mock invalid signature + mock_external_service_dependencies["file_helpers"].verify_file_signature.return_value = False + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "invalid_signature" + + with pytest.raises(NotFound, match="File not found or signature is invalid"): + FileService.get_file_generator_by_file_id( + file_id=upload_file.id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + def test_get_file_generator_by_file_id_file_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file generator retrieval with non-existent file. + """ + fake = Faker() + non_existent_id = str(fake.uuid4()) + + timestamp = "1234567890" + nonce = "test_nonce" + sign = "test_signature" + + with pytest.raises(NotFound, match="File not found or signature is invalid"): + FileService.get_file_generator_by_file_id( + file_id=non_existent_id, + timestamp=timestamp, + nonce=nonce, + sign=sign, + ) + + # Test get_public_image_preview method + def test_get_public_image_preview_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful public image preview generation. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have image extension + upload_file.extension = "jpg" + from extensions.ext_database import db + + db.session.commit() + + generator, mime_type = FileService.get_public_image_preview(file_id=upload_file.id) + + assert generator is not None + assert mime_type == upload_file.mime_type + mock_external_service_dependencies["storage"].load.assert_called_once() + + def test_get_public_image_preview_file_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test public image preview with non-existent file. + """ + fake = Faker() + non_existent_id = str(fake.uuid4()) + + with pytest.raises(NotFound, match="File not found or signature is invalid"): + FileService.get_public_image_preview(file_id=non_existent_id) + + def test_get_public_image_preview_unsupported_file_type( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test public image preview with non-image file type. 
+ """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + upload_file = self._create_test_upload_file( + db_session_with_containers, mock_external_service_dependencies, account + ) + + # Update file to have non-image extension + upload_file.extension = "pdf" + from extensions.ext_database import db + + db.session.commit() + + with pytest.raises(UnsupportedFileTypeError): + FileService.get_public_image_preview(file_id=upload_file.id) + + # Test edge cases and boundary conditions + def test_upload_file_empty_content(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file upload with empty content. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "empty.txt" + content = b"" + mimetype = "text/plain" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + assert upload_file is not None + assert upload_file.size == 0 + + def test_upload_file_special_characters_in_name( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file upload with special characters in filename (but valid ones). + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test-file_with_underscores_and.dots.txt" + content = b"test content" + mimetype = "text/plain" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + assert upload_file is not None + assert upload_file.name == filename + + def test_upload_file_different_case_extensions( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test file upload with different case extensions. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test.PDF" + content = b"test content" + mimetype = "application/pdf" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + ) + + assert upload_file is not None + assert upload_file.extension == "pdf" # Should be converted to lowercase + + def test_upload_text_empty_text(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test text upload with empty text. + """ + fake = Faker() + text = "" + text_name = "empty.txt" + + # Mock current_user + with patch("services.file_service.current_user") as mock_current_user: + mock_current_user.current_tenant_id = str(fake.uuid4()) + mock_current_user.id = str(fake.uuid4()) + + upload_file = FileService.upload_text(text=text, text_name=text_name) + + assert upload_file is not None + assert upload_file.size == 0 + + def test_file_size_limits_edge_cases(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file size limits with edge case values. 
+ """ + # Test exactly at limit + for extension, limit_config in [ + ("jpg", dify_config.UPLOAD_IMAGE_FILE_SIZE_LIMIT), + ("mp4", dify_config.UPLOAD_VIDEO_FILE_SIZE_LIMIT), + ("mp3", dify_config.UPLOAD_AUDIO_FILE_SIZE_LIMIT), + ("pdf", dify_config.UPLOAD_FILE_SIZE_LIMIT), + ]: + file_size = limit_config * 1024 * 1024 + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + assert result is True + + # Test one byte over limit + file_size = limit_config * 1024 * 1024 + 1 + result = FileService.is_file_size_within_limit(extension=extension, file_size=file_size) + assert result is False + + def test_upload_file_with_source_url(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test file upload with source URL that gets overridden by signed URL. + """ + fake = Faker() + account = self._create_test_account(db_session_with_containers, mock_external_service_dependencies) + + filename = "test.pdf" + content = b"test content" + mimetype = "application/pdf" + source_url = "https://original-source.com/file.pdf" + + upload_file = FileService.upload_file( + filename=filename, + content=content, + mimetype=mimetype, + user=account, + source_url=source_url, + ) + + # When source_url is provided, it should be preserved + assert upload_file.source_url == source_url + + # The signed URL should only be set when source_url is empty + # Let's test that scenario + upload_file2 = FileService.upload_file( + filename="test2.pdf", + content=b"test content 2", + mimetype="application/pdf", + user=account, + source_url="", # Empty source_url + ) + + # Should have the signed URL when source_url is empty + assert upload_file2.source_url == "https://example.com/signed-url" From c7f36d1a5ab395d0b4480e536e00bee7b1138d7b Mon Sep 17 00:00:00 2001 From: cathy <38449456+CathyL0@users.noreply.github.com> Date: Tue, 12 Aug 2025 18:17:19 +0800 Subject: [PATCH 10/27] chore: goto anything mouse keyboard interaction (#23805) --- web/app/components/goto-anything/command-selector.tsx | 2 +- web/app/components/goto-anything/index.tsx | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/web/app/components/goto-anything/command-selector.tsx b/web/app/components/goto-anything/command-selector.tsx index d66a22eab7..c2a266969f 100644 --- a/web/app/components/goto-anything/command-selector.tsx +++ b/web/app/components/goto-anything/command-selector.tsx @@ -61,7 +61,7 @@ const CommandSelector: FC = ({ actions, onCommandSelect, searchFilter, co className="flex cursor-pointer items-center rounded-md p-2.5 transition-all - duration-150 hover:bg-state-base-hover aria-[selected=true]:bg-state-base-hover" + duration-150 hover:bg-state-base-hover-alt aria-[selected=true]:bg-state-base-hover" onSelect={() => onCommandSelect(action.shortcut)} > diff --git a/web/app/components/goto-anything/index.tsx b/web/app/components/goto-anything/index.tsx index 29789e11ed..bdd84f4f22 100644 --- a/web/app/components/goto-anything/index.tsx +++ b/web/app/components/goto-anything/index.tsx @@ -245,6 +245,7 @@ const GotoAnything: FC = ({ className='outline-none' value={cmdVal} onValueChange={setCmdVal} + disablePointerSelection >
@@ -322,7 +323,7 @@ const GotoAnything: FC = ({ handleNavigate(result)} > {result.icon} From 97693188758599133100fd2d4677e09ae41b72ac Mon Sep 17 00:00:00 2001 From: Yongtao Huang Date: Tue, 12 Aug 2025 23:27:31 +0800 Subject: [PATCH 11/27] Fix missing import in app.ts (#23831) --- web/models/app.ts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/web/models/app.ts b/web/models/app.ts index 5798670426..630dba9c19 100644 --- a/web/models/app.ts +++ b/web/models/app.ts @@ -1,9 +1,9 @@ -import type { AliyunConfig, LangFuseConfig, LangSmithConfig, OpikConfig, PhoenixConfig, TracingProvider, WeaveConfig } from '@/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/type' -import type { App, AppTemplate, SiteConfig } from '@/types/app' +import type { AliyunConfig, ArizeConfig, LangFuseConfig, LangSmithConfig, OpikConfig, PhoenixConfig, TracingProvider, WeaveConfig } from '@/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/type' +import type { App, AppMode, AppTemplate, SiteConfig } from '@/types/app' import type { Dependency } from '@/app/components/plugins/types' /* export type App = { - id: strin + id: string name: string description: string mode: AppMode From a77dfb69b0b1edacd13c5d60eca64a4097e9bf32 Mon Sep 17 00:00:00 2001 From: Bowen Liang Date: Tue, 12 Aug 2025 23:41:39 +0800 Subject: [PATCH 12/27] chore: update uv to 0.8.9 (#23833) --- .github/actions/setup-uv/action.yml | 4 ++-- api/Dockerfile | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/setup-uv/action.yml b/.github/actions/setup-uv/action.yml index 0499b44dba..6990f6becf 100644 --- a/.github/actions/setup-uv/action.yml +++ b/.github/actions/setup-uv/action.yml @@ -8,7 +8,7 @@ inputs: uv-version: description: UV version to set up required: true - default: '~=0.7.11' + default: '0.8.9' uv-lockfile: description: Path to the UV lockfile to restore cache from required: true @@ -26,7 +26,7 @@ runs: python-version: ${{ inputs.python-version }} - name: Install uv - uses: astral-sh/setup-uv@v5 + uses: astral-sh/setup-uv@v6 with: version: ${{ inputs.uv-version }} python-version: ${{ inputs.python-version }} diff --git a/api/Dockerfile b/api/Dockerfile index d69291f7ea..79a4892768 100644 --- a/api/Dockerfile +++ b/api/Dockerfile @@ -4,7 +4,7 @@ FROM python:3.12-slim-bookworm AS base WORKDIR /app/api # Install uv -ENV UV_VERSION=0.7.11 +ENV UV_VERSION=0.8.9 RUN pip install --no-cache-dir uv==${UV_VERSION} From 973a390298ce2ff5038a8a8a82ce198f3e42bdaf Mon Sep 17 00:00:00 2001 From: GuanMu Date: Tue, 12 Aug 2025 23:47:50 +0800 Subject: [PATCH 13/27] Feature/run cmd (#23822) --- .../goto-anything/actions/command-bus.ts | 26 +++++ .../components/goto-anything/actions/index.ts | 2 + .../goto-anything/actions/run-language.tsx | 33 +++++++ .../goto-anything/actions/run-theme.tsx | 61 ++++++++++++ .../components/goto-anything/actions/run.tsx | 97 +++++++++++++++++++ .../components/goto-anything/actions/types.ts | 10 +- .../goto-anything/command-selector.tsx | 1 + web/app/components/goto-anything/index.tsx | 17 +++- web/i18n/en-US/app.ts | 13 +++ web/i18n/zh-Hans/app.ts | 13 +++ 10 files changed, 267 insertions(+), 6 deletions(-) create mode 100644 web/app/components/goto-anything/actions/command-bus.ts create mode 100644 web/app/components/goto-anything/actions/run-language.tsx create mode 100644 web/app/components/goto-anything/actions/run-theme.tsx create mode 100644 web/app/components/goto-anything/actions/run.tsx diff --git 
a/web/app/components/goto-anything/actions/command-bus.ts b/web/app/components/goto-anything/actions/command-bus.ts new file mode 100644 index 0000000000..6ff7e81a23 --- /dev/null +++ b/web/app/components/goto-anything/actions/command-bus.ts @@ -0,0 +1,26 @@ +export type CommandHandler = (args?: Record) => void | Promise + +const handlers = new Map() + +export const registerCommand = (name: string, handler: CommandHandler) => { + handlers.set(name, handler) +} + +export const unregisterCommand = (name: string) => { + handlers.delete(name) +} + +export const executeCommand = async (name: string, args?: Record) => { + const handler = handlers.get(name) + if (!handler) + return + await handler(args) +} + +export const registerCommands = (map: Record) => { + Object.entries(map).forEach(([name, handler]) => registerCommand(name, handler)) +} + +export const unregisterCommands = (names: string[]) => { + names.forEach(unregisterCommand) +} diff --git a/web/app/components/goto-anything/actions/index.ts b/web/app/components/goto-anything/actions/index.ts index 87369784a1..0f25194dbe 100644 --- a/web/app/components/goto-anything/actions/index.ts +++ b/web/app/components/goto-anything/actions/index.ts @@ -3,11 +3,13 @@ import { knowledgeAction } from './knowledge' import { pluginAction } from './plugin' import { workflowNodesAction } from './workflow-nodes' import type { ActionItem, SearchResult } from './types' +import { commandAction } from './run' export const Actions = { app: appAction, knowledge: knowledgeAction, plugin: pluginAction, + run: commandAction, node: workflowNodesAction, } diff --git a/web/app/components/goto-anything/actions/run-language.tsx b/web/app/components/goto-anything/actions/run-language.tsx new file mode 100644 index 0000000000..0076fec0a0 --- /dev/null +++ b/web/app/components/goto-anything/actions/run-language.tsx @@ -0,0 +1,33 @@ +import type { CommandSearchResult } from './types' +import { languages } from '@/i18n-config/language' +import { RiTranslate } from '@remixicon/react' +import i18n from '@/i18n-config/i18next-config' + +export const buildLanguageCommands = (query: string): CommandSearchResult[] => { + const q = query.toLowerCase() + const list = languages.filter(item => item.supported && ( + !q || item.name.toLowerCase().includes(q) || String(item.value).toLowerCase().includes(q) + )) + return list.map(item => ({ + id: `lang-${item.value}`, + title: item.name, + description: i18n.t('app.gotoAnything.actions.languageChangeDesc'), + type: 'command' as const, + data: { command: 'i18n.set', args: { locale: item.value } }, + })) +} + +export const buildLanguageRootItem = (): CommandSearchResult => { + return { + id: 'category-language', + title: i18n.t('app.gotoAnything.actions.languageCategoryTitle'), + description: i18n.t('app.gotoAnything.actions.languageCategoryDesc'), + type: 'command', + icon: ( +
+ +
+ ), + data: { command: 'nav.search', args: { query: '@run language ' } }, + } +} diff --git a/web/app/components/goto-anything/actions/run-theme.tsx b/web/app/components/goto-anything/actions/run-theme.tsx new file mode 100644 index 0000000000..9f72844ee9 --- /dev/null +++ b/web/app/components/goto-anything/actions/run-theme.tsx @@ -0,0 +1,61 @@ +import type { CommandSearchResult } from './types' +import type { ReactNode } from 'react' +import { RiComputerLine, RiMoonLine, RiPaletteLine, RiSunLine } from '@remixicon/react' +import i18n from '@/i18n-config/i18next-config' + +const THEME_ITEMS: { id: 'light' | 'dark' | 'system'; titleKey: string; descKey: string; icon: ReactNode }[] = [ + { + id: 'system', + titleKey: 'app.gotoAnything.actions.themeSystem', + descKey: 'app.gotoAnything.actions.themeSystemDesc', + icon: , + }, + { + id: 'light', + titleKey: 'app.gotoAnything.actions.themeLight', + descKey: 'app.gotoAnything.actions.themeLightDesc', + icon: , + }, + { + id: 'dark', + titleKey: 'app.gotoAnything.actions.themeDark', + descKey: 'app.gotoAnything.actions.themeDarkDesc', + icon: , + }, +] + +export const buildThemeCommands = (query: string, locale?: string): CommandSearchResult[] => { + const q = query.toLowerCase() + const list = THEME_ITEMS.filter(item => + !q + || i18n.t(item.titleKey, { lng: locale }).toLowerCase().includes(q) + || item.id.includes(q), + ) + return list.map(item => ({ + id: item.id, + title: i18n.t(item.titleKey, { lng: locale }), + description: i18n.t(item.descKey, { lng: locale }), + type: 'command' as const, + icon: ( +
+ {item.icon} +
+ ), + data: { command: 'theme.set', args: { value: item.id } }, + })) +} + +export const buildThemeRootItem = (): CommandSearchResult => { + return { + id: 'category-theme', + title: i18n.t('app.gotoAnything.actions.themeCategoryTitle'), + description: i18n.t('app.gotoAnything.actions.themeCategoryDesc'), + type: 'command', + icon: ( +
+ +
+ ), + data: { command: 'nav.search', args: { query: '@run theme ' } }, + } +} diff --git a/web/app/components/goto-anything/actions/run.tsx b/web/app/components/goto-anything/actions/run.tsx new file mode 100644 index 0000000000..624cf942d5 --- /dev/null +++ b/web/app/components/goto-anything/actions/run.tsx @@ -0,0 +1,97 @@ +'use client' +import { useEffect } from 'react' +import type { ActionItem, CommandSearchResult } from './types' +import { buildLanguageCommands, buildLanguageRootItem } from './run-language' +import { buildThemeCommands, buildThemeRootItem } from './run-theme' +import i18n from '@/i18n-config/i18next-config' +import { executeCommand, registerCommands, unregisterCommands } from './command-bus' +import { useTheme } from 'next-themes' +import { setLocaleOnClient } from '@/i18n-config' + +const rootParser = (query: string): CommandSearchResult[] => { + const q = query.toLowerCase() + const items: CommandSearchResult[] = [] + if (!q || 'theme'.includes(q)) + items.push(buildThemeRootItem()) + if (!q || 'language'.includes(q) || 'lang'.includes(q)) + items.push(buildLanguageRootItem()) + return items +} + +type RunContext = { + setTheme?: (value: 'light' | 'dark' | 'system') => void + setLocale?: (locale: string) => Promise + search?: (query: string) => void +} + +export const commandAction: ActionItem = { + key: '@run', + shortcut: '@run', + title: i18n.t('app.gotoAnything.actions.runTitle'), + description: i18n.t('app.gotoAnything.actions.runDesc'), + action: (result) => { + if (result.type !== 'command') return + const { command, args } = result.data + if (command === 'theme.set') { + executeCommand('theme.set', args) + return + } + if (command === 'i18n.set') { + executeCommand('i18n.set', args) + return + } + if (command === 'nav.search') + executeCommand('nav.search', args) + }, + search: async (_, searchTerm = '') => { + const q = searchTerm.trim() + if (q.startsWith('theme')) + return buildThemeCommands(q.replace(/^theme\s*/, ''), i18n.language) + if (q.startsWith('language') || q.startsWith('lang')) + return buildLanguageCommands(q.replace(/^(language|lang)\s*/, '')) + + // root categories + return rootParser(q) + }, +} + +// Register/unregister default handlers for @run commands with external dependencies. 
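+// The command bus decouples the palette UI from its effects: '@run' results
+// only carry a command name plus args, and these handlers map them onto the
+// actual side effects (next-themes' setTheme, setLocaleOnClient, or reopening
+// the search with a prefilled query). They are unregistered on unmount so a
+// stale closure is never invoked.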
+export const registerRunCommands = (deps: { + setTheme?: (value: 'light' | 'dark' | 'system') => void + setLocale?: (locale: string) => Promise + search?: (query: string) => void +}) => { + registerCommands({ + 'theme.set': async (args) => { + deps.setTheme?.(args?.value) + }, + 'i18n.set': async (args) => { + const locale = args?.locale + if (locale) + await deps.setLocale?.(locale) + }, + 'nav.search': (args) => { + const q = args?.query + if (q) + deps.search?.(q) + }, + }) +} + +export const unregisterRunCommands = () => { + unregisterCommands(['theme.set', 'i18n.set', 'nav.search']) +} + +export const RunCommandProvider = ({ onNavSearch }: { onNavSearch?: (q: string) => void }) => { + const theme = useTheme() + useEffect(() => { + registerRunCommands({ + setTheme: theme.setTheme, + setLocale: setLocaleOnClient, + search: onNavSearch, + }) + return () => unregisterRunCommands() + }, [theme.setTheme, onNavSearch]) + + return null +} diff --git a/web/app/components/goto-anything/actions/types.ts b/web/app/components/goto-anything/actions/types.ts index 7a838737cb..a95e28eecc 100644 --- a/web/app/components/goto-anything/actions/types.ts +++ b/web/app/components/goto-anything/actions/types.ts @@ -5,7 +5,7 @@ import type { Plugin } from '../../plugins/types' import type { DataSet } from '@/models/datasets' import type { CommonNodeType } from '../../workflow/types' -export type SearchResultType = 'app' | 'knowledge' | 'plugin' | 'workflow-node' +export type SearchResultType = 'app' | 'knowledge' | 'plugin' | 'workflow-node' | 'command' export type BaseSearchResult = { id: string @@ -37,10 +37,14 @@ export type WorkflowNodeSearchResult = { } } & BaseSearchResult -export type SearchResult = AppSearchResult | PluginSearchResult | KnowledgeSearchResult | WorkflowNodeSearchResult +export type CommandSearchResult = { + type: 'command' +} & BaseSearchResult<{ command: string; args?: Record }> + +export type SearchResult = AppSearchResult | PluginSearchResult | KnowledgeSearchResult | WorkflowNodeSearchResult | CommandSearchResult export type ActionItem = { - key: '@app' | '@knowledge' | '@plugin' | '@node' + key: '@app' | '@knowledge' | '@plugin' | '@node' | '@run' shortcut: string title: string | TypeWithI18N description: string diff --git a/web/app/components/goto-anything/command-selector.tsx b/web/app/components/goto-anything/command-selector.tsx index c2a266969f..2b62c92a59 100644 --- a/web/app/components/goto-anything/command-selector.tsx +++ b/web/app/components/goto-anything/command-selector.tsx @@ -73,6 +73,7 @@ const CommandSelector: FC = ({ actions, onCommandSelect, searchFilter, co '@app': 'app.gotoAnything.actions.searchApplicationsDesc', '@plugin': 'app.gotoAnything.actions.searchPluginsDesc', '@knowledge': 'app.gotoAnything.actions.searchKnowledgeBasesDesc', + '@run': 'app.gotoAnything.actions.runDesc', '@node': 'app.gotoAnything.actions.searchWorkflowNodesDesc', } return t(keyMap[action.key]) diff --git a/web/app/components/goto-anything/index.tsx b/web/app/components/goto-anything/index.tsx index bdd84f4f22..2d2d56eea1 100644 --- a/web/app/components/goto-anything/index.tsx +++ b/web/app/components/goto-anything/index.tsx @@ -18,6 +18,7 @@ import InstallFromMarketplace from '../plugins/install-plugin/install-from-marke import type { Plugin } from '../plugins/types' import { Command } from 'cmdk' import CommandSelector from './command-selector' +import { RunCommandProvider } from './actions/run' type Props = { onHide?: () => void @@ -33,7 +34,11 @@ const GotoAnything: FC = ({ 
const [searchQuery, setSearchQuery] = useState('') const [cmdVal, setCmdVal] = useState('') const inputRef = useRef(null) - + const handleNavSearch = useCallback((q: string) => { + setShow(true) + setSearchQuery(q) + requestAnimationFrame(() => inputRef.current?.focus()) + }, []) // Filter actions based on context const Actions = useMemo(() => { // Create a filtered copy of actions based on current page context @@ -43,8 +48,8 @@ const GotoAnything: FC = ({ } else { // Exclude node action on non-workflow pages - const { app, knowledge, plugin } = AllActions - return { app, knowledge, plugin } + const { app, knowledge, plugin, run } = AllActions + return { app, knowledge, plugin, run } } }, [isWorkflowPage]) @@ -128,6 +133,11 @@ const GotoAnything: FC = ({ setSearchQuery('') switch (result.type) { + case 'command': { + const action = Object.values(Actions).find(a => a.key === '@run') + action?.action?.(result) + break + } case 'plugin': setActivePlugin(result.data) break @@ -381,6 +391,7 @@ const GotoAnything: FC = ({
+ { activePlugin && ( Date: Tue, 12 Aug 2025 23:56:50 +0800 Subject: [PATCH 14/27] chore: translate i18n files (#23841) Co-authored-by: crazywoola <100913391+crazywoola@users.noreply.github.com> --- web/i18n/de-DE/app.ts | 13 +++++++++++++ web/i18n/es-ES/app.ts | 13 +++++++++++++ web/i18n/fa-IR/app.ts | 13 +++++++++++++ web/i18n/fr-FR/app.ts | 13 +++++++++++++ web/i18n/hi-IN/app.ts | 13 +++++++++++++ web/i18n/it-IT/app.ts | 13 +++++++++++++ web/i18n/ja-JP/app.ts | 13 +++++++++++++ web/i18n/ko-KR/app.ts | 13 +++++++++++++ web/i18n/pl-PL/app.ts | 13 +++++++++++++ web/i18n/pt-BR/app.ts | 13 +++++++++++++ web/i18n/ro-RO/app.ts | 13 +++++++++++++ web/i18n/ru-RU/app.ts | 13 +++++++++++++ web/i18n/sl-SI/app.ts | 13 +++++++++++++ web/i18n/th-TH/app.ts | 13 +++++++++++++ web/i18n/tr-TR/app.ts | 13 +++++++++++++ web/i18n/uk-UA/app.ts | 13 +++++++++++++ web/i18n/vi-VN/app.ts | 13 +++++++++++++ web/i18n/zh-Hant/app.ts | 13 +++++++++++++ 18 files changed, 234 insertions(+) diff --git a/web/i18n/de-DE/app.ts b/web/i18n/de-DE/app.ts index bead4e68c8..30e94a3c42 100644 --- a/web/i18n/de-DE/app.ts +++ b/web/i18n/de-DE/app.ts @@ -265,6 +265,19 @@ const translation = { searchApplicationsDesc: 'Suchen und navigieren Sie zu Ihren Anwendungen', searchPluginsDesc: 'Suchen und navigieren Sie zu Ihren Plugins', searchWorkflowNodesDesc: 'Suchen und Springen zu Knoten im aktuellen Workflow nach Name oder Typ', + themeCategoryDesc: 'Anwendungsthema wechseln', + themeSystemDesc: 'Folgen Sie dem Aussehen Ihres Betriebssystems', + themeLight: 'Helles Design', + runTitle: 'Befehle', + languageCategoryTitle: 'Sprache', + themeDark: 'Dunkles Thema', + languageChangeDesc: 'UI-Sprache ändern', + languageCategoryDesc: 'Wechseln Sie die Schnittstellensprache', + themeSystem: 'Systemthema', + themeLightDesc: 'Verwenden Sie das helle Erscheinungsbild', + themeDarkDesc: 'Verwenden Sie das dunkle Erscheinungsbild', + runDesc: 'Führen Sie schnelle Befehle aus (Thema, Sprache, ...)', + themeCategoryTitle: 'Thema', }, emptyState: { noPluginsFound: 'Keine Plugins gefunden', diff --git a/web/i18n/es-ES/app.ts b/web/i18n/es-ES/app.ts index 24fa7680f4..cf88462d34 100644 --- a/web/i18n/es-ES/app.ts +++ b/web/i18n/es-ES/app.ts @@ -263,6 +263,19 @@ const translation = { searchApplicationsDesc: 'Buscar y navegar a sus aplicaciones', searchPluginsDesc: 'Busca y navega a tus plugins', searchWorkflowNodesHelp: 'Esta función solo funciona cuando se visualiza un flujo de trabajo. 
Primero vaya a un flujo de trabajo.', + languageCategoryTitle: 'Idioma', + runDesc: 'Ejecutar comandos rápidos (tema, idioma, ...)', + runTitle: 'Comandos', + themeSystem: 'Tema del sistema', + themeDark: 'Tema oscuro', + themeLight: 'Tema Claro', + themeCategoryTitle: 'Tema', + themeLightDesc: 'Usar apariencia clara', + themeSystemDesc: 'Sigue la apariencia de tu sistema operativo', + languageChangeDesc: 'Cambiar el idioma de la interfaz', + themeDarkDesc: 'Usar apariencia oscura', + languageCategoryDesc: 'Cambiar el idioma de la interfaz', + themeCategoryDesc: 'Cambiar el tema de la aplicación', }, emptyState: { noAppsFound: 'No se encontraron aplicaciones', diff --git a/web/i18n/fa-IR/app.ts b/web/i18n/fa-IR/app.ts index 9c470273a6..dd9bda3223 100644 --- a/web/i18n/fa-IR/app.ts +++ b/web/i18n/fa-IR/app.ts @@ -263,6 +263,19 @@ const translation = { searchKnowledgeBasesDesc: 'پایگاه های دانش خود را جستجو کرده و به آن ناوبری کنید', searchPluginsDesc: 'افزونه های خود را جستجو کرده و به آنها پیمایش کنید', searchWorkflowNodesDesc: 'گره ها را در گردش کار فعلی بر اساس نام یا نوع پیدا کنید و به آنها بروید', + themeCategoryTitle: 'تم', + languageCategoryTitle: 'زبان', + runTitle: 'دستورات', + themeSystem: 'تم سیستم', + themeLight: 'تم روشن', + themeDark: 'تم تاریک', + languageCategoryDesc: 'زبان رابط را تغییر دهید', + themeCategoryDesc: 'تغییر تم برنامه', + themeDarkDesc: 'از ظاهر تیره استفاده کنید', + themeLightDesc: 'از ظاهر روشن استفاده کنید', + languageChangeDesc: 'زبان رابط کاربری را تغییر دهید', + themeSystemDesc: 'به ظاهر سیستم‌عامل خود پایبند باشید', + runDesc: 'دستورات سریع اجرا کنید (موضوع، زبان، ...)', }, emptyState: { noKnowledgeBasesFound: 'هیچ پایگاه دانش یافت نشد', diff --git a/web/i18n/fr-FR/app.ts b/web/i18n/fr-FR/app.ts index d0aab89918..e4817a6721 100644 --- a/web/i18n/fr-FR/app.ts +++ b/web/i18n/fr-FR/app.ts @@ -263,6 +263,19 @@ const translation = { searchKnowledgeBases: 'Rechercher dans les bases de connaissances', searchApplications: 'Rechercher des applications', searchWorkflowNodesHelp: 'Cette fonctionnalité ne fonctionne que lors de l\'affichage d\'un flux de travail. 
Accédez d\'abord à un flux de travail.', + runTitle: 'Commandes', + languageCategoryTitle: 'Langue', + themeSystem: 'Thème du système', + themeDark: 'Thème Sombre', + themeCategoryTitle: 'Thème', + themeLight: 'Thème clair', + themeCategoryDesc: 'Changer le thème de l\'application', + themeLightDesc: 'Utiliser une apparence légère', + languageChangeDesc: 'Changer la langue de l\'interface', + themeDarkDesc: 'Utiliser l\'apparence sombre', + themeSystemDesc: 'Suivez l\'apparence de votre système d\'exploitation', + languageCategoryDesc: 'Changer la langue de l\'interface', + runDesc: 'Exécuter des commandes rapides (thème, langue, ...)', }, emptyState: { noKnowledgeBasesFound: 'Aucune base de connaissances trouvée', diff --git a/web/i18n/hi-IN/app.ts b/web/i18n/hi-IN/app.ts index fc799f335c..bcdaa6002a 100644 --- a/web/i18n/hi-IN/app.ts +++ b/web/i18n/hi-IN/app.ts @@ -263,6 +263,19 @@ const translation = { searchKnowledgeBasesDesc: 'अपने ज्ञान आधारों की खोज करें और उन्हें नेविगेट करें', searchApplicationsDesc: 'अपने अनुप्रयोगों की खोज करें और उन्हें नेविगेट करें', searchWorkflowNodesHelp: 'यह सुविधा केवल तब काम करती है जब आप एक कार्यप्रवाह देख रहे हों। पहले एक कार्यप्रवाह पर जाएं।', + themeCategoryTitle: 'थीम', + runTitle: 'आदेश', + languageCategoryTitle: 'भाषा', + languageCategoryDesc: 'इंटरफेस भाषा बदलें', + themeSystem: 'सिस्टम थीम', + themeLight: 'लाइट थीम', + themeDarkDesc: 'अंधेरे रूप का उपयोग करें', + themeDark: 'डार्क थीम', + themeLightDesc: 'हल्की उपस्थिति का प्रयोग करें', + languageChangeDesc: 'यूआई भाषा बदलें', + themeCategoryDesc: 'ऐप्लिकेशन थीम बदलें', + themeSystemDesc: 'अपने ऑपरेटिंग सिस्टम की उपस्थिति का पालन करें', + runDesc: 'त्वरित आदेश चलाएँ (थीम, भाषा, ...)', }, emptyState: { noPluginsFound: 'कोई प्लगइन नहीं मिले', diff --git a/web/i18n/it-IT/app.ts b/web/i18n/it-IT/app.ts index f4a2c6138c..e7ed57c4a4 100644 --- a/web/i18n/it-IT/app.ts +++ b/web/i18n/it-IT/app.ts @@ -269,6 +269,19 @@ const translation = { searchPlugins: 'Plugin di ricerca', searchWorkflowNodesDesc: 'Trovare e passare ai nodi nel flusso di lavoro corrente in base al nome o al tipo', searchKnowledgeBases: 'Cerca nelle Basi di Conoscenza', + themeCategoryTitle: 'Tema', + themeDarkDesc: 'Usa l\'aspetto scuro', + languageCategoryTitle: 'Lingua', + themeLight: 'Tema Chiaro', + themeSystem: 'Tema di sistema', + runTitle: 'Comandi', + themeDark: 'Tema scuro', + themeLightDesc: 'Usa un aspetto chiaro', + themeCategoryDesc: 'Cambia tema dell\'applicazione', + languageCategoryDesc: 'Cambia lingua dell\'interfaccia', + languageChangeDesc: 'Cambia lingua dell\'interfaccia', + runDesc: 'Esegui comandi rapidi (tema, lingua, ...)', + themeSystemDesc: 'Segui l\'aspetto del tuo sistema operativo', }, emptyState: { noKnowledgeBasesFound: 'Nessuna base di conoscenza trovata', diff --git a/web/i18n/ja-JP/app.ts b/web/i18n/ja-JP/app.ts index ae72f188b1..c13fa90a0e 100644 --- a/web/i18n/ja-JP/app.ts +++ b/web/i18n/ja-JP/app.ts @@ -278,6 +278,19 @@ const translation = { searchWorkflowNodes: 'ワークフローノードを検索', searchWorkflowNodesDesc: '現在のワークフロー内のノードを名前またはタイプで検索してジャンプ', searchWorkflowNodesHelp: 'この機能はワークフロー表示時のみ利用できます。まずワークフローに移動してください。', + themeCategoryTitle: 'テーマ', + runTitle: 'コマンド', + languageCategoryTitle: '言語', + themeLightDesc: '明るい外観を使用する', + themeSystemDesc: 'OSの外観に従ってください', + themeLight: 'ライトテーマ', + themeDark: 'ダークテーマ', + languageChangeDesc: 'UI言語を変更する', + themeDarkDesc: 'ダークモードを使用する', + themeSystem: 'システムテーマ', + languageCategoryDesc: 'インターフェース言語を切り替える', + themeCategoryDesc: 'アプリケーションのテーマを切り替える', + runDesc: 
'クイックコマンドを実行する(テーマ、言語、...)', }, emptyState: { noAppsFound: 'アプリが見つかりません', diff --git a/web/i18n/ko-KR/app.ts b/web/i18n/ko-KR/app.ts index 4b246200a0..741b97e0c7 100644 --- a/web/i18n/ko-KR/app.ts +++ b/web/i18n/ko-KR/app.ts @@ -283,6 +283,19 @@ const translation = { searchKnowledgeBasesDesc: '기술 자료를 검색하고 탐색합니다.', searchWorkflowNodesHelp: '이 기능은 워크플로를 볼 때만 작동합니다. 먼저 워크플로로 이동합니다.', searchKnowledgeBases: '기술 자료 검색', + themeCategoryTitle: '주제', + themeSystem: '시스템 테마', + themeDark: '어두운 테마', + languageChangeDesc: 'UI 언어 변경', + languageCategoryTitle: '언어', + runTitle: '명령어', + themeLight: '라이트 테마', + themeDarkDesc: '어두운 모양 사용', + themeLightDesc: '밝은 외관 사용', + themeCategoryDesc: '애플리케이션 테마 전환', + languageCategoryDesc: '인터페이스 언어 전환', + runDesc: '빠른 명령 실행 (테마, 언어 등...)', + themeSystemDesc: '운영 체제의 외관을 따르세요', }, emptyState: { noAppsFound: '앱을 찾을 수 없습니다.', diff --git a/web/i18n/pl-PL/app.ts b/web/i18n/pl-PL/app.ts index da8bee7ea7..dec9dc475f 100644 --- a/web/i18n/pl-PL/app.ts +++ b/web/i18n/pl-PL/app.ts @@ -264,6 +264,19 @@ const translation = { searchWorkflowNodesDesc: 'Znajdowanie węzłów w bieżącym przepływie pracy i przechodzenie do nich według nazwy lub typu', searchKnowledgeBases: 'Szukaj w bazach wiedzy', searchWorkflowNodes: 'Wyszukiwanie węzłów przepływu pracy', + themeSystem: 'Motyw systemu', + themeCategoryTitle: 'Temat', + languageCategoryTitle: 'Język', + themeDark: 'Ciemny motyw', + runTitle: 'Polecenia', + themeLight: 'Jasny motyw', + themeCategoryDesc: 'Zmień motyw aplikacji', + languageCategoryDesc: 'Zmień język interfejsu', + themeDarkDesc: 'Użyj ciemnego wyglądu', + themeLightDesc: 'Użyj jasnego wyglądu', + languageChangeDesc: 'Zmień język interfejsu', + themeSystemDesc: 'Podążaj za wyglądem swojego systemu operacyjnego', + runDesc: 'Uruchom szybkie polecenia (motyw, język, ...)', }, emptyState: { noAppsFound: 'Nie znaleziono aplikacji', diff --git a/web/i18n/pt-BR/app.ts b/web/i18n/pt-BR/app.ts index 66e44f7916..9276f58129 100644 --- a/web/i18n/pt-BR/app.ts +++ b/web/i18n/pt-BR/app.ts @@ -263,6 +263,19 @@ const translation = { searchWorkflowNodesHelp: 'Esse recurso só funciona ao visualizar um fluxo de trabalho. Navegue até um fluxo de trabalho primeiro.', searchKnowledgeBasesDesc: 'Pesquise e navegue até suas bases de conhecimento', searchWorkflowNodes: 'Nós de fluxo de trabalho de pesquisa', + themeDarkDesc: 'Use aparência escura', + themeCategoryDesc: 'Mudar o tema do aplicativo', + themeLight: 'Tema Claro', + runDesc: 'Execute comandos rápidos (tema, idioma, ...)', + themeCategoryTitle: 'Tema', + runTitle: 'Comandos', + languageCategoryTitle: 'Idioma', + themeSystem: 'Tema do Sistema', + languageChangeDesc: 'Mudar o idioma da interface', + themeDark: 'Tema Escuro', + themeLightDesc: 'Use aparência clara', + themeSystemDesc: 'Siga a aparência do seu sistema operacional', + languageCategoryDesc: 'Mudar o idioma da interface', }, emptyState: { noAppsFound: 'Nenhum aplicativo encontrado', diff --git a/web/i18n/ro-RO/app.ts b/web/i18n/ro-RO/app.ts index 0d58b87ae5..59eeaee6a2 100644 --- a/web/i18n/ro-RO/app.ts +++ b/web/i18n/ro-RO/app.ts @@ -263,6 +263,19 @@ const translation = { searchWorkflowNodesDesc: 'Găsiți și treceți la nodurile din fluxul de lucru curent după nume sau tip', searchWorkflowNodesHelp: 'Această caracteristică funcționează numai atunci când vizualizați un flux de lucru. 
     searchPlugins: 'Căutare plugin-uri',
+    languageChangeDesc: 'Schimbați limba interfeței',
+    runTitle: 'Comenzi',
+    runDesc: 'Rulează comenzi rapide (temă, limbă, ...)',
+    themeDark: 'Temă întunecată',
+    themeLightDesc: 'Folosește aspectul luminos',
+    themeCategoryTitle: 'Temă',
+    languageCategoryTitle: 'Limba',
+    themeDarkDesc: 'Folosește aspectul întunecat',
+    themeLight: 'Temă deschisă',
+    themeSystem: 'Tema sistemului',
+    themeCategoryDesc: 'Schimbă tema aplicației',
+    languageCategoryDesc: 'Schimbați limba interfeței',
+    themeSystemDesc: 'Urmăriți aspectul sistemului de operare',
   },
   emptyState: {
     noAppsFound: 'Nu s-au găsit aplicații',
diff --git a/web/i18n/ru-RU/app.ts b/web/i18n/ru-RU/app.ts
index c00b805c00..ed98b94f03 100644
--- a/web/i18n/ru-RU/app.ts
+++ b/web/i18n/ru-RU/app.ts
@@ -263,6 +263,19 @@ const translation = {
     searchApplicationsDesc: 'Поиск и переход к приложениям',
     searchWorkflowNodesHelp: 'Эта функция работает только при просмотре рабочего процесса. Сначала перейдите к рабочему процессу.',
     searchWorkflowNodesDesc: 'Поиск узлов в текущем рабочем процессе и переход к ним по имени или типу',
+    themeCategoryDesc: 'Переключить тему приложения',
+    runTitle: 'Команды',
+    themeDark: 'Темная тема',
+    themeCategoryTitle: 'Тема',
+    languageCategoryTitle: 'Язык',
+    themeSystem: 'Системная тема',
+    runDesc: 'Запустите быстрые команды (тема, язык, ...)',
+    themeLight: 'Светлая тема',
+    themeDarkDesc: 'Используйте темный внешний вид',
+    languageChangeDesc: 'Изменить язык интерфейса',
+    languageCategoryDesc: 'Переключить язык интерфейса',
+    themeLightDesc: 'Используйте светлый внешний вид',
+    themeSystemDesc: 'Следуйте внешнему виду вашей операционной системы',
   },
   emptyState: {
     noPluginsFound: 'Плагины не найдены',
diff --git a/web/i18n/sl-SI/app.ts b/web/i18n/sl-SI/app.ts
index 49effb6b68..c4a275999d 100644
--- a/web/i18n/sl-SI/app.ts
+++ b/web/i18n/sl-SI/app.ts
@@ -263,6 +263,19 @@ const translation = {
     searchWorkflowNodesDesc: 'Iskanje vozlišč in skok nanje v trenutnem poteku dela po imenu ali vrsti',
     searchKnowledgeBases: 'Iskanje po zbirkah znanja',
     searchPluginsDesc: 'Iskanje in krmarjenje do vtičnikov',
+    themeCategoryTitle: 'Tema',
+    themeLight: 'Svetla tematika',
+    runTitle: 'Ukazi',
+    themeSystem: 'Sistemska tema',
+    themeDarkDesc: 'Uporabite temen način',
+    themeLightDesc: 'Uporabite svetlo prikazovanje',
+    themeCategoryDesc: 'Preklopi temo aplikacije',
+    themeDark: 'Temna tema',
+    languageCategoryDesc: 'Preklopi jezik vmesnika',
+    languageCategoryTitle: 'Jezik',
+    themeSystemDesc: 'Sledite videzu svojega operacijskega sistema',
+    runDesc: 'Zaženi hitre ukaze (teme, jezik, ...)',
+    languageChangeDesc: 'Spremeni jezik vmesnika',
   },
   emptyState: {
     noPluginsFound: 'Vtičnikov ni mogoče najti',
diff --git a/web/i18n/th-TH/app.ts b/web/i18n/th-TH/app.ts
index fd7fc15f19..ee0e53895e 100644
--- a/web/i18n/th-TH/app.ts
+++ b/web/i18n/th-TH/app.ts
@@ -259,6 +259,19 @@ const translation = {
     searchApplicationsDesc: 'ค้นหาและนําทางไปยังแอปพลิเคชันของคุณ',
     searchWorkflowNodesHelp: 'คุณลักษณะนี้ใช้ได้เฉพาะเมื่อดูเวิร์กโฟลว์เท่านั้น นําทางไปยังเวิร์กโฟลว์ก่อน',
     searchWorkflowNodesDesc: 'ค้นหาและข้ามไปยังโหนดในเวิร์กโฟลว์ปัจจุบันตามชื่อหรือประเภท',
+    themeCategoryTitle: 'ธีม',
+    languageCategoryTitle: 'ภาษา',
+    runTitle: 'คำสั่ง',
+    themeDark: 'ธีมมืด',
+    languageChangeDesc: 'เปลี่ยนภาษา UI',
+    themeSystem: 'ธีมระบบ',
+    themeLight: 'ธีมสว่าง',
+    runDesc: 'เรียกใช้คำสั่งอย่างรวดเร็ว (ธีม, ภาษา, ...)',
+    themeDarkDesc: 'ใช้รูปลักษณ์เข้ม',
+    themeCategoryDesc: 'เปลี่ยนธีมแอปพลิเคชัน',
+    languageCategoryDesc: 'เปลี่ยนภาษาของอินเทอร์เฟซ',
+    themeLightDesc: 'ใช้รูปลักษณ์ที่มีความสว่าง',
+    themeSystemDesc: 'ติดตามรูปลักษณ์ของระบบปฏิบัติการของคุณ',
   },
   emptyState: {
     noPluginsFound: 'ไม่พบปลั๊กอิน',
diff --git a/web/i18n/tr-TR/app.ts b/web/i18n/tr-TR/app.ts
index b961054728..d0ac18d3cd 100644
--- a/web/i18n/tr-TR/app.ts
+++ b/web/i18n/tr-TR/app.ts
@@ -259,6 +259,19 @@ const translation = {
     searchPlugins: 'Arama Eklentileri',
     searchWorkflowNodesHelp: 'Bu özellik yalnızca bir iş akışını görüntülerken çalışır. Önce bir iş akışına gidin.',
     searchApplicationsDesc: 'Uygulamalarınızı arayın ve uygulamalarınıza gidin',
+    languageChangeDesc: 'UI dilini değiştir',
+    themeSystem: 'Sistem Teması',
+    runTitle: 'Komutlar',
+    themeLightDesc: 'Aydınlık görünüm kullan',
+    themeSystemDesc: 'İşletim sisteminizin görünümünü takip edin',
+    languageCategoryTitle: 'Dil',
+    themeCategoryTitle: 'Tema',
+    themeLight: 'Aydınlık Tema',
+    themeDark: 'Karanlık Tema',
+    languageCategoryDesc: 'Arayüz dilini değiştir',
+    themeDarkDesc: 'Koyu görünümü kullan',
+    themeCategoryDesc: 'Uygulama temasını değiştir',
+    runDesc: 'Hızlı komutlar çalıştır (tema, dil, ...)',
   },
   emptyState: {
     noAppsFound: 'Uygulama bulunamadı',
diff --git a/web/i18n/uk-UA/app.ts b/web/i18n/uk-UA/app.ts
index 9686f58d4d..aea7bf525e 100644
--- a/web/i18n/uk-UA/app.ts
+++ b/web/i18n/uk-UA/app.ts
@@ -263,6 +263,19 @@ const translation = {
     searchPlugins: 'Пошукові плагіни',
     searchKnowledgeBasesDesc: 'Шукайте та переходьте до своїх баз знань',
     searchWorkflowNodesDesc: 'Знаходьте вузли в поточному робочому процесі та переходьте до них за іменем або типом',
+    themeSystem: 'Тема системи',
+    languageCategoryTitle: 'Мова',
+    themeCategoryTitle: 'Тема',
+    themeLight: 'Світла тема',
+    runTitle: 'Команди',
+    languageChangeDesc: 'Змінити мову інтерфейсу',
+    themeDark: 'Темний режим',
+    themeDarkDesc: 'Використовуйте темний режим',
+    runDesc: 'Запустіть швидкі команди (тема, мова, ...)',
+    themeCategoryDesc: 'Переключити тему застосунку',
+    themeLightDesc: 'Використовуйте світлий вигляд',
+    themeSystemDesc: 'Дотримуйтесь зовнішнього вигляду вашої операційної системи',
+    languageCategoryDesc: 'Переключити мову інтерфейсу',
   },
   emptyState: {
     noPluginsFound: 'Плагінів не знайдено',
diff --git a/web/i18n/vi-VN/app.ts b/web/i18n/vi-VN/app.ts
index be0e709c24..4c625496aa 100644
--- a/web/i18n/vi-VN/app.ts
+++ b/web/i18n/vi-VN/app.ts
@@ -263,6 +263,19 @@ const translation = {
     searchApplications: 'Tìm kiếm ứng dụng',
     searchWorkflowNodesDesc: 'Tìm và chuyển đến các nút trong quy trình làm việc hiện tại theo tên hoặc loại',
     searchKnowledgeBasesDesc: 'Tìm kiếm và điều hướng đến cơ sở kiến thức của bạn',
+    themeCategoryTitle: 'Chủ đề',
+    themeSystem: 'Chủ đề hệ thống',
+    themeDarkDesc: 'Sử dụng giao diện tối',
+    themeLight: 'Chủ đề sáng',
+    runTitle: 'Lệnh',
+    languageCategoryTitle: 'Ngôn ngữ',
+    themeLightDesc: 'Sử dụng giao diện sáng',
+    themeDark: 'Chủ đề tối',
+    languageChangeDesc: 'Thay đổi ngôn ngữ giao diện',
+    languageCategoryDesc: 'Chuyển đổi ngôn ngữ giao diện',
+    themeSystemDesc: 'Theo giao diện của hệ điều hành của bạn',
+    runDesc: 'Chạy các lệnh nhanh (chủ đề, ngôn ngữ, ... 
)', + themeCategoryDesc: 'Chuyển đổi giao diện ứng dụng', }, emptyState: { noWorkflowNodesFound: 'Không tìm thấy nút quy trình làm việc', diff --git a/web/i18n/zh-Hant/app.ts b/web/i18n/zh-Hant/app.ts index 4080cde1b4..111e766fc0 100644 --- a/web/i18n/zh-Hant/app.ts +++ b/web/i18n/zh-Hant/app.ts @@ -262,6 +262,19 @@ const translation = { searchApplicationsDesc: '搜索並導航到您的應用程式', searchPlugins: '搜索外掛程式', searchWorkflowNodesDesc: '按名稱或類型查找並跳轉到當前工作流中的節點', + themeCategoryTitle: '主題', + themeSystemDesc: '遵循你的操作系統外觀', + themeLightDesc: '使用輕盈的外觀', + themeDark: '黑暗主題', + themeSystem: '系統主題', + themeDarkDesc: '使用深色外觀', + runTitle: '指令', + languageCategoryDesc: '切換介面語言', + themeLight: '淺色主題', + languageCategoryTitle: '語言', + themeCategoryDesc: '切換應用程式主題', + languageChangeDesc: '更改 UI 語言', + runDesc: '執行快速命令(主題、語言等...)', }, emptyState: { noAppsFound: '未找到應用', From b3399642c55ee23a1211b5324ed55c61b1724fba Mon Sep 17 00:00:00 2001 From: xinlmain Date: Wed, 13 Aug 2025 02:28:06 +0800 Subject: [PATCH 15/27] feat: Add an asynchronous repository to improve workflow performance (#20050) Co-authored-by: liangxin Co-authored-by: autofix-ci[bot] <114827586+autofix-ci[bot]@users.noreply.github.com> Co-authored-by: liangxin --- .devcontainer/post_create_command.sh | 2 +- api/README.md | 2 +- api/configs/feature/__init__.py | 10 +- api/core/repositories/__init__.py | 4 + .../celery_workflow_execution_repository.py | 126 +++++++ ...lery_workflow_node_execution_repository.py | 190 ++++++++++ api/core/repositories/factory.py | 10 +- api/core/workflow/workflow_type_encoder.py | 4 + api/docker/entrypoint.sh | 2 +- api/tasks/workflow_execution_tasks.py | 136 +++++++ api/tasks/workflow_node_execution_tasks.py | 171 +++++++++ ...st_celery_workflow_execution_repository.py | 247 +++++++++++++ ...lery_workflow_node_execution_repository.py | 349 ++++++++++++++++++ .../core/repositories/test_factory.py | 324 +++++++--------- dev/start-worker | 2 +- docker/.env.example | 12 +- docker/docker-compose.yaml | 2 +- 17 files changed, 1376 insertions(+), 217 deletions(-) create mode 100644 api/core/repositories/celery_workflow_execution_repository.py create mode 100644 api/core/repositories/celery_workflow_node_execution_repository.py create mode 100644 api/tasks/workflow_execution_tasks.py create mode 100644 api/tasks/workflow_node_execution_tasks.py create mode 100644 api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py create mode 100644 api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py diff --git a/.devcontainer/post_create_command.sh b/.devcontainer/post_create_command.sh index 022f71bfb4..c25bde87b0 100755 --- a/.devcontainer/post_create_command.sh +++ b/.devcontainer/post_create_command.sh @@ -5,7 +5,7 @@ cd web && pnpm install pipx install uv echo 'alias start-api="cd /workspaces/dify/api && uv run python -m flask run --host 0.0.0.0 --port=5001 --debug"' >> ~/.bashrc -echo 'alias start-worker="cd /workspaces/dify/api && uv run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion"' >> ~/.bashrc +echo 'alias start-worker="cd /workspaces/dify/api && uv run python -m celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage"' >> ~/.bashrc echo 'alias start-web="cd /workspaces/dify/web && pnpm dev"' >> ~/.bashrc echo 'alias start-web-prod="cd /workspaces/dify/web && pnpm build && pnpm start"' >> ~/.bashrc echo 'alias 
start-containers="cd /workspaces/dify/docker && docker-compose -f docker-compose.middleware.yaml -p dify --env-file middleware.env up -d"' >> ~/.bashrc diff --git a/api/README.md b/api/README.md index 6ab923070e..b5298edf92 100644 --- a/api/README.md +++ b/api/README.md @@ -74,7 +74,7 @@ 10. If you need to handle and debug the async tasks (e.g. dataset importing and documents indexing), please start the worker service. ```bash - uv run celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin + uv run celery -A app.celery worker -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage ``` Addition, if you want to debug the celery scheduled tasks, you can use the following command in another terminal: diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index 4dbc8207f1..0b2f99aece 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -552,12 +552,18 @@ class RepositoryConfig(BaseSettings): """ CORE_WORKFLOW_EXECUTION_REPOSITORY: str = Field( - description="Repository implementation for WorkflowExecution. Specify as a module path", + description="Repository implementation for WorkflowExecution. Options: " + "'core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository' (default), " + "'core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository'", default="core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository", ) CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: str = Field( - description="Repository implementation for WorkflowNodeExecution. Specify as a module path", + description="Repository implementation for WorkflowNodeExecution. Options: " + "'core.repositories.sqlalchemy_workflow_node_execution_repository." + "SQLAlchemyWorkflowNodeExecutionRepository' (default), " + "'core.repositories.celery_workflow_node_execution_repository." + "CeleryWorkflowNodeExecutionRepository'", default="core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository", ) diff --git a/api/core/repositories/__init__.py b/api/core/repositories/__init__.py index 052ba1c2cb..d83823d7b9 100644 --- a/api/core/repositories/__init__.py +++ b/api/core/repositories/__init__.py @@ -5,10 +5,14 @@ This package contains concrete implementations of the repository interfaces defined in the core.workflow.repository package. """ +from core.repositories.celery_workflow_execution_repository import CeleryWorkflowExecutionRepository +from core.repositories.celery_workflow_node_execution_repository import CeleryWorkflowNodeExecutionRepository from core.repositories.factory import DifyCoreRepositoryFactory, RepositoryImportError from core.repositories.sqlalchemy_workflow_node_execution_repository import SQLAlchemyWorkflowNodeExecutionRepository __all__ = [ + "CeleryWorkflowExecutionRepository", + "CeleryWorkflowNodeExecutionRepository", "DifyCoreRepositoryFactory", "RepositoryImportError", "SQLAlchemyWorkflowNodeExecutionRepository", diff --git a/api/core/repositories/celery_workflow_execution_repository.py b/api/core/repositories/celery_workflow_execution_repository.py new file mode 100644 index 0000000000..df1f8db67f --- /dev/null +++ b/api/core/repositories/celery_workflow_execution_repository.py @@ -0,0 +1,126 @@ +""" +Celery-based implementation of the WorkflowExecutionRepository. 
+ +This implementation uses Celery tasks for asynchronous storage operations, +providing improved performance by offloading database operations to background workers. +""" + +import logging +from typing import Optional, Union + +from sqlalchemy.engine import Engine +from sqlalchemy.orm import sessionmaker + +from core.workflow.entities.workflow_execution import WorkflowExecution +from core.workflow.repositories.workflow_execution_repository import WorkflowExecutionRepository +from libs.helper import extract_tenant_id +from models import Account, CreatorUserRole, EndUser +from models.enums import WorkflowRunTriggeredFrom +from tasks.workflow_execution_tasks import ( + save_workflow_execution_task, +) + +logger = logging.getLogger(__name__) + + +class CeleryWorkflowExecutionRepository(WorkflowExecutionRepository): + """ + Celery-based implementation of the WorkflowExecutionRepository interface. + + This implementation provides asynchronous storage capabilities by using Celery tasks + to handle database operations in background workers. This improves performance by + reducing the blocking time for workflow execution storage operations. + + Key features: + - Asynchronous save operations using Celery tasks + - Support for multi-tenancy through tenant/app filtering + - Automatic retry and error handling through Celery + """ + + _session_factory: sessionmaker + _tenant_id: str + _app_id: Optional[str] + _triggered_from: Optional[WorkflowRunTriggeredFrom] + _creator_user_id: str + _creator_user_role: CreatorUserRole + + def __init__( + self, + session_factory: sessionmaker | Engine, + user: Union[Account, EndUser], + app_id: Optional[str], + triggered_from: Optional[WorkflowRunTriggeredFrom], + ): + """ + Initialize the repository with Celery task configuration and context information. + + Args: + session_factory: SQLAlchemy sessionmaker or engine for fallback operations + user: Account or EndUser object containing tenant_id, user ID, and role information + app_id: App ID for filtering by application (can be None) + triggered_from: Source of the execution trigger (DEBUGGING or APP_RUN) + """ + # Store session factory for fallback operations + if isinstance(session_factory, Engine): + self._session_factory = sessionmaker(bind=session_factory, expire_on_commit=False) + elif isinstance(session_factory, sessionmaker): + self._session_factory = session_factory + else: + raise ValueError( + f"Invalid session_factory type {type(session_factory).__name__}; expected sessionmaker or Engine" + ) + + # Extract tenant_id from user + tenant_id = extract_tenant_id(user) + if not tenant_id: + raise ValueError("User must have a tenant_id or current_tenant_id") + self._tenant_id = tenant_id # type: ignore[assignment] # We've already checked tenant_id is not None + + # Store app context + self._app_id = app_id + + # Extract user context + self._triggered_from = triggered_from + self._creator_user_id = user.id + + # Determine user role based on user type + self._creator_user_role = CreatorUserRole.ACCOUNT if isinstance(user, Account) else CreatorUserRole.END_USER + + logger.info( + "Initialized CeleryWorkflowExecutionRepository for tenant %s, app %s, triggered_from %s", + self._tenant_id, + self._app_id, + self._triggered_from, + ) + + def save(self, execution: WorkflowExecution) -> None: + """ + Save or update a WorkflowExecution instance asynchronously using Celery. + + This method queues the save operation as a Celery task and returns immediately, + providing improved performance for high-throughput scenarios. 
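+
+        A minimal caller-side sketch (names here are illustrative; instances
+        are normally built via DifyCoreRepositoryFactory rather than directly):
+
+            repo = CeleryWorkflowExecutionRepository(
+                session_factory=engine,  # Engine or sessionmaker
+                user=account,
+                app_id="app-1",
+                triggered_from=WorkflowRunTriggeredFrom.APP_RUN,
+            )
+            repo.save(execution)  # returns at once; a worker persists it later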
+ + Args: + execution: The WorkflowExecution instance to save or update + """ + try: + # Serialize execution for Celery task + execution_data = execution.model_dump() + + # Queue the save operation as a Celery task (fire and forget) + save_workflow_execution_task.delay( + execution_data=execution_data, + tenant_id=self._tenant_id, + app_id=self._app_id or "", + triggered_from=self._triggered_from.value if self._triggered_from else "", + creator_user_id=self._creator_user_id, + creator_user_role=self._creator_user_role.value, + ) + + logger.debug("Queued async save for workflow execution: %s", execution.id_) + + except Exception as e: + logger.exception("Failed to queue save operation for execution %s", execution.id_) + # In case of Celery failure, we could implement a fallback to synchronous save + # For now, we'll re-raise the exception + raise diff --git a/api/core/repositories/celery_workflow_node_execution_repository.py b/api/core/repositories/celery_workflow_node_execution_repository.py new file mode 100644 index 0000000000..5b410a7b56 --- /dev/null +++ b/api/core/repositories/celery_workflow_node_execution_repository.py @@ -0,0 +1,190 @@ +""" +Celery-based implementation of the WorkflowNodeExecutionRepository. + +This implementation uses Celery tasks for asynchronous storage operations, +providing improved performance by offloading database operations to background workers. +""" + +import logging +from collections.abc import Sequence +from typing import Optional, Union + +from sqlalchemy.engine import Engine +from sqlalchemy.orm import sessionmaker + +from core.workflow.entities.workflow_node_execution import WorkflowNodeExecution +from core.workflow.repositories.workflow_node_execution_repository import ( + OrderConfig, + WorkflowNodeExecutionRepository, +) +from libs.helper import extract_tenant_id +from models import Account, CreatorUserRole, EndUser +from models.workflow import WorkflowNodeExecutionTriggeredFrom +from tasks.workflow_node_execution_tasks import ( + save_workflow_node_execution_task, +) + +logger = logging.getLogger(__name__) + + +class CeleryWorkflowNodeExecutionRepository(WorkflowNodeExecutionRepository): + """ + Celery-based implementation of the WorkflowNodeExecutionRepository interface. + + This implementation provides asynchronous storage capabilities by using Celery tasks + to handle database operations in background workers. This improves performance by + reducing the blocking time for workflow node execution storage operations. + + Key features: + - Asynchronous save operations using Celery tasks + - In-memory cache for immediate reads + - Support for multi-tenancy through tenant/app filtering + - Automatic retry and error handling through Celery + """ + + _session_factory: sessionmaker + _tenant_id: str + _app_id: Optional[str] + _triggered_from: Optional[WorkflowNodeExecutionTriggeredFrom] + _creator_user_id: str + _creator_user_role: CreatorUserRole + _execution_cache: dict[str, WorkflowNodeExecution] + _workflow_execution_mapping: dict[str, list[str]] + + def __init__( + self, + session_factory: sessionmaker | Engine, + user: Union[Account, EndUser], + app_id: Optional[str], + triggered_from: Optional[WorkflowNodeExecutionTriggeredFrom], + ): + """ + Initialize the repository with Celery task configuration and context information. 
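+
+        Besides the Celery context, this sets up two in-memory structures: an
+        execution cache keyed by execution id, and a workflow-run-to-execution
+        mapping that lets get_by_workflow_run() answer without a database hit.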
+ + Args: + session_factory: SQLAlchemy sessionmaker or engine for fallback operations + user: Account or EndUser object containing tenant_id, user ID, and role information + app_id: App ID for filtering by application (can be None) + triggered_from: Source of the execution trigger (SINGLE_STEP or WORKFLOW_RUN) + """ + # Store session factory for fallback operations + if isinstance(session_factory, Engine): + self._session_factory = sessionmaker(bind=session_factory, expire_on_commit=False) + elif isinstance(session_factory, sessionmaker): + self._session_factory = session_factory + else: + raise ValueError( + f"Invalid session_factory type {type(session_factory).__name__}; expected sessionmaker or Engine" + ) + + # Extract tenant_id from user + tenant_id = extract_tenant_id(user) + if not tenant_id: + raise ValueError("User must have a tenant_id or current_tenant_id") + self._tenant_id = tenant_id # type: ignore[assignment] # We've already checked tenant_id is not None + + # Store app context + self._app_id = app_id + + # Extract user context + self._triggered_from = triggered_from + self._creator_user_id = user.id + + # Determine user role based on user type + self._creator_user_role = CreatorUserRole.ACCOUNT if isinstance(user, Account) else CreatorUserRole.END_USER + + # In-memory cache for workflow node executions + self._execution_cache: dict[str, WorkflowNodeExecution] = {} + + # Cache for mapping workflow_execution_ids to execution IDs for efficient retrieval + self._workflow_execution_mapping: dict[str, list[str]] = {} + + logger.info( + "Initialized CeleryWorkflowNodeExecutionRepository for tenant %s, app %s, triggered_from %s", + self._tenant_id, + self._app_id, + self._triggered_from, + ) + + def save(self, execution: WorkflowNodeExecution) -> None: + """ + Save or update a WorkflowNodeExecution instance to cache and asynchronously to database. + + This method stores the execution in cache immediately for fast reads and queues + the save operation as a Celery task without tracking the task status. 
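+
+        Because the cache is written first, the execution becomes visible to
+        get_by_workflow_run() on this instance before the Celery worker has
+        persisted it.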
+ + Args: + execution: The WorkflowNodeExecution instance to save or update + """ + try: + # Store in cache immediately for fast reads + self._execution_cache[execution.id] = execution + + # Update workflow execution mapping for efficient retrieval + if execution.workflow_execution_id: + if execution.workflow_execution_id not in self._workflow_execution_mapping: + self._workflow_execution_mapping[execution.workflow_execution_id] = [] + if execution.id not in self._workflow_execution_mapping[execution.workflow_execution_id]: + self._workflow_execution_mapping[execution.workflow_execution_id].append(execution.id) + + # Serialize execution for Celery task + execution_data = execution.model_dump() + + # Queue the save operation as a Celery task (fire and forget) + save_workflow_node_execution_task.delay( + execution_data=execution_data, + tenant_id=self._tenant_id, + app_id=self._app_id or "", + triggered_from=self._triggered_from.value if self._triggered_from else "", + creator_user_id=self._creator_user_id, + creator_user_role=self._creator_user_role.value, + ) + + logger.debug("Cached and queued async save for workflow node execution: %s", execution.id) + + except Exception as e: + logger.exception("Failed to cache or queue save operation for node execution %s", execution.id) + # In case of Celery failure, we could implement a fallback to synchronous save + # For now, we'll re-raise the exception + raise + + def get_by_workflow_run( + self, + workflow_run_id: str, + order_config: Optional[OrderConfig] = None, + ) -> Sequence[WorkflowNodeExecution]: + """ + Retrieve all WorkflowNodeExecution instances for a specific workflow run from cache. + + Args: + workflow_run_id: The workflow run ID + order_config: Optional configuration for ordering results + + Returns: + A sequence of WorkflowNodeExecution instances + """ + try: + # Get execution IDs for this workflow run from cache + execution_ids = self._workflow_execution_mapping.get(workflow_run_id, []) + + # Retrieve executions from cache + result = [] + for execution_id in execution_ids: + if execution_id in self._execution_cache: + result.append(self._execution_cache[execution_id]) + + # Apply ordering if specified + if order_config and result: + # Sort based on the order configuration + reverse = order_config.order_direction == "desc" + + # Sort by multiple fields if specified + for field_name in reversed(order_config.order_by): + result.sort(key=lambda x: getattr(x, field_name, 0), reverse=reverse) + + logger.debug("Retrieved %d workflow node executions for run %s from cache", len(result), workflow_run_id) + return result + + except Exception as e: + logger.exception("Failed to get workflow node executions for run %s from cache", workflow_run_id) + return [] diff --git a/api/core/repositories/factory.py b/api/core/repositories/factory.py index 6e636883ae..09c775f3a6 100644 --- a/api/core/repositories/factory.py +++ b/api/core/repositories/factory.py @@ -94,11 +94,9 @@ class DifyCoreRepositoryFactory: def _validate_constructor_signature(repository_class: type, required_params: list[str]) -> None: """ Validate that a repository class constructor accepts required parameters. 
- Args: repository_class: The class to validate required_params: List of required parameter names - Raises: RepositoryImportError: If the constructor doesn't accept required parameters """ @@ -158,10 +156,8 @@ class DifyCoreRepositoryFactory: try: repository_class = cls._import_class(class_path) cls._validate_repository_interface(repository_class, WorkflowExecutionRepository) - cls._validate_constructor_signature( - repository_class, ["session_factory", "user", "app_id", "triggered_from"] - ) + # All repository types now use the same constructor parameters return repository_class( # type: ignore[no-any-return] session_factory=session_factory, user=user, @@ -204,10 +200,8 @@ class DifyCoreRepositoryFactory: try: repository_class = cls._import_class(class_path) cls._validate_repository_interface(repository_class, WorkflowNodeExecutionRepository) - cls._validate_constructor_signature( - repository_class, ["session_factory", "user", "app_id", "triggered_from"] - ) + # All repository types now use the same constructor parameters return repository_class( # type: ignore[no-any-return] session_factory=session_factory, user=user, diff --git a/api/core/workflow/workflow_type_encoder.py b/api/core/workflow/workflow_type_encoder.py index 2c634d25ec..08e12e2681 100644 --- a/api/core/workflow/workflow_type_encoder.py +++ b/api/core/workflow/workflow_type_encoder.py @@ -1,4 +1,5 @@ from collections.abc import Mapping +from decimal import Decimal from typing import Any from pydantic import BaseModel @@ -17,6 +18,9 @@ class WorkflowRuntimeTypeConverter: return value if isinstance(value, (bool, int, str, float)): return value + if isinstance(value, Decimal): + # Convert Decimal to float for JSON serialization + return float(value) if isinstance(value, Segment): return self._to_json_encodable_recursive(value.value) if isinstance(value, File): diff --git a/api/docker/entrypoint.sh b/api/docker/entrypoint.sh index a850ea9a50..da147fe895 100755 --- a/api/docker/entrypoint.sh +++ b/api/docker/entrypoint.sh @@ -32,7 +32,7 @@ if [[ "${MODE}" == "worker" ]]; then exec celery -A app.celery worker -P ${CELERY_WORKER_CLASS:-gevent} $CONCURRENCY_OPTION \ --max-tasks-per-child ${MAX_TASK_PRE_CHILD:-50} --loglevel ${LOG_LEVEL:-INFO} \ - -Q ${CELERY_QUEUES:-dataset,mail,ops_trace,app_deletion,plugin} + -Q ${CELERY_QUEUES:-dataset,mail,ops_trace,app_deletion,plugin,workflow_storage} elif [[ "${MODE}" == "beat" ]]; then exec celery -A app.celery beat --loglevel ${LOG_LEVEL:-INFO} diff --git a/api/tasks/workflow_execution_tasks.py b/api/tasks/workflow_execution_tasks.py new file mode 100644 index 0000000000..2f9fb628ca --- /dev/null +++ b/api/tasks/workflow_execution_tasks.py @@ -0,0 +1,136 @@ +""" +Celery tasks for asynchronous workflow execution storage operations. + +These tasks provide asynchronous storage capabilities for workflow execution data, +improving performance by offloading storage operations to background workers. 
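+
+A sketch of how the repository side enqueues work (the argument values below
+are illustrative, not taken from a real run):
+
+    save_workflow_execution_task.delay(
+        execution_data=execution.model_dump(),
+        tenant_id="tenant-1",
+        app_id="app-1",
+        triggered_from="app-run",
+        creator_user_id="user-1",
+        creator_user_role="account",
+    )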
+""" + +import json +import logging + +from celery import shared_task # type: ignore[import-untyped] +from sqlalchemy import select +from sqlalchemy.orm import sessionmaker + +from core.workflow.entities.workflow_execution import WorkflowExecution +from core.workflow.workflow_type_encoder import WorkflowRuntimeTypeConverter +from extensions.ext_database import db +from models import CreatorUserRole, WorkflowRun +from models.enums import WorkflowRunTriggeredFrom + +logger = logging.getLogger(__name__) + + +@shared_task(queue="workflow_storage", bind=True, max_retries=3, default_retry_delay=60) +def save_workflow_execution_task( + self, + execution_data: dict, + tenant_id: str, + app_id: str, + triggered_from: str, + creator_user_id: str, + creator_user_role: str, +) -> bool: + """ + Asynchronously save or update a workflow execution to the database. + + Args: + execution_data: Serialized WorkflowExecution data + tenant_id: Tenant ID for multi-tenancy + app_id: Application ID + triggered_from: Source of the execution trigger + creator_user_id: ID of the user who created the execution + creator_user_role: Role of the user who created the execution + + Returns: + True if successful, False otherwise + """ + try: + # Create a new session for this task + session_factory = sessionmaker(bind=db.engine, expire_on_commit=False) + + with session_factory() as session: + # Deserialize execution data + execution = WorkflowExecution.model_validate(execution_data) + + # Check if workflow run already exists + existing_run = session.scalar(select(WorkflowRun).where(WorkflowRun.id == execution.id_)) + + if existing_run: + # Update existing workflow run + _update_workflow_run_from_execution(existing_run, execution) + logger.debug("Updated existing workflow run: %s", execution.id_) + else: + # Create new workflow run + workflow_run = _create_workflow_run_from_execution( + execution=execution, + tenant_id=tenant_id, + app_id=app_id, + triggered_from=WorkflowRunTriggeredFrom(triggered_from), + creator_user_id=creator_user_id, + creator_user_role=CreatorUserRole(creator_user_role), + ) + session.add(workflow_run) + logger.debug("Created new workflow run: %s", execution.id_) + + session.commit() + return True + + except Exception as e: + logger.exception("Failed to save workflow execution %s", execution_data.get("id_", "unknown")) + # Retry the task with exponential backoff + raise self.retry(exc=e, countdown=60 * (2**self.request.retries)) + + +def _create_workflow_run_from_execution( + execution: WorkflowExecution, + tenant_id: str, + app_id: str, + triggered_from: WorkflowRunTriggeredFrom, + creator_user_id: str, + creator_user_role: CreatorUserRole, +) -> WorkflowRun: + """ + Create a WorkflowRun database model from a WorkflowExecution domain entity. 
+ """ + workflow_run = WorkflowRun() + workflow_run.id = execution.id_ + workflow_run.tenant_id = tenant_id + workflow_run.app_id = app_id + workflow_run.workflow_id = execution.workflow_id + workflow_run.type = execution.workflow_type.value + workflow_run.triggered_from = triggered_from.value + workflow_run.version = execution.workflow_version + json_converter = WorkflowRuntimeTypeConverter() + workflow_run.graph = json.dumps(json_converter.to_json_encodable(execution.graph)) + workflow_run.inputs = json.dumps(json_converter.to_json_encodable(execution.inputs)) + workflow_run.status = execution.status.value + workflow_run.outputs = ( + json.dumps(json_converter.to_json_encodable(execution.outputs)) if execution.outputs else "{}" + ) + workflow_run.error = execution.error_message + workflow_run.elapsed_time = execution.elapsed_time + workflow_run.total_tokens = execution.total_tokens + workflow_run.total_steps = execution.total_steps + workflow_run.created_by_role = creator_user_role.value + workflow_run.created_by = creator_user_id + workflow_run.created_at = execution.started_at + workflow_run.finished_at = execution.finished_at + + return workflow_run + + +def _update_workflow_run_from_execution(workflow_run: WorkflowRun, execution: WorkflowExecution) -> None: + """ + Update a WorkflowRun database model from a WorkflowExecution domain entity. + """ + json_converter = WorkflowRuntimeTypeConverter() + workflow_run.status = execution.status.value + workflow_run.outputs = ( + json.dumps(json_converter.to_json_encodable(execution.outputs)) if execution.outputs else "{}" + ) + workflow_run.error = execution.error_message + workflow_run.elapsed_time = execution.elapsed_time + workflow_run.total_tokens = execution.total_tokens + workflow_run.total_steps = execution.total_steps + workflow_run.finished_at = execution.finished_at diff --git a/api/tasks/workflow_node_execution_tasks.py b/api/tasks/workflow_node_execution_tasks.py new file mode 100644 index 0000000000..dfc8a33564 --- /dev/null +++ b/api/tasks/workflow_node_execution_tasks.py @@ -0,0 +1,171 @@ +""" +Celery tasks for asynchronous workflow node execution storage operations. + +These tasks provide asynchronous storage capabilities for workflow node execution data, +improving performance by offloading storage operations to background workers. +""" + +import json +import logging + +from celery import shared_task # type: ignore[import-untyped] +from sqlalchemy import select +from sqlalchemy.orm import sessionmaker + +from core.workflow.entities.workflow_node_execution import ( + WorkflowNodeExecution, +) +from core.workflow.workflow_type_encoder import WorkflowRuntimeTypeConverter +from extensions.ext_database import db +from models import CreatorUserRole, WorkflowNodeExecutionModel +from models.workflow import WorkflowNodeExecutionTriggeredFrom + +logger = logging.getLogger(__name__) + + +@shared_task(queue="workflow_storage", bind=True, max_retries=3, default_retry_delay=60) +def save_workflow_node_execution_task( + self, + execution_data: dict, + tenant_id: str, + app_id: str, + triggered_from: str, + creator_user_id: str, + creator_user_role: str, +) -> bool: + """ + Asynchronously save or update a workflow node execution to the database. 
+ + Args: + execution_data: Serialized WorkflowNodeExecution data + tenant_id: Tenant ID for multi-tenancy + app_id: Application ID + triggered_from: Source of the execution trigger + creator_user_id: ID of the user who created the execution + creator_user_role: Role of the user who created the execution + + Returns: + True if successful, False otherwise + """ + try: + # Create a new session for this task + session_factory = sessionmaker(bind=db.engine, expire_on_commit=False) + + with session_factory() as session: + # Deserialize execution data + execution = WorkflowNodeExecution.model_validate(execution_data) + + # Check if node execution already exists + existing_execution = session.scalar( + select(WorkflowNodeExecutionModel).where(WorkflowNodeExecutionModel.id == execution.id) + ) + + if existing_execution: + # Update existing node execution + _update_node_execution_from_domain(existing_execution, execution) + logger.debug("Updated existing workflow node execution: %s", execution.id) + else: + # Create new node execution + node_execution = _create_node_execution_from_domain( + execution=execution, + tenant_id=tenant_id, + app_id=app_id, + triggered_from=WorkflowNodeExecutionTriggeredFrom(triggered_from), + creator_user_id=creator_user_id, + creator_user_role=CreatorUserRole(creator_user_role), + ) + session.add(node_execution) + logger.debug("Created new workflow node execution: %s", execution.id) + + session.commit() + return True + + except Exception as e: + logger.exception("Failed to save workflow node execution %s", execution_data.get("id", "unknown")) + # Retry the task with exponential backoff + raise self.retry(exc=e, countdown=60 * (2**self.request.retries)) + + +def _create_node_execution_from_domain( + execution: WorkflowNodeExecution, + tenant_id: str, + app_id: str, + triggered_from: WorkflowNodeExecutionTriggeredFrom, + creator_user_id: str, + creator_user_role: CreatorUserRole, +) -> WorkflowNodeExecutionModel: + """ + Create a WorkflowNodeExecutionModel database model from a WorkflowNodeExecution domain entity. 
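+
+    Enum metadata keys are normalized to plain strings before the JSON dump,
+    and absent inputs/process_data/outputs are stored as "{}".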
+ """ + node_execution = WorkflowNodeExecutionModel() + node_execution.id = execution.id + node_execution.tenant_id = tenant_id + node_execution.app_id = app_id + node_execution.workflow_id = execution.workflow_id + node_execution.triggered_from = triggered_from.value + node_execution.workflow_run_id = execution.workflow_execution_id + node_execution.index = execution.index + node_execution.predecessor_node_id = execution.predecessor_node_id + node_execution.node_id = execution.node_id + node_execution.node_type = execution.node_type.value + node_execution.title = execution.title + node_execution.node_execution_id = execution.node_execution_id + + # Serialize complex data as JSON + json_converter = WorkflowRuntimeTypeConverter() + node_execution.inputs = json.dumps(json_converter.to_json_encodable(execution.inputs)) if execution.inputs else "{}" + node_execution.process_data = ( + json.dumps(json_converter.to_json_encodable(execution.process_data)) if execution.process_data else "{}" + ) + node_execution.outputs = ( + json.dumps(json_converter.to_json_encodable(execution.outputs)) if execution.outputs else "{}" + ) + # Convert metadata enum keys to strings for JSON serialization + if execution.metadata: + metadata_for_json = { + key.value if hasattr(key, "value") else str(key): value for key, value in execution.metadata.items() + } + node_execution.execution_metadata = json.dumps(json_converter.to_json_encodable(metadata_for_json)) + else: + node_execution.execution_metadata = "{}" + + node_execution.status = execution.status.value + node_execution.error = execution.error + node_execution.elapsed_time = execution.elapsed_time + node_execution.created_by_role = creator_user_role.value + node_execution.created_by = creator_user_id + node_execution.created_at = execution.created_at + node_execution.finished_at = execution.finished_at + + return node_execution + + +def _update_node_execution_from_domain( + node_execution: WorkflowNodeExecutionModel, execution: WorkflowNodeExecution +) -> None: + """ + Update a WorkflowNodeExecutionModel database model from a WorkflowNodeExecution domain entity. 
+ """ + # Update serialized data + json_converter = WorkflowRuntimeTypeConverter() + node_execution.inputs = json.dumps(json_converter.to_json_encodable(execution.inputs)) if execution.inputs else "{}" + node_execution.process_data = ( + json.dumps(json_converter.to_json_encodable(execution.process_data)) if execution.process_data else "{}" + ) + node_execution.outputs = ( + json.dumps(json_converter.to_json_encodable(execution.outputs)) if execution.outputs else "{}" + ) + # Convert metadata enum keys to strings for JSON serialization + if execution.metadata: + metadata_for_json = { + key.value if hasattr(key, "value") else str(key): value for key, value in execution.metadata.items() + } + node_execution.execution_metadata = json.dumps(json_converter.to_json_encodable(metadata_for_json)) + else: + node_execution.execution_metadata = "{}" + + # Update other fields + node_execution.status = execution.status.value + node_execution.error = execution.error + node_execution.elapsed_time = execution.elapsed_time + node_execution.finished_at = execution.finished_at diff --git a/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py b/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py new file mode 100644 index 0000000000..450501c256 --- /dev/null +++ b/api/tests/unit_tests/core/repositories/test_celery_workflow_execution_repository.py @@ -0,0 +1,247 @@ +""" +Unit tests for CeleryWorkflowExecutionRepository. + +These tests verify the Celery-based asynchronous storage functionality +for workflow execution data. +""" + +from datetime import UTC, datetime +from unittest.mock import Mock, patch +from uuid import uuid4 + +import pytest + +from core.repositories.celery_workflow_execution_repository import CeleryWorkflowExecutionRepository +from core.workflow.entities.workflow_execution import WorkflowExecution, WorkflowType +from models import Account, EndUser +from models.enums import WorkflowRunTriggeredFrom + + +@pytest.fixture +def mock_session_factory(): + """Mock SQLAlchemy session factory.""" + from sqlalchemy import create_engine + from sqlalchemy.orm import sessionmaker + + # Create a real sessionmaker with in-memory SQLite for testing + engine = create_engine("sqlite:///:memory:") + return sessionmaker(bind=engine) + + +@pytest.fixture +def mock_account(): + """Mock Account user.""" + account = Mock(spec=Account) + account.id = str(uuid4()) + account.current_tenant_id = str(uuid4()) + return account + + +@pytest.fixture +def mock_end_user(): + """Mock EndUser.""" + user = Mock(spec=EndUser) + user.id = str(uuid4()) + user.tenant_id = str(uuid4()) + return user + + +@pytest.fixture +def sample_workflow_execution(): + """Sample WorkflowExecution for testing.""" + return WorkflowExecution.new( + id_=str(uuid4()), + workflow_id=str(uuid4()), + workflow_type=WorkflowType.WORKFLOW, + workflow_version="1.0", + graph={"nodes": [], "edges": []}, + inputs={"input1": "value1"}, + started_at=datetime.now(UTC).replace(tzinfo=None), + ) + + +class TestCeleryWorkflowExecutionRepository: + """Test cases for CeleryWorkflowExecutionRepository.""" + + def test_init_with_sessionmaker(self, mock_session_factory, mock_account): + """Test repository initialization with sessionmaker.""" + app_id = "test-app-id" + triggered_from = WorkflowRunTriggeredFrom.APP_RUN + + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id=app_id, + triggered_from=triggered_from, + ) + + assert repo._tenant_id == 
mock_account.current_tenant_id + assert repo._app_id == app_id + assert repo._triggered_from == triggered_from + assert repo._creator_user_id == mock_account.id + assert repo._creator_user_role is not None + + def test_init_basic_functionality(self, mock_session_factory, mock_account): + """Test repository initialization basic functionality.""" + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.DEBUGGING, + ) + + # Verify basic initialization + assert repo._tenant_id == mock_account.current_tenant_id + assert repo._app_id == "test-app" + assert repo._triggered_from == WorkflowRunTriggeredFrom.DEBUGGING + + def test_init_with_end_user(self, mock_session_factory, mock_end_user): + """Test repository initialization with EndUser.""" + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_end_user, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + assert repo._tenant_id == mock_end_user.tenant_id + + def test_init_without_tenant_id_raises_error(self, mock_session_factory): + """Test that initialization fails without tenant_id.""" + # Create a mock Account with no tenant_id + user = Mock(spec=Account) + user.current_tenant_id = None + user.id = str(uuid4()) + + with pytest.raises(ValueError, match="User must have a tenant_id"): + CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=user, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + @patch("core.repositories.celery_workflow_execution_repository.save_workflow_execution_task") + def test_save_queues_celery_task(self, mock_task, mock_session_factory, mock_account, sample_workflow_execution): + """Test that save operation queues a Celery task without tracking.""" + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + repo.save(sample_workflow_execution) + + # Verify Celery task was queued with correct parameters + mock_task.delay.assert_called_once() + call_args = mock_task.delay.call_args[1] + + assert call_args["execution_data"] == sample_workflow_execution.model_dump() + assert call_args["tenant_id"] == mock_account.current_tenant_id + assert call_args["app_id"] == "test-app" + assert call_args["triggered_from"] == WorkflowRunTriggeredFrom.APP_RUN.value + assert call_args["creator_user_id"] == mock_account.id + + # Verify no task tracking occurs (no _pending_saves attribute) + assert not hasattr(repo, "_pending_saves") + + @patch("core.repositories.celery_workflow_execution_repository.save_workflow_execution_task") + def test_save_handles_celery_failure( + self, mock_task, mock_session_factory, mock_account, sample_workflow_execution + ): + """Test that save operation handles Celery task failures.""" + mock_task.delay.side_effect = Exception("Celery is down") + + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + with pytest.raises(Exception, match="Celery is down"): + repo.save(sample_workflow_execution) + + @patch("core.repositories.celery_workflow_execution_repository.save_workflow_execution_task") + def test_save_operation_fire_and_forget( + self, mock_task, mock_session_factory, mock_account, sample_workflow_execution + ): + """Test that 
save operation works in fire-and-forget mode.""" + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + # Test that save doesn't block or maintain state + repo.save(sample_workflow_execution) + + # Verify no pending saves are tracked (no _pending_saves attribute) + assert not hasattr(repo, "_pending_saves") + + @patch("core.repositories.celery_workflow_execution_repository.save_workflow_execution_task") + def test_multiple_save_operations(self, mock_task, mock_session_factory, mock_account): + """Test multiple save operations work correctly.""" + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + # Create multiple executions + exec1 = WorkflowExecution.new( + id_=str(uuid4()), + workflow_id=str(uuid4()), + workflow_type=WorkflowType.WORKFLOW, + workflow_version="1.0", + graph={"nodes": [], "edges": []}, + inputs={"input1": "value1"}, + started_at=datetime.now(UTC).replace(tzinfo=None), + ) + exec2 = WorkflowExecution.new( + id_=str(uuid4()), + workflow_id=str(uuid4()), + workflow_type=WorkflowType.WORKFLOW, + workflow_version="1.0", + graph={"nodes": [], "edges": []}, + inputs={"input2": "value2"}, + started_at=datetime.now(UTC).replace(tzinfo=None), + ) + + # Save both executions + repo.save(exec1) + repo.save(exec2) + + # Should work without issues and not maintain state (no _pending_saves attribute) + assert not hasattr(repo, "_pending_saves") + + @patch("core.repositories.celery_workflow_execution_repository.save_workflow_execution_task") + def test_save_with_different_user_types(self, mock_task, mock_session_factory, mock_end_user): + """Test save operation with different user types.""" + repo = CeleryWorkflowExecutionRepository( + session_factory=mock_session_factory, + user=mock_end_user, + app_id="test-app", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + + execution = WorkflowExecution.new( + id_=str(uuid4()), + workflow_id=str(uuid4()), + workflow_type=WorkflowType.WORKFLOW, + workflow_version="1.0", + graph={"nodes": [], "edges": []}, + inputs={"input1": "value1"}, + started_at=datetime.now(UTC).replace(tzinfo=None), + ) + + repo.save(execution) + + # Verify task was called with EndUser context + mock_task.delay.assert_called_once() + call_args = mock_task.delay.call_args[1] + assert call_args["tenant_id"] == mock_end_user.tenant_id + assert call_args["creator_user_id"] == mock_end_user.id diff --git a/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py b/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py new file mode 100644 index 0000000000..b38d994f03 --- /dev/null +++ b/api/tests/unit_tests/core/repositories/test_celery_workflow_node_execution_repository.py @@ -0,0 +1,349 @@ +""" +Unit tests for CeleryWorkflowNodeExecutionRepository. + +These tests verify the Celery-based asynchronous storage functionality +for workflow node execution data. 
+""" + +from datetime import UTC, datetime +from unittest.mock import Mock, patch +from uuid import uuid4 + +import pytest + +from core.repositories.celery_workflow_node_execution_repository import CeleryWorkflowNodeExecutionRepository +from core.workflow.entities.workflow_node_execution import ( + WorkflowNodeExecution, + WorkflowNodeExecutionStatus, +) +from core.workflow.nodes.enums import NodeType +from core.workflow.repositories.workflow_node_execution_repository import OrderConfig +from models import Account, EndUser +from models.workflow import WorkflowNodeExecutionTriggeredFrom + + +@pytest.fixture +def mock_session_factory(): + """Mock SQLAlchemy session factory.""" + from sqlalchemy import create_engine + from sqlalchemy.orm import sessionmaker + + # Create a real sessionmaker with in-memory SQLite for testing + engine = create_engine("sqlite:///:memory:") + return sessionmaker(bind=engine) + + +@pytest.fixture +def mock_account(): + """Mock Account user.""" + account = Mock(spec=Account) + account.id = str(uuid4()) + account.current_tenant_id = str(uuid4()) + return account + + +@pytest.fixture +def mock_end_user(): + """Mock EndUser.""" + user = Mock(spec=EndUser) + user.id = str(uuid4()) + user.tenant_id = str(uuid4()) + return user + + +@pytest.fixture +def sample_workflow_node_execution(): + """Sample WorkflowNodeExecution for testing.""" + return WorkflowNodeExecution( + id=str(uuid4()), + node_execution_id=str(uuid4()), + workflow_id=str(uuid4()), + workflow_execution_id=str(uuid4()), + index=1, + node_id="test_node", + node_type=NodeType.START, + title="Test Node", + inputs={"input1": "value1"}, + status=WorkflowNodeExecutionStatus.RUNNING, + created_at=datetime.now(UTC).replace(tzinfo=None), + ) + + +class TestCeleryWorkflowNodeExecutionRepository: + """Test cases for CeleryWorkflowNodeExecutionRepository.""" + + def test_init_with_sessionmaker(self, mock_session_factory, mock_account): + """Test repository initialization with sessionmaker.""" + app_id = "test-app-id" + triggered_from = WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN + + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id=app_id, + triggered_from=triggered_from, + ) + + assert repo._tenant_id == mock_account.current_tenant_id + assert repo._app_id == app_id + assert repo._triggered_from == triggered_from + assert repo._creator_user_id == mock_account.id + assert repo._creator_user_role is not None + + def test_init_with_cache_initialized(self, mock_session_factory, mock_account): + """Test repository initialization with cache properly initialized.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.SINGLE_STEP, + ) + + assert repo._execution_cache == {} + assert repo._workflow_execution_mapping == {} + + def test_init_with_end_user(self, mock_session_factory, mock_end_user): + """Test repository initialization with EndUser.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_end_user, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + assert repo._tenant_id == mock_end_user.tenant_id + + def test_init_without_tenant_id_raises_error(self, mock_session_factory): + """Test that initialization fails without tenant_id.""" + # Create a mock Account with no tenant_id + user = Mock(spec=Account) + user.current_tenant_id = 
None + user.id = str(uuid4()) + + with pytest.raises(ValueError, match="User must have a tenant_id"): + CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=user, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + @patch("core.repositories.celery_workflow_node_execution_repository.save_workflow_node_execution_task") + def test_save_caches_and_queues_celery_task( + self, mock_task, mock_session_factory, mock_account, sample_workflow_node_execution + ): + """Test that save operation caches execution and queues a Celery task.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + repo.save(sample_workflow_node_execution) + + # Verify Celery task was queued with correct parameters + mock_task.delay.assert_called_once() + call_args = mock_task.delay.call_args[1] + + assert call_args["execution_data"] == sample_workflow_node_execution.model_dump() + assert call_args["tenant_id"] == mock_account.current_tenant_id + assert call_args["app_id"] == "test-app" + assert call_args["triggered_from"] == WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN.value + assert call_args["creator_user_id"] == mock_account.id + + # Verify execution is cached + assert sample_workflow_node_execution.id in repo._execution_cache + assert repo._execution_cache[sample_workflow_node_execution.id] == sample_workflow_node_execution + + # Verify workflow execution mapping is updated + assert sample_workflow_node_execution.workflow_execution_id in repo._workflow_execution_mapping + assert ( + sample_workflow_node_execution.id + in repo._workflow_execution_mapping[sample_workflow_node_execution.workflow_execution_id] + ) + + @patch("core.repositories.celery_workflow_node_execution_repository.save_workflow_node_execution_task") + def test_save_handles_celery_failure( + self, mock_task, mock_session_factory, mock_account, sample_workflow_node_execution + ): + """Test that save operation handles Celery task failures.""" + mock_task.delay.side_effect = Exception("Celery is down") + + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + with pytest.raises(Exception, match="Celery is down"): + repo.save(sample_workflow_node_execution) + + @patch("core.repositories.celery_workflow_node_execution_repository.save_workflow_node_execution_task") + def test_get_by_workflow_run_from_cache( + self, mock_task, mock_session_factory, mock_account, sample_workflow_node_execution + ): + """Test that get_by_workflow_run retrieves executions from cache.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + # Save execution to cache first + repo.save(sample_workflow_node_execution) + + workflow_run_id = sample_workflow_node_execution.workflow_execution_id + order_config = OrderConfig(order_by=["index"], order_direction="asc") + + result = repo.get_by_workflow_run(workflow_run_id, order_config) + + # Verify results were retrieved from cache + assert len(result) == 1 + assert result[0].id == sample_workflow_node_execution.id + assert result[0] is sample_workflow_node_execution + + def 
test_get_by_workflow_run_without_order_config(self, mock_session_factory, mock_account): + """Test get_by_workflow_run without order configuration.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + result = repo.get_by_workflow_run("workflow-run-id") + + # Should return empty list since nothing in cache + assert len(result) == 0 + + @patch("core.repositories.celery_workflow_node_execution_repository.save_workflow_node_execution_task") + def test_cache_operations(self, mock_task, mock_session_factory, mock_account, sample_workflow_node_execution): + """Test cache operations work correctly.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + # Test saving to cache + repo.save(sample_workflow_node_execution) + + # Verify cache contains the execution + assert sample_workflow_node_execution.id in repo._execution_cache + + # Test retrieving from cache + result = repo.get_by_workflow_run(sample_workflow_node_execution.workflow_execution_id) + assert len(result) == 1 + assert result[0].id == sample_workflow_node_execution.id + + @patch("core.repositories.celery_workflow_node_execution_repository.save_workflow_node_execution_task") + def test_multiple_executions_same_workflow(self, mock_task, mock_session_factory, mock_account): + """Test multiple executions for the same workflow.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + # Create multiple executions for the same workflow + workflow_run_id = str(uuid4()) + exec1 = WorkflowNodeExecution( + id=str(uuid4()), + node_execution_id=str(uuid4()), + workflow_id=str(uuid4()), + workflow_execution_id=workflow_run_id, + index=1, + node_id="node1", + node_type=NodeType.START, + title="Node 1", + inputs={"input1": "value1"}, + status=WorkflowNodeExecutionStatus.RUNNING, + created_at=datetime.now(UTC).replace(tzinfo=None), + ) + exec2 = WorkflowNodeExecution( + id=str(uuid4()), + node_execution_id=str(uuid4()), + workflow_id=str(uuid4()), + workflow_execution_id=workflow_run_id, + index=2, + node_id="node2", + node_type=NodeType.LLM, + title="Node 2", + inputs={"input2": "value2"}, + status=WorkflowNodeExecutionStatus.RUNNING, + created_at=datetime.now(UTC).replace(tzinfo=None), + ) + + # Save both executions + repo.save(exec1) + repo.save(exec2) + + # Verify both are cached and mapped + assert len(repo._execution_cache) == 2 + assert len(repo._workflow_execution_mapping[workflow_run_id]) == 2 + + # Test retrieval + result = repo.get_by_workflow_run(workflow_run_id) + assert len(result) == 2 + + @patch("core.repositories.celery_workflow_node_execution_repository.save_workflow_node_execution_task") + def test_ordering_functionality(self, mock_task, mock_session_factory, mock_account): + """Test ordering functionality works correctly.""" + repo = CeleryWorkflowNodeExecutionRepository( + session_factory=mock_session_factory, + user=mock_account, + app_id="test-app", + triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + ) + + # Create executions with different indices + workflow_run_id = str(uuid4()) + exec1 = WorkflowNodeExecution( + id=str(uuid4()), + node_execution_id=str(uuid4()), 
+ workflow_id=str(uuid4()), + workflow_execution_id=workflow_run_id, + index=2, + node_id="node2", + node_type=NodeType.START, + title="Node 2", + inputs={}, + status=WorkflowNodeExecutionStatus.RUNNING, + created_at=datetime.now(UTC).replace(tzinfo=None), + ) + exec2 = WorkflowNodeExecution( + id=str(uuid4()), + node_execution_id=str(uuid4()), + workflow_id=str(uuid4()), + workflow_execution_id=workflow_run_id, + index=1, + node_id="node1", + node_type=NodeType.LLM, + title="Node 1", + inputs={}, + status=WorkflowNodeExecutionStatus.RUNNING, + created_at=datetime.now(UTC).replace(tzinfo=None), + ) + + # Save in random order + repo.save(exec1) + repo.save(exec2) + + # Test ascending order + order_config = OrderConfig(order_by=["index"], order_direction="asc") + result = repo.get_by_workflow_run(workflow_run_id, order_config) + assert len(result) == 2 + assert result[0].index == 1 + assert result[1].index == 2 + + # Test descending order + order_config = OrderConfig(order_by=["index"], order_direction="desc") + result = repo.get_by_workflow_run(workflow_run_id, order_config) + assert len(result) == 2 + assert result[0].index == 2 + assert result[1].index == 1 diff --git a/api/tests/unit_tests/core/repositories/test_factory.py b/api/tests/unit_tests/core/repositories/test_factory.py index fce4a6fb6b..5146e82e8f 100644 --- a/api/tests/unit_tests/core/repositories/test_factory.py +++ b/api/tests/unit_tests/core/repositories/test_factory.py @@ -59,7 +59,7 @@ class TestRepositoryFactory: def get_by_id(self): pass - # Create a mock interface with the same methods + # Create a mock interface class class MockInterface: def save(self): pass @@ -67,20 +67,20 @@ class TestRepositoryFactory: def get_by_id(self): pass - # Should not raise an exception + # Should not raise an exception when all methods are present DifyCoreRepositoryFactory._validate_repository_interface(MockRepository, MockInterface) def test_validate_repository_interface_missing_methods(self): """Test interface validation with missing methods.""" - # Create a mock class that doesn't implement all required methods + # Create a mock class that's missing required methods class IncompleteRepository: def save(self): pass # Missing get_by_id method - # Create a mock interface with required methods + # Create a mock interface that requires both methods class MockInterface: def save(self): pass @@ -88,57 +88,39 @@ class TestRepositoryFactory: def get_by_id(self): pass + def missing_method(self): + pass + with pytest.raises(RepositoryImportError) as exc_info: DifyCoreRepositoryFactory._validate_repository_interface(IncompleteRepository, MockInterface) assert "does not implement required methods" in str(exc_info.value) - assert "get_by_id" in str(exc_info.value) - def test_validate_constructor_signature_success(self): - """Test successful constructor signature validation.""" + def test_validate_repository_interface_with_private_methods(self): + """Test that private methods are ignored during interface validation.""" class MockRepository: - def __init__(self, session_factory, user, app_id, triggered_from): + def save(self): pass - # Should not raise an exception - DifyCoreRepositoryFactory._validate_constructor_signature( - MockRepository, ["session_factory", "user", "app_id", "triggered_from"] - ) - - def test_validate_constructor_signature_missing_params(self): - """Test constructor validation with missing parameters.""" - - class IncompleteRepository: - def __init__(self, session_factory, user): - # Missing app_id and triggered_from parameters 
+ def _private_method(self): pass - with pytest.raises(RepositoryImportError) as exc_info: - DifyCoreRepositoryFactory._validate_constructor_signature( - IncompleteRepository, ["session_factory", "user", "app_id", "triggered_from"] - ) - assert "does not accept required parameters" in str(exc_info.value) - assert "app_id" in str(exc_info.value) - assert "triggered_from" in str(exc_info.value) - - def test_validate_constructor_signature_inspection_error(self, mocker: MockerFixture): - """Test constructor validation when inspection fails.""" - # Mock inspect.signature to raise an exception - mocker.patch("inspect.signature", side_effect=Exception("Inspection failed")) - - class MockRepository: - def __init__(self, session_factory): + # Create a mock interface with private methods + class MockInterface: + def save(self): pass - with pytest.raises(RepositoryImportError) as exc_info: - DifyCoreRepositoryFactory._validate_constructor_signature(MockRepository, ["session_factory"]) - assert "Failed to validate constructor signature" in str(exc_info.value) + def _private_method(self): + pass + + # Should not raise exception - private methods should be ignored + DifyCoreRepositoryFactory._validate_repository_interface(MockRepository, MockInterface) @patch("core.repositories.factory.dify_config") - def test_create_workflow_execution_repository_success(self, mock_config, mocker: MockerFixture): - """Test successful creation of WorkflowExecutionRepository.""" + def test_create_workflow_execution_repository_success(self, mock_config): + """Test successful WorkflowExecutionRepository creation.""" # Setup mock configuration - mock_config.WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + mock_config.CORE_WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" # Create mock dependencies mock_session_factory = MagicMock(spec=sessionmaker) @@ -146,7 +128,7 @@ class TestRepositoryFactory: app_id = "test-app-id" triggered_from = WorkflowRunTriggeredFrom.APP_RUN - # Mock the imported class to be a valid repository + # Create mock repository class and instance mock_repository_class = MagicMock() mock_repository_instance = MagicMock(spec=WorkflowExecutionRepository) mock_repository_class.return_value = mock_repository_instance @@ -155,7 +137,6 @@ class TestRepositoryFactory: with ( patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), patch.object(DifyCoreRepositoryFactory, "_validate_repository_interface"), - patch.object(DifyCoreRepositoryFactory, "_validate_constructor_signature"), ): result = DifyCoreRepositoryFactory.create_workflow_execution_repository( session_factory=mock_session_factory, @@ -177,7 +158,7 @@ class TestRepositoryFactory: def test_create_workflow_execution_repository_import_error(self, mock_config): """Test WorkflowExecutionRepository creation with import error.""" # Setup mock configuration with invalid class path - mock_config.WORKFLOW_EXECUTION_REPOSITORY = "invalid.module.InvalidClass" + mock_config.CORE_WORKFLOW_EXECUTION_REPOSITORY = "invalid.module.InvalidClass" mock_session_factory = MagicMock(spec=sessionmaker) mock_user = MagicMock(spec=Account) @@ -195,45 +176,46 @@ class TestRepositoryFactory: def test_create_workflow_execution_repository_validation_error(self, mock_config, mocker: MockerFixture): """Test WorkflowExecutionRepository creation with validation error.""" # Setup mock configuration - mock_config.WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + mock_config.CORE_WORKFLOW_EXECUTION_REPOSITORY = 
"unittest.mock.MagicMock" mock_session_factory = MagicMock(spec=sessionmaker) mock_user = MagicMock(spec=Account) - # Mock import to succeed but validation to fail + # Mock the import to succeed but validation to fail mock_repository_class = MagicMock() - with ( - patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), - patch.object( - DifyCoreRepositoryFactory, - "_validate_repository_interface", - side_effect=RepositoryImportError("Interface validation failed"), - ), - ): - with pytest.raises(RepositoryImportError) as exc_info: - DifyCoreRepositoryFactory.create_workflow_execution_repository( - session_factory=mock_session_factory, - user=mock_user, - app_id="test-app-id", - triggered_from=WorkflowRunTriggeredFrom.APP_RUN, - ) - assert "Interface validation failed" in str(exc_info.value) + mocker.patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class) + mocker.patch.object( + DifyCoreRepositoryFactory, + "_validate_repository_interface", + side_effect=RepositoryImportError("Interface validation failed"), + ) + + with pytest.raises(RepositoryImportError) as exc_info: + DifyCoreRepositoryFactory.create_workflow_execution_repository( + session_factory=mock_session_factory, + user=mock_user, + app_id="test-app-id", + triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + ) + assert "Interface validation failed" in str(exc_info.value) @patch("core.repositories.factory.dify_config") - def test_create_workflow_execution_repository_instantiation_error(self, mock_config, mocker: MockerFixture): + def test_create_workflow_execution_repository_instantiation_error(self, mock_config): """Test WorkflowExecutionRepository creation with instantiation error.""" # Setup mock configuration - mock_config.WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + mock_config.CORE_WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" mock_session_factory = MagicMock(spec=sessionmaker) mock_user = MagicMock(spec=Account) - # Mock import and validation to succeed but instantiation to fail - mock_repository_class = MagicMock(side_effect=Exception("Instantiation failed")) + # Create a mock repository class that raises exception on instantiation + mock_repository_class = MagicMock() + mock_repository_class.side_effect = Exception("Instantiation failed") + + # Mock the validation methods to succeed with ( patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), patch.object(DifyCoreRepositoryFactory, "_validate_repository_interface"), - patch.object(DifyCoreRepositoryFactory, "_validate_constructor_signature"), ): with pytest.raises(RepositoryImportError) as exc_info: DifyCoreRepositoryFactory.create_workflow_execution_repository( @@ -245,18 +227,18 @@ class TestRepositoryFactory: assert "Failed to create WorkflowExecutionRepository" in str(exc_info.value) @patch("core.repositories.factory.dify_config") - def test_create_workflow_node_execution_repository_success(self, mock_config, mocker: MockerFixture): - """Test successful creation of WorkflowNodeExecutionRepository.""" + def test_create_workflow_node_execution_repository_success(self, mock_config): + """Test successful WorkflowNodeExecutionRepository creation.""" # Setup mock configuration - mock_config.WORKFLOW_NODE_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + mock_config.CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" # Create mock dependencies mock_session_factory = MagicMock(spec=sessionmaker) mock_user = 
MagicMock(spec=EndUser) app_id = "test-app-id" - triggered_from = WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN + triggered_from = WorkflowNodeExecutionTriggeredFrom.SINGLE_STEP - # Mock the imported class to be a valid repository + # Create mock repository class and instance mock_repository_class = MagicMock() mock_repository_instance = MagicMock(spec=WorkflowNodeExecutionRepository) mock_repository_class.return_value = mock_repository_instance @@ -265,7 +247,6 @@ class TestRepositoryFactory: with ( patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), patch.object(DifyCoreRepositoryFactory, "_validate_repository_interface"), - patch.object(DifyCoreRepositoryFactory, "_validate_constructor_signature"), ): result = DifyCoreRepositoryFactory.create_workflow_node_execution_repository( session_factory=mock_session_factory, @@ -287,7 +268,7 @@ class TestRepositoryFactory: def test_create_workflow_node_execution_repository_import_error(self, mock_config): """Test WorkflowNodeExecutionRepository creation with import error.""" # Setup mock configuration with invalid class path - mock_config.WORKFLOW_NODE_EXECUTION_REPOSITORY = "invalid.module.InvalidClass" + mock_config.CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY = "invalid.module.InvalidClass" mock_session_factory = MagicMock(spec=sessionmaker) mock_user = MagicMock(spec=EndUser) @@ -297,28 +278,83 @@ class TestRepositoryFactory: session_factory=mock_session_factory, user=mock_user, app_id="test-app-id", - triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, + triggered_from=WorkflowNodeExecutionTriggeredFrom.SINGLE_STEP, ) assert "Cannot import repository class" in str(exc_info.value) - def test_repository_import_error_exception(self): - """Test RepositoryImportError exception.""" - error_message = "Test error message" - exception = RepositoryImportError(error_message) - assert str(exception) == error_message - assert isinstance(exception, Exception) + @patch("core.repositories.factory.dify_config") + def test_create_workflow_node_execution_repository_validation_error(self, mock_config, mocker: MockerFixture): + """Test WorkflowNodeExecutionRepository creation with validation error.""" + # Setup mock configuration + mock_config.CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + + mock_session_factory = MagicMock(spec=sessionmaker) + mock_user = MagicMock(spec=EndUser) + + # Mock the import to succeed but validation to fail + mock_repository_class = MagicMock() + mocker.patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class) + mocker.patch.object( + DifyCoreRepositoryFactory, + "_validate_repository_interface", + side_effect=RepositoryImportError("Interface validation failed"), + ) + + with pytest.raises(RepositoryImportError) as exc_info: + DifyCoreRepositoryFactory.create_workflow_node_execution_repository( + session_factory=mock_session_factory, + user=mock_user, + app_id="test-app-id", + triggered_from=WorkflowNodeExecutionTriggeredFrom.SINGLE_STEP, + ) + assert "Interface validation failed" in str(exc_info.value) @patch("core.repositories.factory.dify_config") - def test_create_with_engine_instead_of_sessionmaker(self, mock_config, mocker: MockerFixture): + def test_create_workflow_node_execution_repository_instantiation_error(self, mock_config): + """Test WorkflowNodeExecutionRepository creation with instantiation error.""" + # Setup mock configuration + mock_config.CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + + 
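# NOTE: the configured path is never really imported here, because
+        # _import_class is patched below; this test only exercises the
+        # instantiation-failure path.
+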
mock_session_factory = MagicMock(spec=sessionmaker) + mock_user = MagicMock(spec=EndUser) + + # Create a mock repository class that raises exception on instantiation + mock_repository_class = MagicMock() + mock_repository_class.side_effect = Exception("Instantiation failed") + + # Mock the validation methods to succeed + with ( + patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), + patch.object(DifyCoreRepositoryFactory, "_validate_repository_interface"), + ): + with pytest.raises(RepositoryImportError) as exc_info: + DifyCoreRepositoryFactory.create_workflow_node_execution_repository( + session_factory=mock_session_factory, + user=mock_user, + app_id="test-app-id", + triggered_from=WorkflowNodeExecutionTriggeredFrom.SINGLE_STEP, + ) + assert "Failed to create WorkflowNodeExecutionRepository" in str(exc_info.value) + + def test_repository_import_error_exception(self): + """Test RepositoryImportError exception handling.""" + error_message = "Custom error message" + error = RepositoryImportError(error_message) + assert str(error) == error_message + + @patch("core.repositories.factory.dify_config") + def test_create_with_engine_instead_of_sessionmaker(self, mock_config): """Test repository creation with Engine instead of sessionmaker.""" # Setup mock configuration - mock_config.WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" + mock_config.CORE_WORKFLOW_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" - # Create mock dependencies with Engine instead of sessionmaker + # Create mock dependencies using Engine instead of sessionmaker mock_engine = MagicMock(spec=Engine) mock_user = MagicMock(spec=Account) + app_id = "test-app-id" + triggered_from = WorkflowRunTriggeredFrom.APP_RUN - # Mock the imported class to be a valid repository + # Create mock repository class and instance mock_repository_class = MagicMock() mock_repository_instance = MagicMock(spec=WorkflowExecutionRepository) mock_repository_class.return_value = mock_repository_instance @@ -327,129 +363,19 @@ class TestRepositoryFactory: with ( patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), patch.object(DifyCoreRepositoryFactory, "_validate_repository_interface"), - patch.object(DifyCoreRepositoryFactory, "_validate_constructor_signature"), ): result = DifyCoreRepositoryFactory.create_workflow_execution_repository( session_factory=mock_engine, # Using Engine instead of sessionmaker user=mock_user, - app_id="test-app-id", - triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + app_id=app_id, + triggered_from=triggered_from, ) - # Verify the repository was created with the Engine + # Verify the repository was created with correct parameters mock_repository_class.assert_called_once_with( session_factory=mock_engine, user=mock_user, - app_id="test-app-id", - triggered_from=WorkflowRunTriggeredFrom.APP_RUN, + app_id=app_id, + triggered_from=triggered_from, ) assert result is mock_repository_instance - - @patch("core.repositories.factory.dify_config") - def test_create_workflow_node_execution_repository_validation_error(self, mock_config): - """Test WorkflowNodeExecutionRepository creation with validation error.""" - # Setup mock configuration - mock_config.WORKFLOW_NODE_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" - - mock_session_factory = MagicMock(spec=sessionmaker) - mock_user = MagicMock(spec=EndUser) - - # Mock import to succeed but validation to fail - mock_repository_class = MagicMock() - with ( - patch.object(DifyCoreRepositoryFactory, 
"_import_class", return_value=mock_repository_class), - patch.object( - DifyCoreRepositoryFactory, - "_validate_repository_interface", - side_effect=RepositoryImportError("Interface validation failed"), - ), - ): - with pytest.raises(RepositoryImportError) as exc_info: - DifyCoreRepositoryFactory.create_workflow_node_execution_repository( - session_factory=mock_session_factory, - user=mock_user, - app_id="test-app-id", - triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, - ) - assert "Interface validation failed" in str(exc_info.value) - - @patch("core.repositories.factory.dify_config") - def test_create_workflow_node_execution_repository_instantiation_error(self, mock_config): - """Test WorkflowNodeExecutionRepository creation with instantiation error.""" - # Setup mock configuration - mock_config.WORKFLOW_NODE_EXECUTION_REPOSITORY = "unittest.mock.MagicMock" - - mock_session_factory = MagicMock(spec=sessionmaker) - mock_user = MagicMock(spec=EndUser) - - # Mock import and validation to succeed but instantiation to fail - mock_repository_class = MagicMock(side_effect=Exception("Instantiation failed")) - with ( - patch.object(DifyCoreRepositoryFactory, "_import_class", return_value=mock_repository_class), - patch.object(DifyCoreRepositoryFactory, "_validate_repository_interface"), - patch.object(DifyCoreRepositoryFactory, "_validate_constructor_signature"), - ): - with pytest.raises(RepositoryImportError) as exc_info: - DifyCoreRepositoryFactory.create_workflow_node_execution_repository( - session_factory=mock_session_factory, - user=mock_user, - app_id="test-app-id", - triggered_from=WorkflowNodeExecutionTriggeredFrom.WORKFLOW_RUN, - ) - assert "Failed to create WorkflowNodeExecutionRepository" in str(exc_info.value) - - def test_validate_repository_interface_with_private_methods(self): - """Test interface validation ignores private methods.""" - - # Create a mock class with private methods - class MockRepository: - def save(self): - pass - - def get_by_id(self): - pass - - def _private_method(self): - pass - - # Create a mock interface with private methods - class MockInterface: - def save(self): - pass - - def get_by_id(self): - pass - - def _private_method(self): - pass - - # Should not raise an exception (private methods are ignored) - DifyCoreRepositoryFactory._validate_repository_interface(MockRepository, MockInterface) - - def test_validate_constructor_signature_with_extra_params(self): - """Test constructor validation with extra parameters (should pass).""" - - class MockRepository: - def __init__(self, session_factory, user, app_id, triggered_from, extra_param=None): - pass - - # Should not raise an exception (extra parameters are allowed) - DifyCoreRepositoryFactory._validate_constructor_signature( - MockRepository, ["session_factory", "user", "app_id", "triggered_from"] - ) - - def test_validate_constructor_signature_with_kwargs(self): - """Test constructor validation with **kwargs (current implementation doesn't support this).""" - - class MockRepository: - def __init__(self, session_factory, user, **kwargs): - pass - - # Current implementation doesn't handle **kwargs, so this should raise an exception - with pytest.raises(RepositoryImportError) as exc_info: - DifyCoreRepositoryFactory._validate_constructor_signature( - MockRepository, ["session_factory", "user", "app_id", "triggered_from"] - ) - assert "does not accept required parameters" in str(exc_info.value) - assert "app_id" in str(exc_info.value) - assert "triggered_from" in str(exc_info.value) diff --git 
a/dev/start-worker b/dev/start-worker index 7007b265e0..66e446c831 100755 --- a/dev/start-worker +++ b/dev/start-worker @@ -8,4 +8,4 @@ cd "$SCRIPT_DIR/.." uv --directory api run \ celery -A app.celery worker \ - -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion + -P gevent -c 1 --loglevel INFO -Q dataset,generation,mail,ops_trace,app_deletion,plugin,workflow_storage diff --git a/docker/.env.example b/docker/.env.example index ed19fa6099..7a435ad66c 100644 --- a/docker/.env.example +++ b/docker/.env.example @@ -861,17 +861,23 @@ WORKFLOW_NODE_EXECUTION_STORAGE=rdbms # Repository configuration # Core workflow execution repository implementation +# Options: +# - core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository (default) +# - core.repositories.celery_workflow_execution_repository.CeleryWorkflowExecutionRepository CORE_WORKFLOW_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository # Core workflow node execution repository implementation +# Options: +# - core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository (default) +# - core.repositories.celery_workflow_node_execution_repository.CeleryWorkflowNodeExecutionRepository CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY=core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository -# API workflow node execution repository implementation -API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository - # API workflow run repository implementation API_WORKFLOW_RUN_REPOSITORY=repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository +# API workflow node execution repository implementation +API_WORKFLOW_NODE_EXECUTION_REPOSITORY=repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository + # HTTP request node in workflow configuration HTTP_REQUEST_NODE_MAX_BINARY_SIZE=10485760 HTTP_REQUEST_NODE_MAX_TEXT_SIZE=1048576 diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index 779fbf382a..101f8eb323 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -390,8 +390,8 @@ x-shared-env: &shared-api-worker-env WORKFLOW_NODE_EXECUTION_STORAGE: ${WORKFLOW_NODE_EXECUTION_STORAGE:-rdbms} CORE_WORKFLOW_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_execution_repository.SQLAlchemyWorkflowExecutionRepository} CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${CORE_WORKFLOW_NODE_EXECUTION_REPOSITORY:-core.repositories.sqlalchemy_workflow_node_execution_repository.SQLAlchemyWorkflowNodeExecutionRepository} - API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository} API_WORKFLOW_RUN_REPOSITORY: ${API_WORKFLOW_RUN_REPOSITORY:-repositories.sqlalchemy_api_workflow_run_repository.DifyAPISQLAlchemyWorkflowRunRepository} + API_WORKFLOW_NODE_EXECUTION_REPOSITORY: ${API_WORKFLOW_NODE_EXECUTION_REPOSITORY:-repositories.sqlalchemy_api_workflow_node_execution_repository.DifyAPISQLAlchemyWorkflowNodeExecutionRepository} HTTP_REQUEST_NODE_MAX_BINARY_SIZE: ${HTTP_REQUEST_NODE_MAX_BINARY_SIZE:-10485760} HTTP_REQUEST_NODE_MAX_TEXT_SIZE: 
${HTTP_REQUEST_NODE_MAX_TEXT_SIZE:-1048576} HTTP_REQUEST_NODE_SSL_VERIFY: ${HTTP_REQUEST_NODE_SSL_VERIFY:-True} From ccc6d5975f6644ba2772c59592eb0b0298bfc802 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Wed, 13 Aug 2025 09:28:42 +0800 Subject: [PATCH 16/27] chore: rename misleading 'chore.yaml' issue template to 'refactor.yml' (#23847) --- .github/ISSUE_TEMPLATE/{chore.yaml => refactor.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/ISSUE_TEMPLATE/{chore.yaml => refactor.yml} (100%) diff --git a/.github/ISSUE_TEMPLATE/chore.yaml b/.github/ISSUE_TEMPLATE/refactor.yml similarity index 100% rename from .github/ISSUE_TEMPLATE/chore.yaml rename to .github/ISSUE_TEMPLATE/refactor.yml From 74ab057f5677f5efb8830f1c53f621683dc59087 Mon Sep 17 00:00:00 2001 From: -LAN- Date: Wed, 13 Aug 2025 09:46:02 +0800 Subject: [PATCH 17/27] refactor: improve Redis wrapper type hints and fix None value handling (#23845) --- api/extensions/ext_redis.py | 80 +++++++++++++++++++++++++++--- api/pyproject.toml | 1 + api/schedule/queue_monitor_task.py | 11 ++++ api/uv.lock | 17 ++++++- 4 files changed, 101 insertions(+), 8 deletions(-) diff --git a/api/extensions/ext_redis.py b/api/extensions/ext_redis.py index 14b9273e9d..914d6219cf 100644 --- a/api/extensions/ext_redis.py +++ b/api/extensions/ext_redis.py @@ -1,18 +1,23 @@ import functools import logging from collections.abc import Callable -from typing import Any, Union +from datetime import timedelta +from typing import TYPE_CHECKING, Any, Union import redis from redis import RedisError from redis.cache import CacheConfig from redis.cluster import ClusterNode, RedisCluster from redis.connection import Connection, SSLConnection +from redis.lock import Lock from redis.sentinel import Sentinel from configs import dify_config from dify_app import DifyApp +if TYPE_CHECKING: + from redis.lock import Lock + logger = logging.getLogger(__name__) @@ -28,8 +33,8 @@ class RedisClientWrapper: a failover in a Sentinel-managed Redis setup. Attributes: - _client (redis.Redis): The actual Redis client instance. It remains None until - initialized with the `initialize` method. + _client: The actual Redis client instance. It remains None until + initialized with the `initialize` method. Methods: initialize(client): Initializes the Redis client if it hasn't been initialized already. @@ -37,20 +42,78 @@ class RedisClientWrapper: if the client is not initialized. """ - def __init__(self): + _client: Union[redis.Redis, RedisCluster, None] + + def __init__(self) -> None: self._client = None - def initialize(self, client): + def initialize(self, client: Union[redis.Redis, RedisCluster]) -> None: if self._client is None: self._client = client - def __getattr__(self, item): + if TYPE_CHECKING: + # Type hints for IDE support and static analysis + # These are not executed at runtime but provide type information + def get(self, name: str | bytes) -> Any: ... + + def set( + self, + name: str | bytes, + value: Any, + ex: int | None = None, + px: int | None = None, + nx: bool = False, + xx: bool = False, + keepttl: bool = False, + get: bool = False, + exat: int | None = None, + pxat: int | None = None, + ) -> Any: ... + + def setex(self, name: str | bytes, time: int | timedelta, value: Any) -> Any: ... + def setnx(self, name: str | bytes, value: Any) -> Any: ... + def delete(self, *names: str | bytes) -> Any: ... + def incr(self, name: str | bytes, amount: int = 1) -> Any: ... 
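+        # Commands not stubbed in this TYPE_CHECKING block still work at
+        # runtime (they are resolved dynamically through __getattr__ below);
+        # they just carry no static type information.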
+ def expire( + self, + name: str | bytes, + time: int | timedelta, + nx: bool = False, + xx: bool = False, + gt: bool = False, + lt: bool = False, + ) -> Any: ... + def lock( + self, + name: str, + timeout: float | None = None, + sleep: float = 0.1, + blocking: bool = True, + blocking_timeout: float | None = None, + thread_local: bool = True, + ) -> Lock: ... + def zadd( + self, + name: str | bytes, + mapping: dict[str | bytes | int | float, float | int | str | bytes], + nx: bool = False, + xx: bool = False, + ch: bool = False, + incr: bool = False, + gt: bool = False, + lt: bool = False, + ) -> Any: ... + def zremrangebyscore(self, name: str | bytes, min: float | str, max: float | str) -> Any: ... + def zcard(self, name: str | bytes) -> Any: ... + def getdel(self, name: str | bytes) -> Any: ... + + def __getattr__(self, item: str) -> Any: if self._client is None: raise RuntimeError("Redis client is not initialized. Call init_app first.") return getattr(self._client, item) -redis_client = RedisClientWrapper() +redis_client: RedisClientWrapper = RedisClientWrapper() def init_app(app: DifyApp): @@ -80,6 +143,9 @@ def init_app(app: DifyApp): if dify_config.REDIS_USE_SENTINEL: assert dify_config.REDIS_SENTINELS is not None, "REDIS_SENTINELS must be set when REDIS_USE_SENTINEL is True" + assert dify_config.REDIS_SENTINEL_SERVICE_NAME is not None, ( + "REDIS_SENTINEL_SERVICE_NAME must be set when REDIS_USE_SENTINEL is True" + ) sentinel_hosts = [ (node.split(":")[0], int(node.split(":")[1])) for node in dify_config.REDIS_SENTINELS.split(",") ] diff --git a/api/pyproject.toml b/api/pyproject.toml index 4b395276ef..de472c870a 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -162,6 +162,7 @@ dev = [ "pandas-stubs~=2.2.3", "scipy-stubs>=1.15.3.0", "types-python-http-client>=3.3.7.20240910", + "types-redis>=4.6.0.20241004", ] ############################################################ diff --git a/api/schedule/queue_monitor_task.py b/api/schedule/queue_monitor_task.py index f0d3bed057..5868450a14 100644 --- a/api/schedule/queue_monitor_task.py +++ b/api/schedule/queue_monitor_task.py @@ -24,9 +24,20 @@ def queue_monitor_task(): queue_name = "dataset" threshold = dify_config.QUEUE_MONITOR_THRESHOLD + if threshold is None: + logging.warning(click.style("QUEUE_MONITOR_THRESHOLD is not configured, skipping monitoring", fg="yellow")) + return + try: queue_length = celery_redis.llen(f"{queue_name}") logging.info(click.style(f"Start monitor {queue_name}", fg="green")) + + if queue_length is None: + logging.error( + click.style(f"Failed to get queue length for {queue_name} - Redis may be unavailable", fg="red") + ) + return + logging.info(click.style(f"Queue length: {queue_length}", fg="green")) if queue_length >= threshold: diff --git a/api/uv.lock b/api/uv.lock index ea2c1bef5b..870975418f 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 3 +revision = 2 requires-python = ">=3.11, <3.13" resolution-markers = [ "python_full_version >= '3.12.4' and platform_python_implementation != 'PyPy' and sys_platform == 'linux'", @@ -1371,6 +1371,7 @@ dev = [ { name = "types-python-http-client" }, { name = "types-pywin32" }, { name = "types-pyyaml" }, + { name = "types-redis" }, { name = "types-regex" }, { name = "types-requests" }, { name = "types-requests-oauthlib" }, @@ -1557,6 +1558,7 @@ dev = [ { name = "types-python-http-client", specifier = ">=3.3.7.20240910" }, { name = "types-pywin32", specifier = "~=310.0.0" }, { name = "types-pyyaml", specifier = "~=6.0.12" }, + { 
name = "types-redis", specifier = ">=4.6.0.20241004" }, { name = "types-regex", specifier = "~=2024.11.6" }, { name = "types-requests", specifier = "~=2.32.0" }, { name = "types-requests-oauthlib", specifier = "~=2.0.0" }, @@ -6064,6 +6066,19 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/99/5f/e0af6f7f6a260d9af67e1db4f54d732abad514252a7a378a6c4d17dd1036/types_pyyaml-6.0.12.20250516-py3-none-any.whl", hash = "sha256:8478208feaeb53a34cb5d970c56a7cd76b72659442e733e268a94dc72b2d0530", size = 20312, upload-time = "2025-05-16T03:08:04.019Z" }, ] +[[package]] +name = "types-redis" +version = "4.6.0.20241004" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "cryptography" }, + { name = "types-pyopenssl" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/3a/95/c054d3ac940e8bac4ca216470c80c26688a0e79e09f520a942bb27da3386/types-redis-4.6.0.20241004.tar.gz", hash = "sha256:5f17d2b3f9091ab75384153bfa276619ffa1cf6a38da60e10d5e6749cc5b902e", size = 49679, upload-time = "2024-10-04T02:43:59.224Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/55/82/7d25dce10aad92d2226b269bce2f85cfd843b4477cd50245d7d40ecf8f89/types_redis-4.6.0.20241004-py3-none-any.whl", hash = "sha256:ef5da68cb827e5f606c8f9c0b49eeee4c2669d6d97122f301d3a55dc6a63f6ed", size = 58737, upload-time = "2024-10-04T02:43:57.968Z" }, +] + [[package]] name = "types-regex" version = "2024.11.6.20250403" From 854c1aa37d52645ad8c34ecde4d9b91de857c5d5 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 13 Aug 2025 10:15:45 +0800 Subject: [PATCH 18/27] fix: goto-anything highlighting consistency improvements (#23843) --- web/app/components/goto-anything/command-selector.tsx | 2 +- web/app/components/goto-anything/index.tsx | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/web/app/components/goto-anything/command-selector.tsx b/web/app/components/goto-anything/command-selector.tsx index 2b62c92a59..d13421a0eb 100644 --- a/web/app/components/goto-anything/command-selector.tsx +++ b/web/app/components/goto-anything/command-selector.tsx @@ -61,7 +61,7 @@ const CommandSelector: FC = ({ actions, onCommandSelect, searchFilter, co className="flex cursor-pointer items-center rounded-md p-2.5 transition-all - duration-150 hover:bg-state-base-hover-alt aria-[selected=true]:bg-state-base-hover" + duration-150 hover:bg-state-base-hover aria-[selected=true]:bg-state-base-hover-alt" onSelect={() => onCommandSelect(action.shortcut)} > diff --git a/web/app/components/goto-anything/index.tsx b/web/app/components/goto-anything/index.tsx index 2d2d56eea1..bff0773a46 100644 --- a/web/app/components/goto-anything/index.tsx +++ b/web/app/components/goto-anything/index.tsx @@ -37,6 +37,7 @@ const GotoAnything: FC = ({ const handleNavSearch = useCallback((q: string) => { setShow(true) setSearchQuery(q) + setCmdVal('') requestAnimationFrame(() => inputRef.current?.focus()) }, []) // Filter actions based on context @@ -333,7 +334,7 @@ const GotoAnything: FC = ({ handleNavigate(result)} > {result.icon} From e600070a61f350d19f87619a4314b47e75d49b14 Mon Sep 17 00:00:00 2001 From: QuantumGhost Date: Wed, 13 Aug 2025 11:13:08 +0800 Subject: [PATCH 19/27] feat(api): auto-delete WorkflowDraftVariable when app is deleted (#23737) This commit introduces a background task that automatically deletes `WorkflowDraftVariable` records when their associated workflow apps are deleted. 
Additionally, it adds a new cleanup script `cleanup-orphaned-draft-variables` to remove existing orphaned draft variables from the database. --- api/commands.py | 136 ++++++++++ api/extensions/ext_commands.py | 2 + api/tasks/remove_app_and_related_data_task.py | 74 +++++- api/tests/integration_tests/tasks/__init__.py | 0 .../test_remove_app_and_related_data_task.py | 214 +++++++++++++++ api/tests/unit_tests/tasks/__init__.py | 0 .../test_remove_app_and_related_data_task.py | 243 ++++++++++++++++++ 7 files changed, 665 insertions(+), 4 deletions(-) create mode 100644 api/tests/integration_tests/tasks/__init__.py create mode 100644 api/tests/integration_tests/tasks/test_remove_app_and_related_data_task.py create mode 100644 api/tests/unit_tests/tasks/__init__.py create mode 100644 api/tests/unit_tests/tasks/test_remove_app_and_related_data_task.py diff --git a/api/commands.py b/api/commands.py index 8ee52ba716..6b38e34b9b 100644 --- a/api/commands.py +++ b/api/commands.py @@ -36,6 +36,7 @@ from services.account_service import AccountService, RegisterService, TenantServ from services.clear_free_plan_tenant_expired_logs import ClearFreePlanTenantExpiredLogs from services.plugin.data_migration import PluginDataMigration from services.plugin.plugin_migration import PluginMigration +from tasks.remove_app_and_related_data_task import delete_draft_variables_batch @click.command("reset-password", help="Reset the account password.") @@ -1202,3 +1203,138 @@ def setup_system_tool_oauth_client(provider, client_params): db.session.add(oauth_client) db.session.commit() click.echo(click.style(f"OAuth client params setup successfully. id: {oauth_client.id}", fg="green")) + + +def _find_orphaned_draft_variables(batch_size: int = 1000) -> list[str]: + """ + Find draft variables that reference non-existent apps. + + Args: + batch_size: Maximum number of orphaned app IDs to return + + Returns: + List of app IDs that have draft variables but don't exist in the apps table + """ + query = """ + SELECT DISTINCT wdv.app_id + FROM workflow_draft_variables AS wdv + WHERE NOT EXISTS( + SELECT 1 FROM apps WHERE apps.id = wdv.app_id + ) + LIMIT :batch_size + """ + + with db.engine.connect() as conn: + result = conn.execute(sa.text(query), {"batch_size": batch_size}) + return [row[0] for row in result] + + +def _count_orphaned_draft_variables() -> dict[str, Any]: + """ + Count orphaned draft variables by app. 
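+
+    Orphans are detected with a NOT EXISTS anti-join against the apps table
+    and counted per app_id, so the result only covers variables whose app row
+    has already been deleted.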
+ + Returns: + Dictionary with statistics about orphaned variables + """ + query = """ + SELECT + wdv.app_id, + COUNT(*) as variable_count + FROM workflow_draft_variables AS wdv + WHERE NOT EXISTS( + SELECT 1 FROM apps WHERE apps.id = wdv.app_id + ) + GROUP BY wdv.app_id + ORDER BY variable_count DESC + """ + + with db.engine.connect() as conn: + result = conn.execute(sa.text(query)) + orphaned_by_app = {row[0]: row[1] for row in result} + + total_orphaned = sum(orphaned_by_app.values()) + app_count = len(orphaned_by_app) + + return { + "total_orphaned_variables": total_orphaned, + "orphaned_app_count": app_count, + "orphaned_by_app": orphaned_by_app, + } + + +@click.command() +@click.option("--dry-run", is_flag=True, help="Show what would be deleted without actually deleting") +@click.option("--batch-size", default=1000, help="Number of records to process per batch (default 1000)") +@click.option("--max-apps", default=None, type=int, help="Maximum number of apps to process (default: no limit)") +@click.option("-f", "--force", is_flag=True, help="Skip user confirmation and force the command to execute.") +def cleanup_orphaned_draft_variables( + dry_run: bool, + batch_size: int, + max_apps: int | None, + force: bool = False, +): + """ + Clean up orphaned draft variables from the database. + + This script finds and removes draft variables that belong to apps + that no longer exist in the database. + """ + logger = logging.getLogger(__name__) + + # Get statistics + stats = _count_orphaned_draft_variables() + + logger.info("Found %s orphaned draft variables", stats["total_orphaned_variables"]) + logger.info("Across %s non-existent apps", stats["orphaned_app_count"]) + + if stats["total_orphaned_variables"] == 0: + logger.info("No orphaned draft variables found. Exiting.") + return + + if dry_run: + logger.info("DRY RUN: Would delete the following:") + for app_id, count in sorted(stats["orphaned_by_app"].items(), key=lambda x: x[1], reverse=True)[ + :10 + ]: # Show top 10 + logger.info(" App %s: %s variables", app_id, count) + if len(stats["orphaned_by_app"]) > 10: + logger.info(" ... and %s more apps", len(stats["orphaned_by_app"]) - 10) + return + + # Confirm deletion + if not force: + click.confirm( + f"Are you sure you want to delete {stats['total_orphaned_variables']} " + f"orphaned draft variables from {stats['orphaned_app_count']} apps?", + abort=True, + ) + + total_deleted = 0 + processed_apps = 0 + + while True: + if max_apps and processed_apps >= max_apps: + logger.info("Reached maximum app limit (%s). Stopping.", max_apps) + break + + orphaned_app_ids = _find_orphaned_draft_variables(batch_size=10) + if not orphaned_app_ids: + logger.info("No more orphaned draft variables found.") + break + + for app_id in orphaned_app_ids: + if max_apps and processed_apps >= max_apps: + break + + try: + deleted_count = delete_draft_variables_batch(app_id, batch_size) + total_deleted += deleted_count + processed_apps += 1 + + logger.info("Deleted %s variables for app %s", deleted_count, app_id) + + except Exception: + logger.exception("Error processing app %s", app_id) + continue + + logger.info("Cleanup completed. 
Total deleted: %s variables across %s apps", total_deleted, processed_apps) diff --git a/api/extensions/ext_commands.py b/api/extensions/ext_commands.py index 600e336c19..8904ff7a92 100644 --- a/api/extensions/ext_commands.py +++ b/api/extensions/ext_commands.py @@ -4,6 +4,7 @@ from dify_app import DifyApp def init_app(app: DifyApp): from commands import ( add_qdrant_index, + cleanup_orphaned_draft_variables, clear_free_plan_tenant_expired_logs, clear_orphaned_file_records, convert_to_agent_apps, @@ -42,6 +43,7 @@ def init_app(app: DifyApp): clear_orphaned_file_records, remove_orphaned_files_on_storage, setup_system_tool_oauth_client, + cleanup_orphaned_draft_variables, ] for cmd in cmds_to_register: app.cli.add_command(cmd) diff --git a/api/tasks/remove_app_and_related_data_task.py b/api/tasks/remove_app_and_related_data_task.py index 929b60e529..828c52044f 100644 --- a/api/tasks/remove_app_and_related_data_task.py +++ b/api/tasks/remove_app_and_related_data_task.py @@ -33,7 +33,11 @@ from models import ( ) from models.tools import WorkflowToolProvider from models.web import PinnedConversation, SavedMessage -from models.workflow import ConversationVariable, Workflow, WorkflowAppLog +from models.workflow import ( + ConversationVariable, + Workflow, + WorkflowAppLog, +) from repositories.factory import DifyAPIRepositoryFactory @@ -62,6 +66,7 @@ def remove_app_and_related_data_task(self, tenant_id: str, app_id: str): _delete_end_users(tenant_id, app_id) _delete_trace_app_configs(tenant_id, app_id) _delete_conversation_variables(app_id=app_id) + _delete_draft_variables(app_id) end_at = time.perf_counter() logging.info(click.style(f"App and related data deleted: {app_id} latency: {end_at - start_at}", fg="green")) @@ -91,7 +96,12 @@ def _delete_app_site(tenant_id: str, app_id: str): def del_site(site_id: str): db.session.query(Site).where(Site.id == site_id).delete(synchronize_session=False) - _delete_records("""select id from sites where app_id=:app_id limit 1000""", {"app_id": app_id}, del_site, "site") + _delete_records( + """select id from sites where app_id=:app_id limit 1000""", + {"app_id": app_id}, + del_site, + "site", + ) def _delete_app_mcp_servers(tenant_id: str, app_id: str): @@ -111,7 +121,10 @@ def _delete_app_api_tokens(tenant_id: str, app_id: str): db.session.query(ApiToken).where(ApiToken.id == api_token_id).delete(synchronize_session=False) _delete_records( - """select id from api_tokens where app_id=:app_id limit 1000""", {"app_id": app_id}, del_api_token, "api token" + """select id from api_tokens where app_id=:app_id limit 1000""", + {"app_id": app_id}, + del_api_token, + "api token", ) @@ -273,7 +286,10 @@ def _delete_app_messages(tenant_id: str, app_id: str): db.session.query(Message).where(Message.id == message_id).delete() _delete_records( - """select id from messages where app_id=:app_id limit 1000""", {"app_id": app_id}, del_message, "message" + """select id from messages where app_id=:app_id limit 1000""", + {"app_id": app_id}, + del_message, + "message", ) @@ -329,6 +345,56 @@ def _delete_trace_app_configs(tenant_id: str, app_id: str): ) +def _delete_draft_variables(app_id: str): + """Delete all workflow draft variables for an app in batches.""" + return delete_draft_variables_batch(app_id, batch_size=1000) + + +def delete_draft_variables_batch(app_id: str, batch_size: int = 1000) -> int: + """ + Delete draft variables for an app in batches. 
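+
+    Each iteration opens its own transaction, selects up to batch_size
+    variable ids, and removes them with a single DELETE ... WHERE id IN
+    statement, so no long-lived transaction has to lock the full set.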
+ + Args: + app_id: The ID of the app whose draft variables should be deleted + batch_size: Number of records to delete per batch + + Returns: + Total number of records deleted + """ + if batch_size <= 0: + raise ValueError("batch_size must be positive") + + total_deleted = 0 + + while True: + with db.engine.begin() as conn: + # Get a batch of draft variable IDs + query_sql = """ + SELECT id FROM workflow_draft_variables + WHERE app_id = :app_id + LIMIT :batch_size + """ + result = conn.execute(sa.text(query_sql), {"app_id": app_id, "batch_size": batch_size}) + + draft_var_ids = [row[0] for row in result] + if not draft_var_ids: + break + + # Delete the batch + delete_sql = """ + DELETE FROM workflow_draft_variables + WHERE id IN :ids + """ + deleted_result = conn.execute(sa.text(delete_sql), {"ids": tuple(draft_var_ids)}) + batch_deleted = deleted_result.rowcount + total_deleted += batch_deleted + + logging.info(click.style(f"Deleted {batch_deleted} draft variables (batch) for app {app_id}", fg="green")) + + logging.info(click.style(f"Deleted {total_deleted} total draft variables for app {app_id}", fg="green")) + return total_deleted + + def _delete_records(query_sql: str, params: dict, delete_func: Callable, name: str) -> None: while True: with db.engine.begin() as conn: diff --git a/api/tests/integration_tests/tasks/__init__.py b/api/tests/integration_tests/tasks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/integration_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/integration_tests/tasks/test_remove_app_and_related_data_task.py new file mode 100644 index 0000000000..2f7fc60ada --- /dev/null +++ b/api/tests/integration_tests/tasks/test_remove_app_and_related_data_task.py @@ -0,0 +1,214 @@ +import uuid + +import pytest +from sqlalchemy import delete + +from core.variables.segments import StringSegment +from models import Tenant, db +from models.model import App +from models.workflow import WorkflowDraftVariable +from tasks.remove_app_and_related_data_task import _delete_draft_variables, delete_draft_variables_batch + + +@pytest.fixture +def app_and_tenant(flask_req_ctx): + tenant_id = uuid.uuid4() + tenant = Tenant( + id=tenant_id, + name="test_tenant", + ) + db.session.add(tenant) + + app = App( + tenant_id=tenant_id, # Now tenant.id will have a value + name=f"Test App for tenant {tenant.id}", + mode="workflow", + enable_site=True, + enable_api=True, + ) + db.session.add(app) + db.session.flush() + yield (tenant, app) + + # Cleanup with proper error handling + db.session.delete(app) + db.session.delete(tenant) + + +class TestDeleteDraftVariablesIntegration: + @pytest.fixture + def setup_test_data(self, app_and_tenant): + """Create test data with apps and draft variables.""" + tenant, app = app_and_tenant + + # Create a second app for testing + app2 = App( + tenant_id=tenant.id, + name="Test App 2", + mode="workflow", + enable_site=True, + enable_api=True, + ) + db.session.add(app2) + db.session.commit() + + # Create draft variables for both apps + variables_app1 = [] + variables_app2 = [] + + for i in range(5): + var1 = WorkflowDraftVariable.new_node_variable( + app_id=app.id, + node_id=f"node_{i}", + name=f"var_{i}", + value=StringSegment(value="test_value"), + node_execution_id=str(uuid.uuid4()), + ) + db.session.add(var1) + variables_app1.append(var1) + + var2 = WorkflowDraftVariable.new_node_variable( + app_id=app2.id, + node_id=f"node_{i}", + name=f"var_{i}", + value=StringSegment(value="test_value"), + 
node_execution_id=str(uuid.uuid4()), + ) + db.session.add(var2) + variables_app2.append(var2) + + # Commit all the variables to the database + db.session.commit() + + yield { + "app1": app, + "app2": app2, + "tenant": tenant, + "variables_app1": variables_app1, + "variables_app2": variables_app2, + } + + # Cleanup - refresh session and check if objects still exist + db.session.rollback() # Clear any pending changes + + # Clean up remaining variables + cleanup_query = ( + delete(WorkflowDraftVariable) + .where( + WorkflowDraftVariable.app_id.in_([app.id, app2.id]), + ) + .execution_options(synchronize_session=False) + ) + db.session.execute(cleanup_query) + + # Clean up app2 + app2_obj = db.session.get(App, app2.id) + if app2_obj: + db.session.delete(app2_obj) + + db.session.commit() + + def test_delete_draft_variables_batch_removes_correct_variables(self, setup_test_data): + """Test that batch deletion only removes variables for the specified app.""" + data = setup_test_data + app1_id = data["app1"].id + app2_id = data["app2"].id + + # Verify initial state + app1_vars_before = db.session.query(WorkflowDraftVariable).filter_by(app_id=app1_id).count() + app2_vars_before = db.session.query(WorkflowDraftVariable).filter_by(app_id=app2_id).count() + assert app1_vars_before == 5 + assert app2_vars_before == 5 + + # Delete app1 variables + deleted_count = delete_draft_variables_batch(app1_id, batch_size=10) + + # Verify results + assert deleted_count == 5 + + app1_vars_after = db.session.query(WorkflowDraftVariable).filter_by(app_id=app1_id).count() + app2_vars_after = db.session.query(WorkflowDraftVariable).filter_by(app_id=app2_id).count() + + assert app1_vars_after == 0 # All app1 variables deleted + assert app2_vars_after == 5 # App2 variables unchanged + + def test_delete_draft_variables_batch_with_small_batch_size(self, setup_test_data): + """Test batch deletion with small batch size processes all records.""" + data = setup_test_data + app1_id = data["app1"].id + + # Use small batch size to force multiple batches + deleted_count = delete_draft_variables_batch(app1_id, batch_size=2) + + assert deleted_count == 5 + + # Verify all variables are deleted + remaining_vars = db.session.query(WorkflowDraftVariable).filter_by(app_id=app1_id).count() + assert remaining_vars == 0 + + def test_delete_draft_variables_batch_nonexistent_app(self, setup_test_data): + """Test that deleting variables for nonexistent app returns 0.""" + nonexistent_app_id = str(uuid.uuid4()) # Use a valid UUID format + + deleted_count = delete_draft_variables_batch(nonexistent_app_id, batch_size=100) + + assert deleted_count == 0 + + def test_delete_draft_variables_wrapper_function(self, setup_test_data): + """Test that _delete_draft_variables wrapper function works correctly.""" + data = setup_test_data + app1_id = data["app1"].id + + # Verify initial state + vars_before = db.session.query(WorkflowDraftVariable).filter_by(app_id=app1_id).count() + assert vars_before == 5 + + # Call wrapper function + deleted_count = _delete_draft_variables(app1_id) + + # Verify results + assert deleted_count == 5 + + vars_after = db.session.query(WorkflowDraftVariable).filter_by(app_id=app1_id).count() + assert vars_after == 0 + + def test_batch_deletion_handles_large_dataset(self, app_and_tenant): + """Test batch deletion with larger dataset to verify batching logic.""" + tenant, app = app_and_tenant + + # Create many draft variables + variables = [] + for i in range(25): + var = WorkflowDraftVariable.new_node_variable( + app_id=app.id, + 
node_id=f"node_{i}", + name=f"var_{i}", + value=StringSegment(value="test_value"), + node_execution_id=str(uuid.uuid4()), + ) + db.session.add(var) + variables.append(var) + variable_ids = [i.id for i in variables] + + # Commit the variables to the database + db.session.commit() + + try: + # Use small batch size to force multiple batches + deleted_count = delete_draft_variables_batch(app.id, batch_size=8) + + assert deleted_count == 25 + + # Verify all variables are deleted + remaining_vars = db.session.query(WorkflowDraftVariable).filter_by(app_id=app.id).count() + assert remaining_vars == 0 + + finally: + query = ( + delete(WorkflowDraftVariable) + .where( + WorkflowDraftVariable.id.in_(variable_ids), + ) + .execution_options(synchronize_session=False) + ) + db.session.execute(query) diff --git a/api/tests/unit_tests/tasks/__init__.py b/api/tests/unit_tests/tasks/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/api/tests/unit_tests/tasks/test_remove_app_and_related_data_task.py b/api/tests/unit_tests/tasks/test_remove_app_and_related_data_task.py new file mode 100644 index 0000000000..d8003570b5 --- /dev/null +++ b/api/tests/unit_tests/tasks/test_remove_app_and_related_data_task.py @@ -0,0 +1,243 @@ +from unittest.mock import ANY, MagicMock, call, patch + +import pytest +import sqlalchemy as sa + +from tasks.remove_app_and_related_data_task import _delete_draft_variables, delete_draft_variables_batch + + +class TestDeleteDraftVariablesBatch: + @patch("tasks.remove_app_and_related_data_task.db") + def test_delete_draft_variables_batch_success(self, mock_db): + """Test successful deletion of draft variables in batches.""" + app_id = "test-app-id" + batch_size = 100 + + # Mock database connection and engine + mock_conn = MagicMock() + mock_engine = MagicMock() + mock_db.engine = mock_engine + # Properly mock the context manager + mock_context_manager = MagicMock() + mock_context_manager.__enter__.return_value = mock_conn + mock_context_manager.__exit__.return_value = None + mock_engine.begin.return_value = mock_context_manager + + # Mock two batches of results, then empty + batch1_ids = [f"var-{i}" for i in range(100)] + batch2_ids = [f"var-{i}" for i in range(100, 150)] + + # Setup side effects for execute calls in the correct order: + # 1. SELECT (returns batch1_ids) + # 2. DELETE (returns result with rowcount=100) + # 3. SELECT (returns batch2_ids) + # 4. DELETE (returns result with rowcount=50) + # 5. 
SELECT (returns empty, ends loop) + + # Create mock results with actual integer rowcount attributes + class MockResult: + def __init__(self, rowcount): + self.rowcount = rowcount + + # First SELECT result + select_result1 = MagicMock() + select_result1.__iter__.return_value = iter([(id_,) for id_ in batch1_ids]) + + # First DELETE result + delete_result1 = MockResult(rowcount=100) + + # Second SELECT result + select_result2 = MagicMock() + select_result2.__iter__.return_value = iter([(id_,) for id_ in batch2_ids]) + + # Second DELETE result + delete_result2 = MockResult(rowcount=50) + + # Third SELECT result (empty, ends loop) + select_result3 = MagicMock() + select_result3.__iter__.return_value = iter([]) + + # Configure side effects in the correct order + mock_conn.execute.side_effect = [ + select_result1, # First SELECT + delete_result1, # First DELETE + select_result2, # Second SELECT + delete_result2, # Second DELETE + select_result3, # Third SELECT (empty) + ] + + # Execute the function + result = delete_draft_variables_batch(app_id, batch_size) + + # Verify the result + assert result == 150 + + # Verify database calls + assert mock_conn.execute.call_count == 5 # 3 selects + 2 deletes + + # Verify the expected calls in order: + # 1. SELECT, 2. DELETE, 3. SELECT, 4. DELETE, 5. SELECT + expected_calls = [ + # First SELECT + call( + sa.text(""" + SELECT id FROM workflow_draft_variables + WHERE app_id = :app_id + LIMIT :batch_size + """), + {"app_id": app_id, "batch_size": batch_size}, + ), + # First DELETE + call( + sa.text(""" + DELETE FROM workflow_draft_variables + WHERE id IN :ids + """), + {"ids": tuple(batch1_ids)}, + ), + # Second SELECT + call( + sa.text(""" + SELECT id FROM workflow_draft_variables + WHERE app_id = :app_id + LIMIT :batch_size + """), + {"app_id": app_id, "batch_size": batch_size}, + ), + # Second DELETE + call( + sa.text(""" + DELETE FROM workflow_draft_variables + WHERE id IN :ids + """), + {"ids": tuple(batch2_ids)}, + ), + # Third SELECT (empty result) + call( + sa.text(""" + SELECT id FROM workflow_draft_variables + WHERE app_id = :app_id + LIMIT :batch_size + """), + {"app_id": app_id, "batch_size": batch_size}, + ), + ] + + # Check that all calls were made correctly + actual_calls = mock_conn.execute.call_args_list + assert len(actual_calls) == len(expected_calls) + + # Simplified verification - just check that the right number of calls were made + # and that the SQL queries contain the expected patterns + for i, actual_call in enumerate(actual_calls): + if i % 2 == 0: # SELECT calls (even indices: 0, 2, 4) + # Verify it's a SELECT query + sql_text = str(actual_call[0][0]) + assert "SELECT id FROM workflow_draft_variables" in sql_text + assert "WHERE app_id = :app_id" in sql_text + assert "LIMIT :batch_size" in sql_text + else: # DELETE calls (odd indices: 1, 3) + # Verify it's a DELETE query + sql_text = str(actual_call[0][0]) + assert "DELETE FROM workflow_draft_variables" in sql_text + assert "WHERE id IN :ids" in sql_text + + @patch("tasks.remove_app_and_related_data_task.db") + def test_delete_draft_variables_batch_empty_result(self, mock_db): + """Test deletion when no draft variables exist for the app.""" + app_id = "nonexistent-app-id" + batch_size = 1000 + + # Mock database connection + mock_conn = MagicMock() + mock_engine = MagicMock() + mock_db.engine = mock_engine + # Properly mock the context manager + mock_context_manager = MagicMock() + mock_context_manager.__enter__.return_value = mock_conn + mock_context_manager.__exit__.return_value = 
None + mock_engine.begin.return_value = mock_context_manager + + # Mock empty result + empty_result = MagicMock() + empty_result.__iter__.return_value = iter([]) + mock_conn.execute.return_value = empty_result + + result = delete_draft_variables_batch(app_id, batch_size) + + assert result == 0 + assert mock_conn.execute.call_count == 1 # Only one select query + + def test_delete_draft_variables_batch_invalid_batch_size(self): + """Test that invalid batch size raises ValueError.""" + app_id = "test-app-id" + + with pytest.raises(ValueError, match="batch_size must be positive"): + delete_draft_variables_batch(app_id, -1) + + with pytest.raises(ValueError, match="batch_size must be positive"): + delete_draft_variables_batch(app_id, 0) + + @patch("tasks.remove_app_and_related_data_task.db") + @patch("tasks.remove_app_and_related_data_task.logging") + def test_delete_draft_variables_batch_logs_progress(self, mock_logging, mock_db): + """Test that batch deletion logs progress correctly.""" + app_id = "test-app-id" + batch_size = 50 + + # Mock database + mock_conn = MagicMock() + mock_engine = MagicMock() + mock_db.engine = mock_engine + # Properly mock the context manager + mock_context_manager = MagicMock() + mock_context_manager.__enter__.return_value = mock_conn + mock_context_manager.__exit__.return_value = None + mock_engine.begin.return_value = mock_context_manager + + # Mock one batch then empty + batch_ids = [f"var-{i}" for i in range(30)] + # Create properly configured mocks + select_result = MagicMock() + select_result.__iter__.return_value = iter([(id_,) for id_ in batch_ids]) + + # Create simple object with rowcount attribute + class MockResult: + def __init__(self, rowcount): + self.rowcount = rowcount + + delete_result = MockResult(rowcount=30) + + empty_result = MagicMock() + empty_result.__iter__.return_value = iter([]) + + mock_conn.execute.side_effect = [ + # Select query result + select_result, + # Delete query result + delete_result, + # Empty select result (end condition) + empty_result, + ] + + result = delete_draft_variables_batch(app_id, batch_size) + + assert result == 30 + + # Verify logging calls + assert mock_logging.info.call_count == 2 + mock_logging.info.assert_any_call( + ANY # click.style call + ) + + @patch("tasks.remove_app_and_related_data_task.delete_draft_variables_batch") + def test_delete_draft_variables_calls_batch_function(self, mock_batch_delete): + """Test that _delete_draft_variables calls the batch function correctly.""" + app_id = "test-app-id" + expected_return = 42 + mock_batch_delete.return_value = expected_return + + result = _delete_draft_variables(app_id) + + assert result == expected_return + mock_batch_delete.assert_called_once_with(app_id, batch_size=1000) From ad2bc7f8ac6989d59f488f09c023c32b3de080e2 Mon Sep 17 00:00:00 2001 From: Wu Tianwei <30284043+WTW0313@users.noreply.github.com> Date: Wed, 13 Aug 2025 11:20:40 +0800 Subject: [PATCH 20/27] fix: update modal component to use relative positioning (#23855) --- web/app/components/base/modal/index.tsx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/web/app/components/base/modal/index.tsx b/web/app/components/base/modal/index.tsx index bb23bc3746..426953261e 100644 --- a/web/app/components/base/modal/index.tsx +++ b/web/app/components/base/modal/index.tsx @@ -52,7 +52,7 @@ export default function Modal({
Date: Wed, 13 Aug 2025 11:21:32 +0800 Subject: [PATCH 21/27] Add more comprehensive Test Containers Based Tests for advanced prompt service (#23850) --- .../test_advanced_prompt_template_service.py | 885 ++++++++++++++ .../services/test_agent_service.py | 1033 +++++++++++++++++ 2 files changed, 1918 insertions(+) create mode 100644 api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py create mode 100644 api/tests/test_containers_integration_tests/services/test_agent_service.py diff --git a/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py b/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py new file mode 100644 index 0000000000..9ed9008af9 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_advanced_prompt_template_service.py @@ -0,0 +1,885 @@ +import copy + +import pytest +from faker import Faker + +from core.prompt.prompt_templates.advanced_prompt_templates import ( + BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG, + BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG, + BAICHUAN_COMPLETION_APP_CHAT_PROMPT_CONFIG, + BAICHUAN_COMPLETION_APP_COMPLETION_PROMPT_CONFIG, + BAICHUAN_CONTEXT, + CHAT_APP_CHAT_PROMPT_CONFIG, + CHAT_APP_COMPLETION_PROMPT_CONFIG, + COMPLETION_APP_CHAT_PROMPT_CONFIG, + COMPLETION_APP_COMPLETION_PROMPT_CONFIG, + CONTEXT, +) +from models.model import AppMode +from services.advanced_prompt_template_service import AdvancedPromptTemplateService + + +class TestAdvancedPromptTemplateService: + """Integration tests for AdvancedPromptTemplateService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + # This service doesn't have external dependencies, but we keep the pattern + # for consistency with other test files + return {} + + def test_get_prompt_baichuan_model_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful prompt generation for Baichuan model. + + This test verifies: + - Proper prompt generation for Baichuan models + - Correct model detection logic + - Appropriate prompt template selection + """ + fake = Faker() + + # Test data for Baichuan model + args = { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "baichuan-13b-chat", + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + + # Verify context is included for Baichuan model + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert BAICHUAN_CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + assert "{{#histories#}}" in prompt_text + assert "{{#query#}}" in prompt_text + + def test_get_prompt_common_model_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful prompt generation for common models. 
+ + This test verifies: + - Proper prompt generation for non-Baichuan models + - Correct model detection logic + - Appropriate prompt template selection + """ + fake = Faker() + + # Test data for common model + args = { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "gpt-3.5-turbo", + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + + # Verify context is included for common model + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + assert "{{#histories#}}" in prompt_text + assert "{{#query#}}" in prompt_text + + def test_get_prompt_case_insensitive_baichuan_detection( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan model detection is case insensitive. + + This test verifies: + - Model name detection works regardless of case + - Proper prompt template selection for different case variations + """ + fake = Faker() + + # Test different case variations + test_cases = ["Baichuan-13B-Chat", "BAICHUAN-13B-CHAT", "baichuan-13b-chat", "BaiChuan-13B-Chat"] + + for model_name in test_cases: + args = { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": model_name, + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify Baichuan template is used + assert result is not None + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert BAICHUAN_CONTEXT in prompt_text + + def test_get_common_prompt_chat_app_completion_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test common prompt generation for chat app with completion mode. + + This test verifies: + - Correct prompt template selection for chat app + completion mode + - Proper context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "completion", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + assert "conversation_histories_role" in result["completion_prompt_config"] + assert "stop" in result + + # Verify context is included + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + assert "{{#histories#}}" in prompt_text + assert "{{#query#}}" in prompt_text + + def test_get_common_prompt_chat_app_chat_mode(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test common prompt generation for chat app with chat mode. 
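+
+        Template selection presumably keys on (app_mode, model_mode); a hedged
+        sketch of the mapping exercised by this test and its siblings:
+
+            # AppMode.CHAT + "chat"             -> CHAT_APP_CHAT_PROMPT_CONFIG
+            # AppMode.CHAT + "completion"       -> CHAT_APP_COMPLETION_PROMPT_CONFIG
+            # AppMode.COMPLETION + "chat"       -> COMPLETION_APP_CHAT_PROMPT_CONFIG
+            # AppMode.COMPLETION + "completion" -> COMPLETION_APP_COMPLETION_PROMPT_CONFIG
+            template = copy.deepcopy(CHAT_APP_CHAT_PROMPT_CONFIG)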
+ + This test verifies: + - Correct prompt template selection for chat app + chat mode + - Proper context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "chat", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "chat_prompt_config" in result + assert "prompt" in result["chat_prompt_config"] + assert len(result["chat_prompt_config"]["prompt"]) > 0 + assert "role" in result["chat_prompt_config"]["prompt"][0] + assert "text" in result["chat_prompt_config"]["prompt"][0] + + # Verify context is included + prompt_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + + def test_get_common_prompt_completion_app_completion_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test common prompt generation for completion app with completion mode. + + This test verifies: + - Correct prompt template selection for completion app + completion mode + - Proper context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt(AppMode.COMPLETION.value, "completion", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + assert "stop" in result + + # Verify context is included + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + + def test_get_common_prompt_completion_app_chat_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test common prompt generation for completion app with chat mode. + + This test verifies: + - Correct prompt template selection for completion app + chat mode + - Proper context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt(AppMode.COMPLETION.value, "chat", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "chat_prompt_config" in result + assert "prompt" in result["chat_prompt_config"] + assert len(result["chat_prompt_config"]["prompt"]) > 0 + assert "role" in result["chat_prompt_config"]["prompt"][0] + assert "text" in result["chat_prompt_config"]["prompt"][0] + + # Verify context is included + prompt_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + + def test_get_common_prompt_no_context(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test common prompt generation without context. 
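+
+        Note that has_context is the string "true"/"false", not a boolean; the
+        service presumably gates the context prepend on an exact string match
+        (hedged sketch):
+
+            context = CONTEXT if has_context == "true" else ""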
+ + This test verifies: + - Correct handling when has_context is "false" + - Context is not included in prompt + - Template structure remains intact + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "completion", "false") + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + + # Verify context is NOT included + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert CONTEXT not in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + assert "{{#histories#}}" in prompt_text + assert "{{#query#}}" in prompt_text + + def test_get_common_prompt_unsupported_app_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test common prompt generation with unsupported app mode. + + This test verifies: + - Proper handling of unsupported app modes + - Default empty dict return + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt("unsupported_mode", "completion", "true") + + # Assert: Verify empty dict is returned + assert result == {} + + def test_get_common_prompt_unsupported_model_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test common prompt generation with unsupported model mode. + + This test verifies: + - Proper handling of unsupported model modes + - Default empty dict return + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_common_prompt(AppMode.CHAT.value, "unsupported_mode", "true") + + # Assert: Verify empty dict is returned + assert result == {} + + def test_get_completion_prompt_with_context(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test completion prompt generation with context. + + This test verifies: + - Proper context integration in completion prompts + - Template structure preservation + - Context placement at the beginning + """ + fake = Faker() + + # Create test prompt template + prompt_template = copy.deepcopy(CHAT_APP_COMPLETION_PROMPT_CONFIG) + original_text = prompt_template["completion_prompt_config"]["prompt"]["text"] + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_completion_prompt(prompt_template, "true", CONTEXT) + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + + # Verify context is prepended to original text + result_text = result["completion_prompt_config"]["prompt"]["text"] + assert result_text.startswith(CONTEXT) + assert original_text in result_text + assert result_text == CONTEXT + original_text + + def test_get_completion_prompt_without_context( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test completion prompt generation without context. 
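+
+        For contrast with the with-context case above, the prepend targets
+        differ by template shape (illustrative, restating the verified
+        assertions):
+
+            # completion templates keep a single prompt string
+            config["completion_prompt_config"]["prompt"]["text"] = context + text
+            # chat templates keep a message list; context lands in the first entry
+            config["chat_prompt_config"]["prompt"][0]["text"] = context + text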
+ + This test verifies: + - Original template is preserved when no context + - No modification to prompt text + """ + fake = Faker() + + # Create test prompt template + prompt_template = copy.deepcopy(CHAT_APP_COMPLETION_PROMPT_CONFIG) + original_text = prompt_template["completion_prompt_config"]["prompt"]["text"] + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_completion_prompt(prompt_template, "false", CONTEXT) + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + + # Verify original text is unchanged + result_text = result["completion_prompt_config"]["prompt"]["text"] + assert result_text == original_text + assert CONTEXT not in result_text + + def test_get_chat_prompt_with_context(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test chat prompt generation with context. + + This test verifies: + - Proper context integration in chat prompts + - Template structure preservation + - Context placement at the beginning of first message + """ + fake = Faker() + + # Create test prompt template + prompt_template = copy.deepcopy(CHAT_APP_CHAT_PROMPT_CONFIG) + original_text = prompt_template["chat_prompt_config"]["prompt"][0]["text"] + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_chat_prompt(prompt_template, "true", CONTEXT) + + # Assert: Verify the expected outcomes + assert result is not None + assert "chat_prompt_config" in result + assert "prompt" in result["chat_prompt_config"] + assert len(result["chat_prompt_config"]["prompt"]) > 0 + assert "text" in result["chat_prompt_config"]["prompt"][0] + + # Verify context is prepended to original text + result_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert result_text.startswith(CONTEXT) + assert original_text in result_text + assert result_text == CONTEXT + original_text + + def test_get_chat_prompt_without_context(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test chat prompt generation without context. + + This test verifies: + - Original template is preserved when no context + - No modification to prompt text + """ + fake = Faker() + + # Create test prompt template + prompt_template = copy.deepcopy(CHAT_APP_CHAT_PROMPT_CONFIG) + original_text = prompt_template["chat_prompt_config"]["prompt"][0]["text"] + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_chat_prompt(prompt_template, "false", CONTEXT) + + # Assert: Verify the expected outcomes + assert result is not None + assert "chat_prompt_config" in result + assert "prompt" in result["chat_prompt_config"] + assert len(result["chat_prompt_config"]["prompt"]) > 0 + assert "text" in result["chat_prompt_config"]["prompt"][0] + + # Verify original text is unchanged + result_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert result_text == original_text + assert CONTEXT not in result_text + + def test_get_baichuan_prompt_chat_app_completion_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan prompt generation for chat app with completion mode. 
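+
+        The Baichuan path presumably mirrors get_common_prompt, swapping in the
+        BAICHUAN_* template constants and BAICHUAN_CONTEXT (hedged sketch; the
+        helper signature matches the direct calls tested above):
+
+            template = copy.deepcopy(BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG)
+            return cls.get_completion_prompt(template, has_context, BAICHUAN_CONTEXT)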
+ + This test verifies: + - Correct Baichuan prompt template selection for chat app + completion mode + - Proper Baichuan context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "completion", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + assert "conversation_histories_role" in result["completion_prompt_config"] + assert "stop" in result + + # Verify Baichuan context is included + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert BAICHUAN_CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + assert "{{#histories#}}" in prompt_text + assert "{{#query#}}" in prompt_text + + def test_get_baichuan_prompt_chat_app_chat_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan prompt generation for chat app with chat mode. + + This test verifies: + - Correct Baichuan prompt template selection for chat app + chat mode + - Proper Baichuan context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "chat", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "chat_prompt_config" in result + assert "prompt" in result["chat_prompt_config"] + assert len(result["chat_prompt_config"]["prompt"]) > 0 + assert "role" in result["chat_prompt_config"]["prompt"][0] + assert "text" in result["chat_prompt_config"]["prompt"][0] + + # Verify Baichuan context is included + prompt_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert BAICHUAN_CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + + def test_get_baichuan_prompt_completion_app_completion_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan prompt generation for completion app with completion mode. + + This test verifies: + - Correct Baichuan prompt template selection for completion app + completion mode + - Proper Baichuan context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.COMPLETION.value, "completion", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + assert "stop" in result + + # Verify Baichuan context is included + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert BAICHUAN_CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + + def test_get_baichuan_prompt_completion_app_chat_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan prompt generation for completion app with chat mode. 
+ + This test verifies: + - Correct Baichuan prompt template selection for completion app + chat mode + - Proper Baichuan context integration + - Template structure validation + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.COMPLETION.value, "chat", "true") + + # Assert: Verify the expected outcomes + assert result is not None + assert "chat_prompt_config" in result + assert "prompt" in result["chat_prompt_config"] + assert len(result["chat_prompt_config"]["prompt"]) > 0 + assert "role" in result["chat_prompt_config"]["prompt"][0] + assert "text" in result["chat_prompt_config"]["prompt"][0] + + # Verify Baichuan context is included + prompt_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert BAICHUAN_CONTEXT in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + + def test_get_baichuan_prompt_no_context(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test Baichuan prompt generation without context. + + This test verifies: + - Correct handling when has_context is "false" + - Baichuan context is not included in prompt + - Template structure remains intact + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "completion", "false") + + # Assert: Verify the expected outcomes + assert result is not None + assert "completion_prompt_config" in result + assert "prompt" in result["completion_prompt_config"] + assert "text" in result["completion_prompt_config"]["prompt"] + + # Verify Baichuan context is NOT included + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert BAICHUAN_CONTEXT not in prompt_text + assert "{{#pre_prompt#}}" in prompt_text + assert "{{#histories#}}" in prompt_text + assert "{{#query#}}" in prompt_text + + def test_get_baichuan_prompt_unsupported_app_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan prompt generation with unsupported app mode. + + This test verifies: + - Proper handling of unsupported app modes + - Default empty dict return + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt("unsupported_mode", "completion", "true") + + # Assert: Verify empty dict is returned + assert result == {} + + def test_get_baichuan_prompt_unsupported_model_mode( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test Baichuan prompt generation with unsupported model mode. + + This test verifies: + - Proper handling of unsupported model modes + - Default empty dict return + """ + fake = Faker() + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_baichuan_prompt(AppMode.CHAT.value, "unsupported_mode", "true") + + # Assert: Verify empty dict is returned + assert result == {} + + def test_get_prompt_all_app_modes_common_model( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test prompt generation for all app modes with common model. 
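+
+        Stylistic aside: the nested loops below could equivalently be written
+        with pytest parametrization, e.g.:
+
+            @pytest.mark.parametrize("app_mode", [AppMode.CHAT.value, AppMode.COMPLETION.value])
+            @pytest.mark.parametrize("model_mode", ["completion", "chat"])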
+ + This test verifies: + - All app modes work correctly with common models + - Proper template selection for each combination + """ + fake = Faker() + + # Test all app modes + app_modes = [AppMode.CHAT.value, AppMode.COMPLETION.value] + model_modes = ["completion", "chat"] + + for app_mode in app_modes: + for model_mode in model_modes: + args = { + "app_mode": app_mode, + "model_mode": model_mode, + "model_name": "gpt-3.5-turbo", + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify result is not empty + assert result is not None + assert result != {} + + def test_get_prompt_all_app_modes_baichuan_model( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test prompt generation for all app modes with Baichuan model. + + This test verifies: + - All app modes work correctly with Baichuan models + - Proper template selection for each combination + """ + fake = Faker() + + # Test all app modes + app_modes = [AppMode.CHAT.value, AppMode.COMPLETION.value] + model_modes = ["completion", "chat"] + + for app_mode in app_modes: + for model_mode in model_modes: + args = { + "app_mode": app_mode, + "model_mode": model_mode, + "model_name": "baichuan-13b-chat", + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify result is not empty + assert result is not None + assert result != {} + + def test_get_prompt_edge_cases(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test prompt generation with edge cases. + + This test verifies: + - Handling of edge case inputs + - Proper error handling + - Consistent behavior with unusual inputs + """ + fake = Faker() + + # Test edge cases + edge_cases = [ + {"app_mode": "", "model_mode": "completion", "model_name": "gpt-3.5-turbo", "has_context": "true"}, + {"app_mode": AppMode.CHAT.value, "model_mode": "", "model_name": "gpt-3.5-turbo", "has_context": "true"}, + {"app_mode": AppMode.CHAT.value, "model_mode": "completion", "model_name": "", "has_context": "true"}, + { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "gpt-3.5-turbo", + "has_context": "", + }, + ] + + for args in edge_cases: + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify method handles edge cases gracefully + # Should either return a valid result or empty dict, but not crash + assert result is not None + + def test_template_immutability(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test that original templates are not modified. 
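+
+        The hazard being guarded against, in miniature:
+
+            template = CHAT_APP_COMPLETION_PROMPT_CONFIG          # shared module constant
+            template["completion_prompt_config"]["prompt"]["text"] = "..."  # would leak across calls
+            template = copy.deepcopy(CHAT_APP_COMPLETION_PROMPT_CONFIG)     # safe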
+ + This test verifies: + - Original template constants are not modified + - Deep copy is used properly + - Template immutability is maintained + """ + fake = Faker() + + # Store original templates + original_chat_completion = copy.deepcopy(CHAT_APP_COMPLETION_PROMPT_CONFIG) + original_chat_chat = copy.deepcopy(CHAT_APP_CHAT_PROMPT_CONFIG) + original_completion_completion = copy.deepcopy(COMPLETION_APP_COMPLETION_PROMPT_CONFIG) + original_completion_chat = copy.deepcopy(COMPLETION_APP_CHAT_PROMPT_CONFIG) + + # Test with context + args = { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "gpt-3.5-turbo", + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify original templates are unchanged + assert original_chat_completion == CHAT_APP_COMPLETION_PROMPT_CONFIG + assert original_chat_chat == CHAT_APP_CHAT_PROMPT_CONFIG + assert original_completion_completion == COMPLETION_APP_COMPLETION_PROMPT_CONFIG + assert original_completion_chat == COMPLETION_APP_CHAT_PROMPT_CONFIG + + def test_baichuan_template_immutability(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test that original Baichuan templates are not modified. + + This test verifies: + - Original Baichuan template constants are not modified + - Deep copy is used properly + - Template immutability is maintained + """ + fake = Faker() + + # Store original templates + original_baichuan_chat_completion = copy.deepcopy(BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG) + original_baichuan_chat_chat = copy.deepcopy(BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG) + original_baichuan_completion_completion = copy.deepcopy(BAICHUAN_COMPLETION_APP_COMPLETION_PROMPT_CONFIG) + original_baichuan_completion_chat = copy.deepcopy(BAICHUAN_COMPLETION_APP_CHAT_PROMPT_CONFIG) + + # Test with context + args = { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "baichuan-13b-chat", + "has_context": "true", + } + + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify original templates are unchanged + assert original_baichuan_chat_completion == BAICHUAN_CHAT_APP_COMPLETION_PROMPT_CONFIG + assert original_baichuan_chat_chat == BAICHUAN_CHAT_APP_CHAT_PROMPT_CONFIG + assert original_baichuan_completion_completion == BAICHUAN_COMPLETION_APP_COMPLETION_PROMPT_CONFIG + assert original_baichuan_completion_chat == BAICHUAN_COMPLETION_APP_CHAT_PROMPT_CONFIG + + def test_context_integration_consistency(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test consistency of context integration across different scenarios. 
+ + This test verifies: + - Context is always prepended correctly + - Context integration is consistent across different templates + - No context duplication or corruption + """ + fake = Faker() + + # Test different scenarios + test_scenarios = [ + { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "gpt-3.5-turbo", + "has_context": "true", + }, + { + "app_mode": AppMode.CHAT.value, + "model_mode": "chat", + "model_name": "gpt-3.5-turbo", + "has_context": "true", + }, + { + "app_mode": AppMode.COMPLETION.value, + "model_mode": "completion", + "model_name": "gpt-3.5-turbo", + "has_context": "true", + }, + { + "app_mode": AppMode.COMPLETION.value, + "model_mode": "chat", + "model_name": "gpt-3.5-turbo", + "has_context": "true", + }, + ] + + for args in test_scenarios: + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify context integration is consistent + assert result is not None + assert result != {} + + # Check that context is properly integrated + if "completion_prompt_config" in result: + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert prompt_text.startswith(CONTEXT) + elif "chat_prompt_config" in result: + prompt_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert prompt_text.startswith(CONTEXT) + + def test_baichuan_context_integration_consistency( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test consistency of Baichuan context integration across different scenarios. + + This test verifies: + - Baichuan context is always prepended correctly + - Context integration is consistent across different templates + - No context duplication or corruption + """ + fake = Faker() + + # Test different scenarios + test_scenarios = [ + { + "app_mode": AppMode.CHAT.value, + "model_mode": "completion", + "model_name": "baichuan-13b-chat", + "has_context": "true", + }, + { + "app_mode": AppMode.CHAT.value, + "model_mode": "chat", + "model_name": "baichuan-13b-chat", + "has_context": "true", + }, + { + "app_mode": AppMode.COMPLETION.value, + "model_mode": "completion", + "model_name": "baichuan-13b-chat", + "has_context": "true", + }, + { + "app_mode": AppMode.COMPLETION.value, + "model_mode": "chat", + "model_name": "baichuan-13b-chat", + "has_context": "true", + }, + ] + + for args in test_scenarios: + # Act: Execute the method under test + result = AdvancedPromptTemplateService.get_prompt(args) + + # Assert: Verify context integration is consistent + assert result is not None + assert result != {} + + # Check that Baichuan context is properly integrated + if "completion_prompt_config" in result: + prompt_text = result["completion_prompt_config"]["prompt"]["text"] + assert prompt_text.startswith(BAICHUAN_CONTEXT) + elif "chat_prompt_config" in result: + prompt_text = result["chat_prompt_config"]["prompt"][0]["text"] + assert prompt_text.startswith(BAICHUAN_CONTEXT) diff --git a/api/tests/test_containers_integration_tests/services/test_agent_service.py b/api/tests/test_containers_integration_tests/services/test_agent_service.py new file mode 100644 index 0000000000..d63b188b12 --- /dev/null +++ b/api/tests/test_containers_integration_tests/services/test_agent_service.py @@ -0,0 +1,1033 @@ +import json +from unittest.mock import MagicMock, patch + +import pytest +from faker import Faker + +from core.plugin.impl.exc import PluginDaemonClientSideError +from models.model import AppModelConfig, Conversation, EndUser, Message, 
MessageAgentThought +from services.account_service import AccountService, TenantService +from services.agent_service import AgentService +from services.app_service import AppService + + +class TestAgentService: + """Integration tests for AgentService using testcontainers.""" + + @pytest.fixture + def mock_external_service_dependencies(self): + """Mock setup for external service dependencies.""" + with ( + patch("services.agent_service.PluginAgentClient") as mock_plugin_agent_client, + patch("services.agent_service.ToolManager") as mock_tool_manager, + patch("services.agent_service.AgentConfigManager") as mock_agent_config_manager, + patch("services.agent_service.current_user") as mock_current_user, + patch("services.app_service.FeatureService") as mock_feature_service, + patch("services.app_service.EnterpriseService") as mock_enterprise_service, + patch("services.app_service.ModelManager") as mock_model_manager, + patch("services.account_service.FeatureService") as mock_account_feature_service, + ): + # Setup default mock returns for agent service + mock_plugin_agent_client_instance = mock_plugin_agent_client.return_value + mock_plugin_agent_client_instance.fetch_agent_strategy_providers.return_value = [ + MagicMock( + plugin_id="test_plugin", + declaration=MagicMock( + identity=MagicMock(name="test_provider"), + strategies=[MagicMock(identity=MagicMock(name="test_strategy"))], + ), + ) + ] + mock_plugin_agent_client_instance.fetch_agent_strategy_provider.return_value = MagicMock( + plugin_id="test_plugin", + declaration=MagicMock( + identity=MagicMock(name="test_provider"), + strategies=[MagicMock(identity=MagicMock(name="test_strategy"))], + ), + ) + + # Setup ToolManager mocks + mock_tool_manager.get_tool_icon.return_value = "test_icon" + mock_tool_manager.get_tool_label.return_value = MagicMock( + to_dict=lambda: {"en_US": "Test Tool", "zh_Hans": "测试工具"} + ) + + # Setup AgentConfigManager mocks + mock_agent_config = MagicMock() + mock_agent_config.tools = [ + MagicMock(tool_name="test_tool", provider_type="test_provider", provider_id="test_id") + ] + mock_agent_config_manager.convert.return_value = mock_agent_config + + # Setup current_user mock + mock_current_user.timezone = "UTC" + + # Setup default mock returns for app service + mock_feature_service.get_system_features.return_value.webapp_auth.enabled = False + mock_enterprise_service.WebAppAuth.update_app_access_mode.return_value = None + mock_enterprise_service.WebAppAuth.cleanup_webapp.return_value = None + + # Setup default mock returns for account service + mock_account_feature_service.get_system_features.return_value.is_allow_register = True + + # Mock ModelManager for model configuration + mock_model_instance = mock_model_manager.return_value + mock_model_instance.get_default_model_instance.return_value = None + mock_model_instance.get_default_provider_model_name.return_value = ("openai", "gpt-3.5-turbo") + + yield { + "plugin_agent_client": mock_plugin_agent_client, + "tool_manager": mock_tool_manager, + "agent_config_manager": mock_agent_config_manager, + "current_user": mock_current_user, + "feature_service": mock_feature_service, + "enterprise_service": mock_enterprise_service, + "model_manager": mock_model_manager, + "account_feature_service": mock_account_feature_service, + } + + def _create_test_app_and_account(self, db_session_with_containers, mock_external_service_dependencies): + """ + Helper method to create a test app and account for testing. 
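+
+        Typical usage inside a test:
+
+            app, account = self._create_test_app_and_account(
+                db_session_with_containers, mock_external_service_dependencies
+            )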
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + mock_external_service_dependencies: Mock dependencies + + Returns: + tuple: (app, account) - Created app and account instances + """ + fake = Faker() + + # Setup mocks for account creation + mock_external_service_dependencies[ + "account_feature_service" + ].get_system_features.return_value.is_allow_register = True + + # Create account and tenant + account = AccountService.create_account( + email=fake.email(), + name=fake.name(), + interface_language="en-US", + password=fake.password(length=12), + ) + TenantService.create_owner_tenant_if_not_exist(account, name=fake.company()) + tenant = account.current_tenant + + # Create app with realistic data + app_args = { + "name": fake.company(), + "description": fake.text(max_nb_chars=100), + "mode": "agent-chat", + "icon_type": "emoji", + "icon": "🤖", + "icon_background": "#FF6B6B", + "api_rph": 100, + "api_rpm": 10, + } + + app_service = AppService() + app = app_service.create_app(tenant.id, app_args, account) + + # Update the app model config to set agent_mode for agent-chat mode + if app.mode == "agent-chat" and app.app_model_config: + app.app_model_config.agent_mode = json.dumps({"enabled": True, "strategy": "react", "tools": []}) + from extensions.ext_database import db + + db.session.commit() + + return app, account + + def _create_test_conversation_and_message(self, db_session_with_containers, app, account): + """ + Helper method to create a test conversation and message with agent thoughts. + + Args: + db_session_with_containers: Database session from testcontainers infrastructure + app: App instance + account: Account instance + + Returns: + tuple: (conversation, message) - Created conversation and message instances + """ + fake = Faker() + + from extensions.ext_database import db + + # Create conversation + conversation = Conversation( + id=fake.uuid4(), + app_id=app.id, + from_account_id=account.id, + from_end_user_id=None, + name=fake.sentence(), + inputs={}, + status="normal", + mode="chat", + from_source="api", + ) + db.session.add(conversation) + db.session.commit() + + # Create app model config + app_model_config = AppModelConfig( + id=fake.uuid4(), + app_id=app.id, + provider="openai", + model_id="gpt-3.5-turbo", + configs={}, + model="gpt-3.5-turbo", + agent_mode=json.dumps({"enabled": True, "strategy": "react", "tools": []}), + ) + db.session.add(app_model_config) + db.session.commit() + + # Update conversation with app model config + conversation.app_model_config_id = app_model_config.id + db.session.commit() + + # Create message + message = Message( + id=fake.uuid4(), + conversation_id=conversation.id, + app_id=app.id, + from_account_id=account.id, + from_end_user_id=None, + inputs={}, + query=fake.text(max_nb_chars=100), + message=[{"role": "user", "text": fake.text(max_nb_chars=100)}], + answer=fake.text(max_nb_chars=200), + message_tokens=100, + message_unit_price=0.001, + answer_tokens=200, + answer_unit_price=0.001, + provider_response_latency=1.5, + currency="USD", + from_source="api", + ) + db.session.add(message) + db.session.commit() + + return conversation, message + + def _create_test_agent_thoughts(self, db_session_with_containers, message): + """ + Helper method to create test agent thoughts for a message. 
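+
+        Each thought's tool_meta_str is a JSON object keyed by tool name,
+        mirroring the records built below:
+
+            {"<tool_name>": {"error": None, "time_cost": 0.5,
+                             "tool_config": {"tool_provider_type": "...",
+                                             "tool_provider": "..."},
+                             "tool_parameters": {}}}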
+ + Args: + db_session_with_containers: Database session from testcontainers infrastructure + message: Message instance + + Returns: + list: Created agent thoughts + """ + fake = Faker() + + from extensions.ext_database import db + + agent_thoughts = [] + + # Create first agent thought + thought1 = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=1, + thought="I need to analyze the user's request", + tool="test_tool", + tool_labels_str=json.dumps({"test_tool": {"en_US": "Test Tool", "zh_Hans": "测试工具"}}), + tool_meta_str=json.dumps( + { + "test_tool": { + "error": None, + "time_cost": 0.5, + "tool_config": {"tool_provider_type": "test_provider", "tool_provider": "test_id"}, + "tool_parameters": {}, + } + } + ), + tool_input=json.dumps({"test_tool": {"input": "test_input"}}), + observation=json.dumps({"test_tool": {"output": "test_output"}}), + tokens=50, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(thought1) + agent_thoughts.append(thought1) + + # Create second agent thought + thought2 = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=2, + thought="Based on the analysis, I can provide a response", + tool="dataset_tool", + tool_labels_str=json.dumps({"dataset_tool": {"en_US": "Dataset Tool", "zh_Hans": "数据集工具"}}), + tool_meta_str=json.dumps( + { + "dataset_tool": { + "error": None, + "time_cost": 0.3, + "tool_config": {"tool_provider_type": "dataset-retrieval", "tool_provider": "dataset_id"}, + "tool_parameters": {}, + } + } + ), + tool_input=json.dumps({"dataset_tool": {"query": "test_query"}}), + observation=json.dumps({"dataset_tool": {"results": "test_results"}}), + tokens=30, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(thought2) + agent_thoughts.append(thought2) + + db.session.commit() + + return agent_thoughts + + def test_get_agent_logs_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of agent logs with complete data. 
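+
+        Expected envelope, abridged from the assertions below:
+
+            {"meta": {"status", "executor", "iterations", "agent_mode",
+                      "total_tokens", "elapsed_time"},
+             "iterations": [{"tokens", "thought", "tool_calls": [...], ...}],
+             "files": [...]}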
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + agent_thoughts = self._create_test_agent_thoughts(db_session_with_containers, message) + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result structure + assert result is not None + assert "meta" in result + assert "iterations" in result + assert "files" in result + + # Verify meta information + meta = result["meta"] + assert meta["status"] == "success" + assert meta["executor"] == account.name + assert meta["iterations"] == 2 + assert meta["agent_mode"] == "react" + assert meta["total_tokens"] == 300 # 100 + 200 + assert meta["elapsed_time"] == 1.5 + + # Verify iterations + iterations = result["iterations"] + assert len(iterations) == 2 + + # Verify first iteration + first_iteration = iterations[0] + assert first_iteration["tokens"] == 50 + assert first_iteration["thought"] == "I need to analyze the user's request" + assert len(first_iteration["tool_calls"]) == 1 + + tool_call = first_iteration["tool_calls"][0] + assert tool_call["tool_name"] == "test_tool" + assert tool_call["tool_label"] == {"en_US": "Test Tool", "zh_Hans": "测试工具"} + assert tool_call["status"] == "success" + assert tool_call["time_cost"] == 0.5 + assert tool_call["tool_icon"] == "test_icon" + + # Verify second iteration + second_iteration = iterations[1] + assert second_iteration["tokens"] == 30 + assert second_iteration["thought"] == "Based on the analysis, I can provide a response" + assert len(second_iteration["tool_calls"]) == 1 + + dataset_tool_call = second_iteration["tool_calls"][0] + assert dataset_tool_call["tool_name"] == "dataset_tool" + assert dataset_tool_call["tool_label"] == {"en_US": "Dataset Tool", "zh_Hans": "数据集工具"} + assert dataset_tool_call["status"] == "success" + assert dataset_tool_call["time_cost"] == 0.3 + assert dataset_tool_call["tool_icon"] == "" # dataset-retrieval tools have empty icon + + def test_get_agent_logs_conversation_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when conversation is not found. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Execute the method under test with non-existent conversation + with pytest.raises(ValueError, match="Conversation not found"): + AgentService.get_agent_logs(app, fake.uuid4(), fake.uuid4()) + + def test_get_agent_logs_message_not_found(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test error handling when message is not found. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + # Execute the method under test with non-existent message + with pytest.raises(ValueError, match="Message not found"): + AgentService.get_agent_logs(app, str(conversation.id), fake.uuid4()) + + def test_get_agent_logs_with_end_user(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test agent logs retrieval when conversation is from end user. 
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + from extensions.ext_database import db + + # Create end user + end_user = EndUser( + id=fake.uuid4(), + tenant_id=app.tenant_id, + app_id=app.id, + type="web_app", + is_anonymous=False, + session_id=fake.uuid4(), + name=fake.name(), + ) + db.session.add(end_user) + db.session.commit() + + # Create conversation with end user + conversation = Conversation( + id=fake.uuid4(), + app_id=app.id, + from_account_id=None, + from_end_user_id=end_user.id, + name=fake.sentence(), + inputs={}, + status="normal", + mode="chat", + from_source="api", + ) + db.session.add(conversation) + db.session.commit() + + # Create app model config + app_model_config = AppModelConfig( + id=fake.uuid4(), + app_id=app.id, + provider="openai", + model_id="gpt-3.5-turbo", + configs={}, + model="gpt-3.5-turbo", + agent_mode=json.dumps({"enabled": True, "strategy": "react", "tools": []}), + ) + db.session.add(app_model_config) + db.session.commit() + + # Update conversation with app model config + conversation.app_model_config_id = app_model_config.id + db.session.commit() + + # Create message + message = Message( + id=fake.uuid4(), + conversation_id=conversation.id, + app_id=app.id, + from_account_id=None, + from_end_user_id=end_user.id, + inputs={}, + query=fake.text(max_nb_chars=100), + message=[{"role": "user", "text": fake.text(max_nb_chars=100)}], + answer=fake.text(max_nb_chars=200), + message_tokens=100, + message_unit_price=0.001, + answer_tokens=200, + answer_unit_price=0.001, + provider_response_latency=1.5, + currency="USD", + from_source="api", + ) + db.session.add(message) + db.session.commit() + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + assert result["meta"]["executor"] == end_user.name + + def test_get_agent_logs_with_unknown_executor(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test agent logs retrieval when executor is unknown. 
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + from extensions.ext_database import db + + # Create conversation with non-existent account + conversation = Conversation( + id=fake.uuid4(), + app_id=app.id, + from_account_id=fake.uuid4(), # Non-existent account + from_end_user_id=None, + name=fake.sentence(), + inputs={}, + status="normal", + mode="chat", + from_source="api", + ) + db.session.add(conversation) + db.session.commit() + + # Create app model config + app_model_config = AppModelConfig( + id=fake.uuid4(), + app_id=app.id, + provider="openai", + model_id="gpt-3.5-turbo", + configs={}, + model="gpt-3.5-turbo", + agent_mode=json.dumps({"enabled": True, "strategy": "react", "tools": []}), + ) + db.session.add(app_model_config) + db.session.commit() + + # Update conversation with app model config + conversation.app_model_config_id = app_model_config.id + db.session.commit() + + # Create message + message = Message( + id=fake.uuid4(), + conversation_id=conversation.id, + app_id=app.id, + from_account_id=fake.uuid4(), # Non-existent account + from_end_user_id=None, + inputs={}, + query=fake.text(max_nb_chars=100), + message=[{"role": "user", "text": fake.text(max_nb_chars=100)}], + answer=fake.text(max_nb_chars=200), + message_tokens=100, + message_unit_price=0.001, + answer_tokens=200, + answer_unit_price=0.001, + provider_response_latency=1.5, + currency="USD", + from_source="api", + ) + db.session.add(message) + db.session.commit() + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + assert result["meta"]["executor"] == "Unknown" + + def test_get_agent_logs_with_tool_error(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test agent logs retrieval with tool errors. 
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + from extensions.ext_database import db + + # Create agent thought with tool error + thought_with_error = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=1, + thought="I need to analyze the user's request", + tool="error_tool", + tool_labels_str=json.dumps({"error_tool": {"en_US": "Error Tool", "zh_Hans": "错误工具"}}), + tool_meta_str=json.dumps( + { + "error_tool": { + "error": "Tool execution failed", + "time_cost": 0.5, + "tool_config": {"tool_provider_type": "test_provider", "tool_provider": "test_id"}, + "tool_parameters": {}, + } + } + ), + tool_input=json.dumps({"error_tool": {"input": "test_input"}}), + observation=json.dumps({"error_tool": {"output": "error_output"}}), + tokens=50, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(thought_with_error) + db.session.commit() + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + iterations = result["iterations"] + assert len(iterations) == 1 + + tool_call = iterations[0]["tool_calls"][0] + assert tool_call["status"] == "error" + assert tool_call["error"] == "Tool execution failed" + + def test_get_agent_logs_without_agent_thoughts( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test agent logs retrieval when message has no agent thoughts. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + assert result["meta"]["iterations"] == 0 + assert len(result["iterations"]) == 0 + + def test_get_agent_logs_app_model_config_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when app model config is not found. 
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + from extensions.ext_database import db + + # Remove app model config to test error handling + app.app_model_config_id = None + db.session.commit() + + # Create conversation without app model config + conversation = Conversation( + id=fake.uuid4(), + app_id=app.id, + from_account_id=account.id, + from_end_user_id=None, + name=fake.sentence(), + inputs={}, + status="normal", + mode="chat", + from_source="api", + app_model_config_id=None, # Explicitly set to None + ) + db.session.add(conversation) + db.session.commit() + + # Create message + message = Message( + id=fake.uuid4(), + conversation_id=conversation.id, + app_id=app.id, + from_account_id=account.id, + from_end_user_id=None, + inputs={}, + query=fake.text(max_nb_chars=100), + message=[{"role": "user", "text": fake.text(max_nb_chars=100)}], + answer=fake.text(max_nb_chars=200), + message_tokens=100, + message_unit_price=0.001, + answer_tokens=200, + answer_unit_price=0.001, + provider_response_latency=1.5, + currency="USD", + from_source="api", + ) + db.session.add(message) + db.session.commit() + + # Execute the method under test + with pytest.raises(ValueError, match="App model config not found"): + AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + def test_get_agent_logs_agent_config_not_found( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test error handling when agent config is not found. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + # Mock AgentConfigManager to return None + mock_external_service_dependencies["agent_config_manager"].convert.return_value = None + + # Execute the method under test + with pytest.raises(ValueError, match="Agent config not found"): + AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + def test_list_agent_providers_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful listing of agent providers. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + # Execute the method under test + result = AgentService.list_agent_providers(str(account.id), str(app.tenant_id)) + + # Verify the result + assert result is not None + assert len(result) == 1 + assert result[0].plugin_id == "test_plugin" + + # Verify the mock was called correctly + mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value + mock_plugin_client.fetch_agent_strategy_providers.assert_called_once_with(str(app.tenant_id)) + + def test_get_agent_provider_success(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test successful retrieval of specific agent provider. 
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + provider_name = "test_provider" + + # Execute the method under test + result = AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + + # Verify the result + assert result is not None + assert result.plugin_id == "test_plugin" + + # Verify the mock was called correctly + mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value + mock_plugin_client.fetch_agent_strategy_provider.assert_called_once_with(str(app.tenant_id), provider_name) + + def test_get_agent_provider_plugin_error(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test error handling when plugin daemon client raises an error. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + + provider_name = "test_provider" + error_message = "Plugin not found" + + # Mock PluginAgentClient to raise an error + mock_plugin_client = mock_external_service_dependencies["plugin_agent_client"].return_value + mock_plugin_client.fetch_agent_strategy_provider.side_effect = PluginDaemonClientSideError(error_message) + + # Execute the method under test + with pytest.raises(ValueError, match=error_message): + AgentService.get_agent_provider(str(account.id), str(app.tenant_id), provider_name) + + def test_get_agent_logs_with_complex_tool_data( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test agent logs retrieval with complex tool data and multiple tools. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + from extensions.ext_database import db + + # Create agent thought with multiple tools + complex_thought = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=1, + thought="I need to use multiple tools to complete this task", + tool="tool1;tool2;tool3", + tool_labels_str=json.dumps( + { + "tool1": {"en_US": "First Tool", "zh_Hans": "第一个工具"}, + "tool2": {"en_US": "Second Tool", "zh_Hans": "第二个工具"}, + "tool3": {"en_US": "Third Tool", "zh_Hans": "第三个工具"}, + } + ), + tool_meta_str=json.dumps( + { + "tool1": { + "error": None, + "time_cost": 0.5, + "tool_config": {"tool_provider_type": "test_provider", "tool_provider": "test_id"}, + "tool_parameters": {"param1": "value1"}, + }, + "tool2": { + "error": "Tool 2 failed", + "time_cost": 0.3, + "tool_config": {"tool_provider_type": "another_provider", "tool_provider": "another_id"}, + "tool_parameters": {"param2": "value2"}, + }, + "tool3": { + "error": None, + "time_cost": 0.7, + "tool_config": {"tool_provider_type": "dataset-retrieval", "tool_provider": "dataset_id"}, + "tool_parameters": {"param3": "value3"}, + }, + } + ), + tool_input=json.dumps( + {"tool1": {"input1": "data1"}, "tool2": {"input2": "data2"}, "tool3": {"input3": "data3"}} + ), + observation=json.dumps( + {"tool1": {"output1": "result1"}, "tool2": {"output2": "result2"}, "tool3": {"output3": "result3"}} + ), + tokens=100, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(complex_thought) + db.session.commit() + + # Execute the method under test + result = 
AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + iterations = result["iterations"] + assert len(iterations) == 1 + + tool_calls = iterations[0]["tool_calls"] + assert len(tool_calls) == 3 + + # Verify first tool + assert tool_calls[0]["tool_name"] == "tool1" + assert tool_calls[0]["tool_label"] == {"en_US": "First Tool", "zh_Hans": "第一个工具"} + assert tool_calls[0]["status"] == "success" + assert tool_calls[0]["tool_parameters"] == {"param1": "value1"} + + # Verify second tool (with error) + assert tool_calls[1]["tool_name"] == "tool2" + assert tool_calls[1]["tool_label"] == {"en_US": "Second Tool", "zh_Hans": "第二个工具"} + assert tool_calls[1]["status"] == "error" + assert tool_calls[1]["error"] == "Tool 2 failed" + + # Verify third tool (dataset tool) + assert tool_calls[2]["tool_name"] == "tool3" + assert tool_calls[2]["tool_label"] == {"en_US": "Third Tool", "zh_Hans": "第三个工具"} + assert tool_calls[2]["status"] == "success" + assert tool_calls[2]["tool_icon"] == "" # dataset-retrieval tools have empty icon + + def test_get_agent_logs_with_files(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test agent logs retrieval with message files and agent thought files. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + from core.file import FileTransferMethod, FileType + from extensions.ext_database import db + from models.enums import CreatorUserRole + + # Add files to message + from models.model import MessageFile + + message_file1 = MessageFile( + message_id=message.id, + type=FileType.IMAGE, + transfer_method=FileTransferMethod.REMOTE_URL, + url="http://example.com/file1.jpg", + belongs_to="user", + created_by_role=CreatorUserRole.ACCOUNT, + created_by=message.from_account_id, + ) + message_file2 = MessageFile( + message_id=message.id, + type=FileType.IMAGE, + transfer_method=FileTransferMethod.REMOTE_URL, + url="http://example.com/file2.png", + belongs_to="user", + created_by_role=CreatorUserRole.ACCOUNT, + created_by=message.from_account_id, + ) + db.session.add(message_file1) + db.session.add(message_file2) + db.session.commit() + + # Create agent thought with files + thought_with_files = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=1, + thought="I need to process some files", + tool="file_tool", + tool_labels_str=json.dumps({"file_tool": {"en_US": "File Tool", "zh_Hans": "文件工具"}}), + tool_meta_str=json.dumps( + { + "file_tool": { + "error": None, + "time_cost": 0.5, + "tool_config": {"tool_provider_type": "test_provider", "tool_provider": "test_id"}, + "tool_parameters": {}, + } + } + ), + tool_input=json.dumps({"file_tool": {"input": "test_input"}}), + observation=json.dumps({"file_tool": {"output": "test_output"}}), + message_files=json.dumps(["file1", "file2"]), + tokens=50, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(thought_with_files) + db.session.commit() + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + assert len(result["files"]) == 2 + + iterations = result["iterations"] + assert len(iterations) == 1 + assert len(iterations[0]["files"]) == 2 + assert "file1" in 
iterations[0]["files"] + assert "file2" in iterations[0]["files"] + + def test_get_agent_logs_with_different_timezone( + self, db_session_with_containers, mock_external_service_dependencies + ): + """ + Test agent logs retrieval with different timezone settings. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + # Mock current_user with different timezone + mock_external_service_dependencies["current_user"].timezone = "Asia/Shanghai" + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + assert "start_time" in result["meta"] + + # Verify the timezone conversion + start_time = result["meta"]["start_time"] + assert "T" in start_time # ISO format + assert "+08:00" in start_time or "Z" in start_time # Timezone offset + + def test_get_agent_logs_with_empty_tool_data(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test agent logs retrieval with empty tool data. + """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + from extensions.ext_database import db + + # Create agent thought with empty tool data + empty_thought = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=1, + thought="I need to analyze the user's request", + tool="", # Empty tool + tool_labels_str="{}", # Empty labels + tool_meta_str="{}", # Empty meta + tool_input="", # Empty input + observation="", # Empty observation + tokens=50, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(empty_thought) + db.session.commit() + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result + assert result is not None + iterations = result["iterations"] + assert len(iterations) == 1 + + # Verify empty tool calls + tool_calls = iterations[0]["tool_calls"] + assert len(tool_calls) == 0 # No tools to process + + def test_get_agent_logs_with_malformed_json(self, db_session_with_containers, mock_external_service_dependencies): + """ + Test agent logs retrieval with malformed JSON data in tool fields. 
+ """ + fake = Faker() + + # Create test data + app, account = self._create_test_app_and_account(db_session_with_containers, mock_external_service_dependencies) + conversation, message = self._create_test_conversation_and_message(db_session_with_containers, app, account) + + from extensions.ext_database import db + + # Create agent thought with malformed JSON + malformed_thought = MessageAgentThought( + id=fake.uuid4(), + message_id=message.id, + position=1, + thought="I need to analyze the user's request", + tool="test_tool", + tool_labels_str="invalid json", # Malformed JSON + tool_meta_str="invalid json", # Malformed JSON + tool_input="invalid json", # Malformed JSON + observation="invalid json", # Malformed JSON + tokens=50, + created_by_role="account", + created_by=message.from_account_id, + ) + db.session.add(malformed_thought) + db.session.commit() + + # Execute the method under test + result = AgentService.get_agent_logs(app, str(conversation.id), str(message.id)) + + # Verify the result - should handle malformed JSON gracefully + assert result is not None + iterations = result["iterations"] + assert len(iterations) == 1 + + tool_calls = iterations[0]["tool_calls"] + assert len(tool_calls) == 1 + + # Verify default values for malformed JSON + tool_call = tool_calls[0] + assert tool_call["tool_name"] == "test_tool" + assert tool_call["tool_label"] == "test_tool" # Default to tool name + assert tool_call["tool_input"] == {} + assert tool_call["tool_output"] == "invalid json" # Raw observation value + assert tool_call["tool_parameters"] == {} From 21e1b825fe448ccd55f60da258a49c7d470a2494 Mon Sep 17 00:00:00 2001 From: kenwoodjw Date: Wed, 13 Aug 2025 11:22:03 +0800 Subject: [PATCH 22/27] fix: optimize dataset cleanup task (#23828) Signed-off-by: kenwoodjw --- api/schedule/clean_unused_datasets_task.py | 269 +++++++++------------ 1 file changed, 121 insertions(+), 148 deletions(-) diff --git a/api/schedule/clean_unused_datasets_task.py b/api/schedule/clean_unused_datasets_task.py index 940da5309e..1141451011 100644 --- a/api/schedule/clean_unused_datasets_task.py +++ b/api/schedule/clean_unused_datasets_task.py @@ -1,5 +1,6 @@ import datetime import time +from typing import Optional, TypedDict import click from sqlalchemy import func, select @@ -14,168 +15,140 @@ from models.dataset import Dataset, DatasetAutoDisableLog, DatasetQuery, Documen from services.feature_service import FeatureService +class CleanupConfig(TypedDict): + clean_day: datetime.datetime + plan_filter: Optional[str] + add_logs: bool + + @app.celery.task(queue="dataset") def clean_unused_datasets_task(): click.echo(click.style("Start clean unused datasets indexes.", fg="green")) - plan_sandbox_clean_day_setting = dify_config.PLAN_SANDBOX_CLEAN_DAY_SETTING - plan_pro_clean_day_setting = dify_config.PLAN_PRO_CLEAN_DAY_SETTING start_at = time.perf_counter() - plan_sandbox_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_sandbox_clean_day_setting) - plan_pro_clean_day = datetime.datetime.now() - datetime.timedelta(days=plan_pro_clean_day_setting) - while True: - try: - # Subquery for counting new documents - document_subquery_new = ( - db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) - .where( - Document.indexing_status == "completed", - Document.enabled == True, - Document.archived == False, - Document.updated_at > plan_sandbox_clean_day, - ) - .group_by(Document.dataset_id) - .subquery() - ) - # Subquery for counting old documents - document_subquery_old = ( - 
db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) - .where( - Document.indexing_status == "completed", - Document.enabled == True, - Document.archived == False, - Document.updated_at < plan_sandbox_clean_day, - ) - .group_by(Document.dataset_id) - .subquery() - ) + # Define cleanup configurations + cleanup_configs: list[CleanupConfig] = [ + { + "clean_day": datetime.datetime.now() - datetime.timedelta(days=dify_config.PLAN_SANDBOX_CLEAN_DAY_SETTING), + "plan_filter": None, + "add_logs": True, + }, + { + "clean_day": datetime.datetime.now() - datetime.timedelta(days=dify_config.PLAN_PRO_CLEAN_DAY_SETTING), + "plan_filter": "sandbox", + "add_logs": False, + }, + ] - # Main query with join and filter - stmt = ( - select(Dataset) - .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id) - .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id) - .where( - Dataset.created_at < plan_sandbox_clean_day, - func.coalesce(document_subquery_new.c.document_count, 0) == 0, - func.coalesce(document_subquery_old.c.document_count, 0) > 0, - ) - .order_by(Dataset.created_at.desc()) - ) + for config in cleanup_configs: + clean_day = config["clean_day"] + plan_filter = config["plan_filter"] + add_logs = config["add_logs"] - datasets = db.paginate(stmt, page=1, per_page=50) - - except SQLAlchemyError: - raise - if datasets.items is None or len(datasets.items) == 0: - break - for dataset in datasets: - dataset_query = ( - db.session.query(DatasetQuery) - .where(DatasetQuery.created_at > plan_sandbox_clean_day, DatasetQuery.dataset_id == dataset.id) - .all() - ) - if not dataset_query or len(dataset_query) == 0: - try: - # add auto disable log - documents = ( - db.session.query(Document) - .where( - Document.dataset_id == dataset.id, - Document.enabled == True, - Document.archived == False, - ) - .all() + while True: + try: + # Subquery for counting new documents + document_subquery_new = ( + db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) + .where( + Document.indexing_status == "completed", + Document.enabled == True, + Document.archived == False, + Document.updated_at > clean_day, ) - for document in documents: - dataset_auto_disable_log = DatasetAutoDisableLog( - tenant_id=dataset.tenant_id, - dataset_id=dataset.id, - document_id=document.id, - ) - db.session.add(dataset_auto_disable_log) - # remove index - index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor() - index_processor.clean(dataset, None) - - # update document - db.session.query(Document).filter_by(dataset_id=dataset.id).update({Document.enabled: False}) - db.session.commit() - click.echo(click.style(f"Cleaned unused dataset {dataset.id} from db success!", fg="green")) - except Exception as e: - click.echo(click.style(f"clean dataset index error: {e.__class__.__name__} {str(e)}", fg="red")) - while True: - try: - # Subquery for counting new documents - document_subquery_new = ( - db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) - .where( - Document.indexing_status == "completed", - Document.enabled == True, - Document.archived == False, - Document.updated_at > plan_pro_clean_day, + .group_by(Document.dataset_id) + .subquery() ) - .group_by(Document.dataset_id) - .subquery() - ) - # Subquery for counting old documents - document_subquery_old = ( - db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) - .where( - 
Document.indexing_status == "completed", - Document.enabled == True, - Document.archived == False, - Document.updated_at < plan_pro_clean_day, + # Subquery for counting old documents + document_subquery_old = ( + db.session.query(Document.dataset_id, func.count(Document.id).label("document_count")) + .where( + Document.indexing_status == "completed", + Document.enabled == True, + Document.archived == False, + Document.updated_at < clean_day, + ) + .group_by(Document.dataset_id) + .subquery() ) - .group_by(Document.dataset_id) - .subquery() - ) - # Main query with join and filter - stmt = ( - select(Dataset) - .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id) - .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id) - .where( - Dataset.created_at < plan_pro_clean_day, - func.coalesce(document_subquery_new.c.document_count, 0) == 0, - func.coalesce(document_subquery_old.c.document_count, 0) > 0, + # Main query with join and filter + stmt = ( + select(Dataset) + .outerjoin(document_subquery_new, Dataset.id == document_subquery_new.c.dataset_id) + .outerjoin(document_subquery_old, Dataset.id == document_subquery_old.c.dataset_id) + .where( + Dataset.created_at < clean_day, + func.coalesce(document_subquery_new.c.document_count, 0) == 0, + func.coalesce(document_subquery_old.c.document_count, 0) > 0, + ) + .order_by(Dataset.created_at.desc()) ) - .order_by(Dataset.created_at.desc()) - ) - datasets = db.paginate(stmt, page=1, per_page=50) - except SQLAlchemyError: - raise - if datasets.items is None or len(datasets.items) == 0: - break - for dataset in datasets: - dataset_query = ( - db.session.query(DatasetQuery) - .where(DatasetQuery.created_at > plan_pro_clean_day, DatasetQuery.dataset_id == dataset.id) - .all() - ) - if not dataset_query or len(dataset_query) == 0: - try: - features_cache_key = f"features:{dataset.tenant_id}" - plan_cache = redis_client.get(features_cache_key) - if plan_cache is None: - features = FeatureService.get_features(dataset.tenant_id) - redis_client.setex(features_cache_key, 600, features.billing.subscription.plan) - plan = features.billing.subscription.plan - else: - plan = plan_cache.decode() - if plan == "sandbox": - # remove index - index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor() - index_processor.clean(dataset, None) + datasets = db.paginate(stmt, page=1, per_page=50) + + except SQLAlchemyError: + raise + + if datasets.items is None or len(datasets.items) == 0: + break + + for dataset in datasets: + dataset_query = ( + db.session.query(DatasetQuery) + .where(DatasetQuery.created_at > clean_day, DatasetQuery.dataset_id == dataset.id) + .all() + ) + + if not dataset_query or len(dataset_query) == 0: + try: + should_clean = True + + # Check plan filter if specified + if plan_filter: + features_cache_key = f"features:{dataset.tenant_id}" + plan_cache = redis_client.get(features_cache_key) + if plan_cache is None: + features = FeatureService.get_features(dataset.tenant_id) + redis_client.setex(features_cache_key, 600, features.billing.subscription.plan) + plan = features.billing.subscription.plan + else: + plan = plan_cache.decode() + should_clean = plan == plan_filter + + if should_clean: + # Add auto disable log if required + if add_logs: + documents = ( + db.session.query(Document) + .where( + Document.dataset_id == dataset.id, + Document.enabled == True, + Document.archived == False, + ) + .all() + ) + for document in documents: + dataset_auto_disable_log = 
DatasetAutoDisableLog( + tenant_id=dataset.tenant_id, + dataset_id=dataset.id, + document_id=document.id, + ) + db.session.add(dataset_auto_disable_log) + + # Remove index + index_processor = IndexProcessorFactory(dataset.doc_form).init_index_processor() + index_processor.clean(dataset, None) + + # Update document + db.session.query(Document).filter_by(dataset_id=dataset.id).update( + {Document.enabled: False} + ) + db.session.commit() + click.echo(click.style(f"Cleaned unused dataset {dataset.id} from db success!", fg="green")) + except Exception as e: + click.echo(click.style(f"clean dataset index error: {e.__class__.__name__} {str(e)}", fg="red")) - # update document - db.session.query(Document).filter_by(dataset_id=dataset.id).update({Document.enabled: False}) - db.session.commit() - click.echo(click.style(f"Cleaned unused dataset {dataset.id} from db success!", fg="green")) - except Exception as e: - click.echo(click.style(f"clean dataset index error: {e.__class__.__name__} {str(e)}", fg="red")) end_at = time.perf_counter() click.echo(click.style(f"Cleaned unused dataset from db success latency: {end_at - start_at}", fg="green")) From e11a334c9bbac209fbe3168ffb4cd4e5dfedd5e8 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:55:10 +0800 Subject: [PATCH 23/27] fix: SimpleSelect chevron icon state sync and add notClearable to monitoring selector (#23858) --- .../overview/{cardView.tsx => card-view.tsx} | 0 .../{chartView.tsx => chart-view.tsx} | 1 + .../[appId]/overview/page.tsx | 2 +- web/app/components/app-sidebar/app-info.tsx | 2 +- web/app/components/base/select/index.tsx | 154 +++++++++--------- 5 files changed, 77 insertions(+), 82 deletions(-) rename web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/{cardView.tsx => card-view.tsx} (100%) rename web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/{chartView.tsx => chart-view.tsx} (99%) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/cardView.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx similarity index 100% rename from web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/cardView.tsx rename to web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view.tsx diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chartView.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx similarity index 99% rename from web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chartView.tsx rename to web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx index 646c8bd93d..09d3e4317c 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chartView.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/chart-view.tsx @@ -54,6 +54,7 @@ export default function ChartView({ appId, headerRight }: IChartViewProps) { ({ value: k, name: t(`appLog.filter.period.${v.name}`) }))} className='mt-0 !w-40' + notClearable={true} onSelect={(item) => { const id = item.value const value = TIME_PERIOD_MAPPING[id]?.value ?? 
'-1' diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/page.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/page.tsx index e0c09e739e..bc07a799e4 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/page.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/page.tsx @@ -1,5 +1,5 @@ import React from 'react' -import ChartView from './chartView' +import ChartView from './chart-view' import TracingPanel from './tracing/panel' import ApikeyInfoPanel from '@/app/components/app/overview/apikey-info-panel' diff --git a/web/app/components/app-sidebar/app-info.tsx b/web/app/components/app-sidebar/app-info.tsx index 288dcf8c8b..cf55c0d68d 100644 --- a/web/app/components/app-sidebar/app-info.tsx +++ b/web/app/components/app-sidebar/app-info.tsx @@ -25,7 +25,7 @@ import type { EnvironmentVariable } from '@/app/components/workflow/types' import { fetchWorkflowDraft } from '@/service/workflow' import ContentDialog from '@/app/components/base/content-dialog' import Button from '@/app/components/base/button' -import CardView from '@/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/cardView' +import CardView from '@/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/card-view' import Divider from '../base/divider' import type { Operation } from './app-operations' import AppOperations from './app-operations' diff --git a/web/app/components/base/select/index.tsx b/web/app/components/base/select/index.tsx index 1f29d5bd6a..aa0cf02215 100644 --- a/web/app/components/base/select/index.tsx +++ b/web/app/components/base/select/index.tsx @@ -192,7 +192,6 @@ const SimpleSelect: FC = ({ const localPlaceholder = placeholder || t('common.placeholder.select') const [selectedItem, setSelectedItem] = useState(null) - const [open, setOpen] = useState(false) useEffect(() => { let defaultSelect = null @@ -215,88 +214,83 @@ const SimpleSelect: FC = ({ } }} > -
- {renderTrigger && {renderTrigger(selectedItem)}} - {!renderTrigger && ( - { - // get data-open, use setTimeout to ensure the attribute is set - setTimeout(() => { - if (listboxRef.current) { - const isOpen = listboxRef.current.getAttribute('data-open') !== null - setOpen(isOpen) - onOpenChange?.(isOpen) - } - }) - }} className={classNames(`flex h-full w-full items-center rounded-lg border-0 bg-components-input-bg-normal pl-3 pr-10 focus-visible:bg-state-base-hover-alt focus-visible:outline-none group-hover/simple-select:bg-state-base-hover-alt sm:text-sm sm:leading-6 ${disabled ? 'cursor-not-allowed' : 'cursor-pointer'}`, className)}> - {selectedItem?.name ?? localPlaceholder} - - {isLoading ? - : (selectedItem && !notClearable) - ? ( - { - e.stopPropagation() - setSelectedItem(null) - onSelect({ name: '', value: '' }) - }} - className="h-4 w-4 cursor-pointer text-text-quaternary" - aria-hidden="false" - /> - ) - : ( - open ? ( -
+ )} ) } From e0fe0e1a3e8cf8f6fafa5b399457daa8405eb140 Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:55:25 +0800 Subject: [PATCH 24/27] fix: goto-anything command filter should only match shortcut (#23862) --- .../goto-anything/command-selector.test.tsx | 32 +++++++++---------- .../goto-anything/command-selector.tsx | 1 - 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/web/__tests__/goto-anything/command-selector.test.tsx b/web/__tests__/goto-anything/command-selector.test.tsx index 1073b9d481..1db4be31fb 100644 --- a/web/__tests__/goto-anything/command-selector.test.tsx +++ b/web/__tests__/goto-anything/command-selector.test.tsx @@ -37,7 +37,7 @@ describe('CommandSelector', () => { }, knowledge: { key: '@knowledge', - shortcut: '@knowledge', + shortcut: '@kb', title: 'Search Knowledge', description: 'Search knowledge bases', search: jest.fn(), @@ -75,7 +75,7 @@ describe('CommandSelector', () => { ) expect(screen.getByTestId('command-item-@app')).toBeInTheDocument() - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.getByTestId('command-item-@kb')).toBeInTheDocument() expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument() expect(screen.getByTestId('command-item-@node')).toBeInTheDocument() }) @@ -90,7 +90,7 @@ describe('CommandSelector', () => { ) expect(screen.getByTestId('command-item-@app')).toBeInTheDocument() - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.getByTestId('command-item-@kb')).toBeInTheDocument() expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument() expect(screen.getByTestId('command-item-@node')).toBeInTheDocument() }) @@ -107,7 +107,7 @@ describe('CommandSelector', () => { ) expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument() - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.getByTestId('command-item-@kb')).toBeInTheDocument() expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument() expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument() }) @@ -122,7 +122,7 @@ describe('CommandSelector', () => { ) expect(screen.getByTestId('command-item-@app')).toBeInTheDocument() - expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument() + expect(screen.queryByTestId('command-item-@kb')).not.toBeInTheDocument() expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument() expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument() }) @@ -137,7 +137,7 @@ describe('CommandSelector', () => { ) expect(screen.getByTestId('command-item-@app')).toBeInTheDocument() - expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument() + expect(screen.queryByTestId('command-item-@kb')).not.toBeInTheDocument() }) it('should match partial strings', () => { @@ -145,14 +145,14 @@ describe('CommandSelector', () => { , ) expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument() - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.queryByTestId('command-item-@kb')).not.toBeInTheDocument() expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument() - expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument() + expect(screen.getByTestId('command-item-@node')).toBeInTheDocument() }) }) @@ -167,7 +167,7 @@ describe('CommandSelector', () => { ) 
expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument() - expect(screen.queryByTestId('command-item-@knowledge')).not.toBeInTheDocument() + expect(screen.queryByTestId('command-item-@kb')).not.toBeInTheDocument() expect(screen.queryByTestId('command-item-@plugin')).not.toBeInTheDocument() expect(screen.queryByTestId('command-item-@node')).not.toBeInTheDocument() @@ -210,7 +210,7 @@ describe('CommandSelector', () => { />, ) - expect(mockOnCommandValueChange).toHaveBeenCalledWith('@knowledge') + expect(mockOnCommandValueChange).toHaveBeenCalledWith('@kb') }) it('should not call onCommandValueChange if current value still exists', () => { @@ -246,10 +246,10 @@ describe('CommandSelector', () => { />, ) - const knowledgeItem = screen.getByTestId('command-item-@knowledge') + const knowledgeItem = screen.getByTestId('command-item-@kb') fireEvent.click(knowledgeItem) - expect(mockOnCommandSelect).toHaveBeenCalledWith('@knowledge') + expect(mockOnCommandSelect).toHaveBeenCalledWith('@kb') }) }) @@ -276,7 +276,7 @@ describe('CommandSelector', () => { ) expect(screen.getByTestId('command-item-@app')).toBeInTheDocument() - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.getByTestId('command-item-@kb')).toBeInTheDocument() expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument() expect(screen.getByTestId('command-item-@node')).toBeInTheDocument() }) @@ -312,7 +312,7 @@ describe('CommandSelector', () => { ) expect(screen.getByTestId('command-item-@app')).toBeInTheDocument() - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.getByTestId('command-item-@kb')).toBeInTheDocument() expect(screen.getByTestId('command-item-@plugin')).toBeInTheDocument() expect(screen.getByTestId('command-item-@node')).toBeInTheDocument() }) @@ -326,7 +326,7 @@ describe('CommandSelector', () => { />, ) - expect(screen.getByTestId('command-item-@knowledge')).toBeInTheDocument() + expect(screen.getByTestId('command-item-@kb')).toBeInTheDocument() expect(screen.queryByTestId('command-item-@app')).not.toBeInTheDocument() }) }) diff --git a/web/app/components/goto-anything/command-selector.tsx b/web/app/components/goto-anything/command-selector.tsx index d13421a0eb..6ec179c3e3 100644 --- a/web/app/components/goto-anything/command-selector.tsx +++ b/web/app/components/goto-anything/command-selector.tsx @@ -20,7 +20,6 @@ const CommandSelector: FC = ({ actions, onCommandSelect, searchFilter, co return true const filterLower = searchFilter.toLowerCase() return action.shortcut.toLowerCase().includes(filterLower) - || action.key.toLowerCase().includes(filterLower) }) useEffect(() => { From 2da00bb4ec25762543dae8dd798791b3d97c43aa Mon Sep 17 00:00:00 2001 From: lyzno1 <92089059+lyzno1@users.noreply.github.com> Date: Wed, 13 Aug 2025 13:56:06 +0800 Subject: [PATCH 25/27] Fix misleading Studio button in account header (#23842) --- web/app/account/header.tsx | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/web/app/account/header.tsx b/web/app/account/header.tsx index d033bfab61..af09ca1c9c 100644 --- a/web/app/account/header.tsx +++ b/web/app/account/header.tsx @@ -13,14 +13,14 @@ const Header = () => { const router = useRouter() const systemFeatures = useGlobalPublicStore(s => s.systemFeatures) - const back = useCallback(() => { - router.back() + const goToStudio = useCallback(() => { + router.push('/apps') }, [router]) return (
-
+
{systemFeatures.branding.enabled && systemFeatures.branding.login_page_logo ? {

{t('common.account.account')}

-