diff --git a/.agents/skills/component-refactoring/SKILL.md b/.agents/skills/component-refactoring/SKILL.md index 0ed18d71d1..98a94592ab 100644 --- a/.agents/skills/component-refactoring/SKILL.md +++ b/.agents/skills/component-refactoring/SKILL.md @@ -367,7 +367,7 @@ For each extraction: ┌────────────────────────────────────────┐ │ 1. Extract code │ │ 2. Run: pnpm lint:fix │ - │ 3. Run: pnpm type-check:tsgo │ + │ 3. Run: pnpm type-check │ │ 4. Run: pnpm test │ │ 5. Test functionality manually │ │ 6. PASS? → Next extraction │ diff --git a/.agents/skills/frontend-testing/references/checklist.md b/.agents/skills/frontend-testing/references/checklist.md index 99258498dd..519c3f166f 100644 --- a/.agents/skills/frontend-testing/references/checklist.md +++ b/.agents/skills/frontend-testing/references/checklist.md @@ -127,7 +127,7 @@ For the current file being tested: - [ ] Run full directory test: `pnpm test path/to/directory/` - [ ] Check coverage report: `pnpm test:coverage` - [ ] Run `pnpm lint:fix` on all test files -- [ ] Run `pnpm type-check:tsgo` +- [ ] Run `pnpm type-check` ## Common Issues to Watch diff --git a/AGENTS.md b/AGENTS.md index 667976a3c8..c6ebf7ae8a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -31,7 +31,7 @@ The codebase is split into: ## Language Style - **Python**: Keep type hints on functions and attributes, and implement relevant special methods (e.g., `__repr__`, `__str__`). Prefer `TypedDict` over `dict` or `Mapping` for type safety and better code documentation. -- **TypeScript**: Use the strict config, rely on ESLint (`pnpm lint:fix` preferred) plus `pnpm type-check:tsgo`, and avoid `any` types. +- **TypeScript**: Use the strict config, rely on ESLint (`pnpm lint:fix` preferred) plus `pnpm type-check`, and avoid `any` types. ## General Practices diff --git a/README.md b/README.md index d9848a6c78..778028fc76 100644 --- a/README.md +++ b/README.md @@ -139,19 +139,6 @@ Star Dify on GitHub and be instantly notified of new releases. 
If you need to customize the configuration, please refer to the comments in our [.env.example](docker/.env.example) file and update the corresponding values in your `.env` file. Additionally, you might need to make adjustments to the `docker-compose.yaml` file itself, such as changing image versions, port mappings, or volume mounts, based on your specific deployment environment and requirements. After making any changes, please re-run `docker compose up -d`. You can find the full list of available environment variables [here](https://docs.dify.ai/getting-started/install-self-hosted/environments). -#### Customizing Suggested Questions - -You can now customize the "Suggested Questions After Answer" feature to better fit your use case. For example, to generate longer, more technical questions: - -```bash -# In your .env file -SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]' -SUGGESTED_QUESTIONS_MAX_TOKENS=512 -SUGGESTED_QUESTIONS_TEMPERATURE=0.3 -``` - -See the [Suggested Questions Configuration Guide](docs/suggested-questions-configuration.md) for detailed examples and usage instructions. - ### Metrics Monitoring with Grafana Import the dashboard to Grafana, using Dify's PostgreSQL database as data source, to monitor metrics in granularity of apps, tenants, messages, and more. @@ -160,7 +147,7 @@ Import the dashboard to Grafana, using Dify's PostgreSQL database as data source ### Deployment with Kubernetes -If you'd like to configure a highly-available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes. 
+If you'd like to configure a highly available setup, there are community-contributed [Helm Charts](https://helm.sh/) and YAML files which allow Dify to be deployed on Kubernetes. - [Helm Chart by @LeoQuote](https://github.com/douban/charts/tree/master/charts/dify) - [Helm Chart by @BorisPolonsky](https://github.com/BorisPolonsky/dify-helm) diff --git a/api/.env.example b/api/.env.example index 7455d4a0e9..f6f65011ea 100644 --- a/api/.env.example +++ b/api/.env.example @@ -659,6 +659,11 @@ INNER_API_KEY_FOR_PLUGIN=QaHbTe77CtuXmsfyhR7+vRjI/+XbV1AaFy691iy+kGDv2Jvy0/eAh8Y MARKETPLACE_ENABLED=true MARKETPLACE_API_URL=https://marketplace.dify.ai +# Creators Platform configuration +CREATORS_PLATFORM_FEATURES_ENABLED=true +CREATORS_PLATFORM_API_URL=https://creators.dify.ai +CREATORS_PLATFORM_OAUTH_CLIENT_ID= + # Endpoint configuration ENDPOINT_URL_TEMPLATE=http://localhost:5002/e/{hook_id} @@ -709,22 +714,6 @@ SWAGGER_UI_PATH=/swagger-ui.html # Set to false to export dataset IDs as plain text for easier cross-environment import DSL_EXPORT_ENCRYPT_DATASET_ID=true -# Suggested Questions After Answer Configuration -# These environment variables allow customization of the suggested questions feature -# -# Custom prompt for generating suggested questions (optional) -# If not set, uses the default prompt that generates 3 questions under 20 characters each -# Example: "Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. 
Output must be JSON array: [\"question1\",\"question2\",\"question3\",\"question4\",\"question5\"]" -# SUGGESTED_QUESTIONS_PROMPT= - -# Maximum number of tokens for suggested questions generation (default: 256) -# Adjust this value for longer questions or more questions -# SUGGESTED_QUESTIONS_MAX_TOKENS=256 - -# Temperature for suggested questions generation (default: 0.0) -# Higher values (0.5-1.0) produce more creative questions, lower values (0.0-0.3) produce more focused questions -# SUGGESTED_QUESTIONS_TEMPERATURE=0 - # Tenant isolated task queue configuration TENANT_ISOLATED_TASK_CONCURRENCY=1 diff --git a/api/README.md b/api/README.md index 00562f3f78..a075bc0fa9 100644 --- a/api/README.md +++ b/api/README.md @@ -101,3 +101,11 @@ The scripts resolve paths relative to their location, so you can run them from a uv run ruff format ./ # Format code uv run basedpyright . # Type checking ``` + +## Generate TS stub + +``` +uv run dev/generate_swagger_specs.py --output-dir openapi +``` + +use https://jsontotable.org/openapi-to-typescript to convert to typescript diff --git a/api/commands/plugin.py b/api/commands/plugin.py index c34391025a..8bd5392d7b 100644 --- a/api/commands/plugin.py +++ b/api/commands/plugin.py @@ -11,7 +11,7 @@ from configs import dify_config from core.helper import encrypter from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.plugin import PluginInstaller -from core.tools.utils.system_oauth_encryption import encrypt_system_oauth_params +from core.tools.utils.system_encryption import encrypt_system_params from extensions.ext_database import db from models import Tenant from models.oauth import DatasourceOauthParamConfig, DatasourceProvider @@ -44,7 +44,7 @@ def setup_system_tool_oauth_client(provider, client_params): click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow")) click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow")) - oauth_client_params = 
encrypt_system_oauth_params(client_params_dict) + oauth_client_params = encrypt_system_params(client_params_dict) click.echo(click.style("Client params encrypted successfully.", fg="green")) except Exception as e: click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red")) @@ -94,7 +94,7 @@ def setup_system_trigger_oauth_client(provider, client_params): click.echo(click.style(f"Encrypting client params: {client_params}", fg="yellow")) click.echo(click.style(f"Using SECRET_KEY: `{dify_config.SECRET_KEY}`", fg="yellow")) - oauth_client_params = encrypt_system_oauth_params(client_params_dict) + oauth_client_params = encrypt_system_params(client_params_dict) click.echo(click.style("Client params encrypted successfully.", fg="green")) except Exception as e: click.echo(click.style(f"Error parsing client params: {str(e)}", fg="red")) diff --git a/api/configs/feature/__init__.py b/api/configs/feature/__init__.py index ae49ae47d0..52e33c1789 100644 --- a/api/configs/feature/__init__.py +++ b/api/configs/feature/__init__.py @@ -287,6 +287,27 @@ class MarketplaceConfig(BaseSettings): ) +class CreatorsPlatformConfig(BaseSettings): + """ + Configuration for Creators Platform integration + """ + + CREATORS_PLATFORM_FEATURES_ENABLED: bool = Field( + description="Enable or disable Creators Platform features", + default=True, + ) + + CREATORS_PLATFORM_API_URL: HttpUrl = Field( + description="Creators Platform API URL", + default=HttpUrl("https://creators.dify.ai"), + ) + + CREATORS_PLATFORM_OAUTH_CLIENT_ID: str = Field( + description="OAuth client ID for Creators Platform integration", + default="", + ) + + class EndpointConfig(BaseSettings): """ Configuration for various application endpoints and URLs @@ -1379,6 +1400,7 @@ class FeatureConfig( AuthConfig, # Changed from OAuthConfig to AuthConfig BillingConfig, CodeExecutionSandboxConfig, + CreatorsPlatformConfig, TriggerConfig, AsyncWorkflowConfig, PluginConfig, diff --git a/api/controllers/common/human_input.py 
b/api/controllers/common/human_input.py new file mode 100644 index 0000000000..5d6f4efb95 --- /dev/null +++ b/api/controllers/common/human_input.py @@ -0,0 +1,6 @@ +from pydantic import BaseModel, JsonValue + + +class HumanInputFormSubmitPayload(BaseModel): + inputs: dict[str, JsonValue] + action: str diff --git a/api/controllers/console/app/app.py b/api/controllers/console/app/app.py index 9102983d86..a736fc8bc8 100644 --- a/api/controllers/console/app/app.py +++ b/api/controllers/console/app/app.py @@ -692,6 +692,32 @@ class AppExportApi(Resource): return payload.model_dump(mode="json") +@console_ns.route("/apps//publish-to-creators-platform") +class AppPublishToCreatorsPlatformApi(Resource): + @setup_required + @login_required + @account_initialization_required + @get_app_model(mode=None) + @edit_permission_required + def post(self, app_model): + """Publish app to Creators Platform""" + from configs import dify_config + from core.helper.creators import get_redirect_url, upload_dsl + + if not dify_config.CREATORS_PLATFORM_FEATURES_ENABLED: + return {"error": "Creators Platform features are not enabled"}, 403 + + current_user, _ = current_account_with_tenant() + + dsl_content = AppDslService.export_dsl(app_model=app_model, include_secret=False) + dsl_bytes = dsl_content.encode("utf-8") + + claim_code = upload_dsl(dsl_bytes) + redirect_url = get_redirect_url(str(current_user.id), claim_code) + + return {"redirect_url": redirect_url} + + @console_ns.route("/apps//name") class AppNameApi(Resource): @console_ns.doc("check_app_name") diff --git a/api/controllers/console/human_input_form.py b/api/controllers/console/human_input_form.py index 845af37365..79b3e6cc9f 100644 --- a/api/controllers/console/human_input_form.py +++ b/api/controllers/console/human_input_form.py @@ -8,10 +8,10 @@ from collections.abc import Generator from flask import Response, jsonify, request from flask_restx import Resource -from pydantic import BaseModel from sqlalchemy import select from 
sqlalchemy.orm import Session, sessionmaker +from controllers.common.human_input import HumanInputFormSubmitPayload from controllers.console import console_ns from controllers.console.wraps import account_initialization_required, setup_required from controllers.web.error import InvalidArgumentError, NotFoundError @@ -20,11 +20,11 @@ from core.app.apps.base_app_generator import BaseAppGenerator from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter from core.app.apps.message_generator import MessageGenerator from core.app.apps.workflow.app_generator import WorkflowAppGenerator +from core.workflow.human_input_policy import HumanInputSurface, is_recipient_type_allowed_for_surface from extensions.ext_database import db from libs.login import current_account_with_tenant, login_required from models import App from models.enums import CreatorUserRole -from models.human_input import RecipientType from models.model import AppMode from models.workflow import WorkflowRun from repositories.factory import DifyAPIRepositoryFactory @@ -34,11 +34,6 @@ from services.workflow_event_snapshot_service import build_workflow_event_stream logger = logging.getLogger(__name__) -class HumanInputFormSubmitPayload(BaseModel): - inputs: dict - action: str - - def _jsonify_form_definition(form: Form) -> Response: payload = form.get_definition().model_dump() payload["expiration_time"] = int(form.expiration_time.timestamp()) @@ -56,6 +51,11 @@ class ConsoleHumanInputFormApi(Resource): if form.tenant_id != current_tenant_id: raise NotFoundError("App not found") + @staticmethod + def _ensure_console_recipient_type(form: Form) -> None: + if not is_recipient_type_allowed_for_surface(form.recipient_type, HumanInputSurface.CONSOLE): + raise NotFoundError("form not found") + @setup_required @login_required @account_initialization_required @@ -99,10 +99,8 @@ class ConsoleHumanInputFormApi(Resource): raise NotFoundError(f"form not found, token={form_token}") 
self._ensure_console_access(form) - + self._ensure_console_recipient_type(form) recipient_type = form.recipient_type - if recipient_type not in {RecipientType.CONSOLE, RecipientType.BACKSTAGE}: - raise NotFoundError(f"form not found, token={form_token}") # The type checker is not smart enought to validate the following invariant. # So we need to assert it manually. assert recipient_type is not None, "recipient_type cannot be None here." diff --git a/api/controllers/console/tag/tags.py b/api/controllers/console/tag/tags.py index 614bf03ea5..f73e2da54e 100644 --- a/api/controllers/console/tag/tags.py +++ b/api/controllers/console/tag/tags.py @@ -37,6 +37,11 @@ class TagBindingRemovePayload(BaseModel): type: TagType = Field(description="Tag type") +class TagBindingItemDeletePayload(BaseModel): + target_id: str = Field(description="Target ID to unbind tag from") + type: TagType = Field(description="Tag type") + + class TagListQueryParam(BaseModel): type: Literal["knowledge", "app", ""] = Field("", description="Tag type filter") keyword: str | None = Field(None, description="Search keyword") @@ -70,6 +75,7 @@ register_schema_models( TagBasePayload, TagBindingPayload, TagBindingRemovePayload, + TagBindingItemDeletePayload, TagListQueryParam, TagResponse, ) @@ -152,41 +158,107 @@ class TagUpdateDeleteApi(Resource): return "", 204 -@console_ns.route("/tag-bindings/create") -class TagBindingCreateApi(Resource): +def _require_tag_binding_edit_permission() -> None: + """ + Ensure the current account can edit tag bindings. + + Tag binding operations are allowed for users who can edit resources (app/dataset) within the current tenant. 
+ """ + current_user, _ = current_account_with_tenant() + # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator + if not (current_user.has_edit_permission or current_user.is_dataset_editor): + raise Forbidden() + + +def _create_tag_bindings() -> tuple[dict[str, str], int]: + _require_tag_binding_edit_permission() + + payload = TagBindingPayload.model_validate(console_ns.payload or {}) + TagService.save_tag_binding( + TagBindingCreatePayload( + tag_ids=payload.tag_ids, + target_id=payload.target_id, + type=payload.type, + ) + ) + return {"result": "success"}, 200 + + +def _remove_tag_binding() -> tuple[dict[str, str], int]: + _require_tag_binding_edit_permission() + + payload = TagBindingRemovePayload.model_validate(console_ns.payload or {}) + TagService.delete_tag_binding( + TagBindingDeletePayload( + tag_id=payload.tag_id, + target_id=payload.target_id, + type=payload.type, + ) + ) + return {"result": "success"}, 200 + + +@console_ns.route("/tag-bindings") +class TagBindingCollectionApi(Resource): + """Canonical collection resource for tag binding creation.""" + + @console_ns.doc("create_tag_binding") @console_ns.expect(console_ns.models[TagBindingPayload.__name__]) @setup_required @login_required @account_initialization_required def post(self): - current_user, _ = current_account_with_tenant() - # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator - if not (current_user.has_edit_permission or current_user.is_dataset_editor): - raise Forbidden() + return _create_tag_bindings() - payload = TagBindingPayload.model_validate(console_ns.payload or {}) - TagService.save_tag_binding( - TagBindingCreatePayload(tag_ids=payload.tag_ids, target_id=payload.target_id, type=payload.type) + +@console_ns.route("/tag-bindings/") +class TagBindingItemApi(Resource): + """Canonical item resource for tag binding deletion.""" + + @console_ns.doc("delete_tag_binding") + @console_ns.doc(params={"id": 
"Tag ID"}) + @console_ns.expect(console_ns.models[TagBindingItemDeletePayload.__name__]) + @setup_required + @login_required + @account_initialization_required + def delete(self, id): + _require_tag_binding_edit_permission() + payload = TagBindingItemDeletePayload.model_validate(console_ns.payload or {}) + TagService.delete_tag_binding( + TagBindingDeletePayload( + tag_id=str(id), + target_id=payload.target_id, + type=payload.type, + ) ) - return {"result": "success"}, 200 +@console_ns.route("/tag-bindings/create") +class DeprecatedTagBindingCreateApi(Resource): + """Deprecated verb-based alias for tag binding creation.""" + + @console_ns.doc("create_tag_binding_deprecated") + @console_ns.doc(deprecated=True) + @console_ns.doc(description="Deprecated legacy alias. Use POST /tag-bindings instead.") + @console_ns.expect(console_ns.models[TagBindingPayload.__name__]) + @setup_required + @login_required + @account_initialization_required + def post(self): + return _create_tag_bindings() + + @console_ns.route("/tag-bindings/remove") -class TagBindingDeleteApi(Resource): +class DeprecatedTagBindingRemoveApi(Resource): + """Deprecated verb-based alias for tag binding deletion.""" + + @console_ns.doc("delete_tag_binding_deprecated") + @console_ns.doc(deprecated=True) + @console_ns.doc(description="Deprecated legacy alias. 
Use DELETE /tag-bindings/{id} instead.") @console_ns.expect(console_ns.models[TagBindingRemovePayload.__name__]) @setup_required @login_required @account_initialization_required def post(self): - current_user, _ = current_account_with_tenant() - # The role of the current user in the ta table must be admin, owner, editor, or dataset_operator - if not (current_user.has_edit_permission or current_user.is_dataset_editor): - raise Forbidden() - - payload = TagBindingRemovePayload.model_validate(console_ns.payload or {}) - TagService.delete_tag_binding( - TagBindingDeletePayload(tag_id=payload.tag_id, target_id=payload.target_id, type=payload.type) - ) - - return {"result": "success"}, 200 + return _remove_tag_binding() diff --git a/api/controllers/console/workspace/endpoint.py b/api/controllers/console/workspace/endpoint.py index f45b72f390..d4be07382a 100644 --- a/api/controllers/console/workspace/endpoint.py +++ b/api/controllers/console/workspace/endpoint.py @@ -1,3 +1,11 @@ +"""Console workspace endpoint controllers. + +This module exposes workspace-scoped plugin endpoint management APIs. The +canonical write routes follow resource-oriented paths, while the historical +verb-based aliases stay available as deprecated resources so OpenAPI metadata +marks only the legacy paths as deprecated. 
+""" + from typing import Any from flask import request @@ -25,7 +33,12 @@ class EndpointIdPayload(BaseModel): endpoint_id: str -class EndpointUpdatePayload(EndpointIdPayload): +class EndpointUpdatePayload(BaseModel): + settings: dict[str, Any] + name: str = Field(min_length=1) + + +class LegacyEndpointUpdatePayload(EndpointIdPayload): settings: dict[str, Any] name: str = Field(min_length=1) @@ -76,6 +89,7 @@ register_schema_models( EndpointCreatePayload, EndpointIdPayload, EndpointUpdatePayload, + LegacyEndpointUpdatePayload, EndpointListQuery, EndpointListForPluginQuery, EndpointCreateResponse, @@ -88,8 +102,60 @@ register_schema_models( ) -@console_ns.route("/workspaces/current/endpoints/create") -class EndpointCreateApi(Resource): +def _create_endpoint() -> dict[str, bool]: + """Create a plugin endpoint for the current workspace.""" + user, tenant_id = current_account_with_tenant() + + args = EndpointCreatePayload.model_validate(console_ns.payload) + + try: + return { + "success": EndpointService.create_endpoint( + tenant_id=tenant_id, + user_id=user.id, + plugin_unique_identifier=args.plugin_unique_identifier, + name=args.name, + settings=args.settings, + ) + } + except PluginPermissionDeniedError as e: + raise ValueError(e.description) from e + + +def _update_endpoint(endpoint_id: str) -> dict[str, bool]: + """Update a plugin endpoint identified by the canonical path parameter.""" + user, tenant_id = current_account_with_tenant() + + args = EndpointUpdatePayload.model_validate(console_ns.payload) + + return { + "success": EndpointService.update_endpoint( + tenant_id=tenant_id, + user_id=user.id, + endpoint_id=endpoint_id, + name=args.name, + settings=args.settings, + ) + } + + +def _delete_endpoint(endpoint_id: str) -> dict[str, bool]: + """Delete a plugin endpoint identified by the canonical path parameter.""" + user, tenant_id = current_account_with_tenant() + + return { + "success": EndpointService.delete_endpoint( + tenant_id=tenant_id, + user_id=user.id, 
+ endpoint_id=endpoint_id, + ) + } + + +@console_ns.route("/workspaces/current/endpoints") +class EndpointCollectionApi(Resource): + """Canonical collection resource for endpoint creation.""" + @console_ns.doc("create_endpoint") @console_ns.doc(description="Create a new plugin endpoint") @console_ns.expect(console_ns.models[EndpointCreatePayload.__name__]) @@ -104,22 +170,33 @@ class EndpointCreateApi(Resource): @is_admin_or_owner_required @account_initialization_required def post(self): - user, tenant_id = current_account_with_tenant() + return _create_endpoint() - args = EndpointCreatePayload.model_validate(console_ns.payload) - try: - return { - "success": EndpointService.create_endpoint( - tenant_id=tenant_id, - user_id=user.id, - plugin_unique_identifier=args.plugin_unique_identifier, - name=args.name, - settings=args.settings, - ) - } - except PluginPermissionDeniedError as e: - raise ValueError(e.description) from e +@console_ns.route("/workspaces/current/endpoints/create") +class DeprecatedEndpointCreateApi(Resource): + """Deprecated verb-based alias for endpoint creation.""" + + @console_ns.doc("create_endpoint_deprecated") + @console_ns.doc(deprecated=True) + @console_ns.doc( + description=( + "Deprecated legacy alias for creating a plugin endpoint. Use POST /workspaces/current/endpoints instead." 
+ ) + ) + @console_ns.expect(console_ns.models[EndpointCreatePayload.__name__]) + @console_ns.response( + 200, + "Endpoint created successfully", + console_ns.models[EndpointCreateResponse.__name__], + ) + @console_ns.response(403, "Admin privileges required") + @setup_required + @login_required + @is_admin_or_owner_required + @account_initialization_required + def post(self): + return _create_endpoint() @console_ns.route("/workspaces/current/endpoints/list") @@ -190,10 +267,56 @@ class EndpointListForSinglePluginApi(Resource): ) -@console_ns.route("/workspaces/current/endpoints/delete") -class EndpointDeleteApi(Resource): +@console_ns.route("/workspaces/current/endpoints/") +class EndpointItemApi(Resource): + """Canonical item resource for endpoint updates and deletion.""" + @console_ns.doc("delete_endpoint") @console_ns.doc(description="Delete a plugin endpoint") + @console_ns.doc(params={"id": {"description": "Endpoint ID", "type": "string", "required": True}}) + @console_ns.response( + 200, + "Endpoint deleted successfully", + console_ns.models[EndpointDeleteResponse.__name__], + ) + @console_ns.response(403, "Admin privileges required") + @setup_required + @login_required + @is_admin_or_owner_required + @account_initialization_required + def delete(self, id: str): + return _delete_endpoint(endpoint_id=id) + + @console_ns.doc("update_endpoint") + @console_ns.doc(description="Update a plugin endpoint") + @console_ns.expect(console_ns.models[EndpointUpdatePayload.__name__]) + @console_ns.doc(params={"id": {"description": "Endpoint ID", "type": "string", "required": True}}) + @console_ns.response( + 200, + "Endpoint updated successfully", + console_ns.models[EndpointUpdateResponse.__name__], + ) + @console_ns.response(403, "Admin privileges required") + @setup_required + @login_required + @is_admin_or_owner_required + @account_initialization_required + def patch(self, id: str): + return _update_endpoint(endpoint_id=id) + + 
+@console_ns.route("/workspaces/current/endpoints/delete") +class DeprecatedEndpointDeleteApi(Resource): + """Deprecated verb-based alias for endpoint deletion.""" + + @console_ns.doc("delete_endpoint_deprecated") + @console_ns.doc(deprecated=True) + @console_ns.doc( + description=( + "Deprecated legacy alias for deleting a plugin endpoint. " + "Use DELETE /workspaces/current/endpoints/{id} instead." + ) + ) @console_ns.expect(console_ns.models[EndpointIdPayload.__name__]) @console_ns.response( 200, @@ -206,22 +329,23 @@ class EndpointDeleteApi(Resource): @is_admin_or_owner_required @account_initialization_required def post(self): - user, tenant_id = current_account_with_tenant() - args = EndpointIdPayload.model_validate(console_ns.payload) - - return { - "success": EndpointService.delete_endpoint( - tenant_id=tenant_id, user_id=user.id, endpoint_id=args.endpoint_id - ) - } + return _delete_endpoint(endpoint_id=args.endpoint_id) @console_ns.route("/workspaces/current/endpoints/update") -class EndpointUpdateApi(Resource): - @console_ns.doc("update_endpoint") - @console_ns.doc(description="Update a plugin endpoint") - @console_ns.expect(console_ns.models[EndpointUpdatePayload.__name__]) +class DeprecatedEndpointUpdateApi(Resource): + """Deprecated verb-based alias for endpoint updates.""" + + @console_ns.doc("update_endpoint_deprecated") + @console_ns.doc(deprecated=True) + @console_ns.doc( + description=( + "Deprecated legacy alias for updating a plugin endpoint. " + "Use PATCH /workspaces/current/endpoints/{id} instead." 
+ ) + ) + @console_ns.expect(console_ns.models[LegacyEndpointUpdatePayload.__name__]) @console_ns.response( 200, "Endpoint updated successfully", @@ -233,19 +357,8 @@ class EndpointUpdateApi(Resource): @is_admin_or_owner_required @account_initialization_required def post(self): - user, tenant_id = current_account_with_tenant() - - args = EndpointUpdatePayload.model_validate(console_ns.payload) - - return { - "success": EndpointService.update_endpoint( - tenant_id=tenant_id, - user_id=user.id, - endpoint_id=args.endpoint_id, - name=args.name, - settings=args.settings, - ) - } + args = LegacyEndpointUpdatePayload.model_validate(console_ns.payload) + return _update_endpoint(endpoint_id=args.endpoint_id) @console_ns.route("/workspaces/current/endpoints/enable") diff --git a/api/controllers/service_api/__init__.py b/api/controllers/service_api/__init__.py index 4f7f7d9a98..182631e8f5 100644 --- a/api/controllers/service_api/__init__.py +++ b/api/controllers/service_api/__init__.py @@ -23,9 +23,11 @@ from .app import ( conversation, file, file_preview, + human_input_form, message, site, workflow, + workflow_events, ) from .dataset import ( dataset, @@ -50,6 +52,7 @@ __all__ = [ "file", "file_preview", "hit_testing", + "human_input_form", "index", "message", "metadata", @@ -58,6 +61,7 @@ __all__ = [ "segment", "site", "workflow", + "workflow_events", ] api.add_namespace(service_api_ns) diff --git a/api/controllers/service_api/app/human_input_form.py b/api/controllers/service_api/app/human_input_form.py new file mode 100644 index 0000000000..8e5003dbbf --- /dev/null +++ b/api/controllers/service_api/app/human_input_form.py @@ -0,0 +1,137 @@ +""" +Service API human input form endpoints. + +This module exposes app-token authenticated APIs for fetching and submitting +paused human input forms in workflow/chatflow runs. 
+""" + +import json +import logging +from datetime import datetime + +from flask import Response +from flask_restx import Resource +from werkzeug.exceptions import BadRequest, NotFound + +from controllers.common.human_input import HumanInputFormSubmitPayload +from controllers.common.schema import register_schema_models +from controllers.service_api import service_api_ns +from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token +from core.workflow.human_input_policy import HumanInputSurface, is_recipient_type_allowed_for_surface +from extensions.ext_database import db +from models.model import App, EndUser +from services.human_input_service import Form, FormNotFoundError, HumanInputService + +logger = logging.getLogger(__name__) + + +register_schema_models(service_api_ns, HumanInputFormSubmitPayload) + + +def _stringify_default_values(values: dict[str, object]) -> dict[str, str]: + result: dict[str, str] = {} + for key, value in values.items(): + if value is None: + result[key] = "" + elif isinstance(value, (dict, list)): + result[key] = json.dumps(value, ensure_ascii=False) + else: + result[key] = str(value) + return result + + +def _to_timestamp(value: datetime) -> int: + return int(value.timestamp()) + + +def _jsonify_form_definition(form: Form) -> Response: + definition_payload = form.get_definition().model_dump() + payload = { + "form_content": definition_payload["rendered_content"], + "inputs": definition_payload["inputs"], + "resolved_default_values": _stringify_default_values(definition_payload["default_values"]), + "user_actions": definition_payload["user_actions"], + "expiration_time": _to_timestamp(form.expiration_time), + } + return Response(json.dumps(payload, ensure_ascii=False), mimetype="application/json") + + +def _ensure_form_belongs_to_app(form: Form, app_model: App) -> None: + if form.app_id != app_model.id or form.tenant_id != app_model.tenant_id: + raise NotFound("Form not found") + + +def 
_ensure_form_is_allowed_for_service_api(form: Form) -> None: + # Keep app-token callers scoped to the public web-form surface; internal HITL + # routes must continue to flow through console-only authentication. + if not is_recipient_type_allowed_for_surface(form.recipient_type, HumanInputSurface.SERVICE_API): + raise NotFound("Form not found") + + +@service_api_ns.route("/form/human_input/") +class WorkflowHumanInputFormApi(Resource): + @service_api_ns.doc("get_human_input_form") + @service_api_ns.doc(description="Get a paused human input form by token") + @service_api_ns.doc(params={"form_token": "Human input form token"}) + @service_api_ns.doc( + responses={ + 200: "Form retrieved successfully", + 401: "Unauthorized - invalid API token", + 404: "Form not found", + 412: "Form already submitted or expired", + } + ) + @validate_app_token + def get(self, app_model: App, form_token: str): + service = HumanInputService(db.engine) + form = service.get_form_by_token(form_token) + if form is None: + raise NotFound("Form not found") + + _ensure_form_belongs_to_app(form, app_model) + _ensure_form_is_allowed_for_service_api(form) + service.ensure_form_active(form) + return _jsonify_form_definition(form) + + @service_api_ns.expect(service_api_ns.models[HumanInputFormSubmitPayload.__name__]) + @service_api_ns.doc("submit_human_input_form") + @service_api_ns.doc(description="Submit a paused human input form by token") + @service_api_ns.doc(params={"form_token": "Human input form token"}) + @service_api_ns.doc( + responses={ + 200: "Form submitted successfully", + 400: "Bad request - invalid submission data", + 401: "Unauthorized - invalid API token", + 404: "Form not found", + 412: "Form already submitted or expired", + } + ) + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.JSON, required=True)) + def post(self, app_model: App, end_user: EndUser, form_token: str): + payload = HumanInputFormSubmitPayload.model_validate(service_api_ns.payload or {}) + + 
service = HumanInputService(db.engine) + form = service.get_form_by_token(form_token) + if form is None: + raise NotFound("Form not found") + + _ensure_form_belongs_to_app(form, app_model) + _ensure_form_is_allowed_for_service_api(form) + + recipient_type = form.recipient_type + if recipient_type is None: + logger.warning("Recipient type is None for form, form_id=%s", form.id) + raise BadRequest("Form recipient type is invalid") + + try: + service.submit_form_by_token( + recipient_type=recipient_type, + form_token=form_token, + selected_action_id=payload.action, + form_data=payload.inputs, + submission_end_user_id=end_user.id, + ) + except FormNotFoundError: + raise NotFound("Form not found") + + return {}, 200 diff --git a/api/controllers/service_api/app/workflow_events.py b/api/controllers/service_api/app/workflow_events.py new file mode 100644 index 0000000000..b281b271c0 --- /dev/null +++ b/api/controllers/service_api/app/workflow_events.py @@ -0,0 +1,142 @@ +""" +Service API workflow resume event stream endpoints. 
+""" + +import json +from collections.abc import Generator + +from flask import Response, request +from flask_restx import Resource +from sqlalchemy.orm import sessionmaker +from werkzeug.exceptions import NotFound + +from controllers.service_api import service_api_ns +from controllers.service_api.app.error import NotWorkflowAppError +from controllers.service_api.wraps import FetchUserArg, WhereisUserArg, validate_app_token +from core.app.apps.advanced_chat.app_generator import AdvancedChatAppGenerator +from core.app.apps.base_app_generator import BaseAppGenerator +from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter +from core.app.apps.message_generator import MessageGenerator +from core.app.apps.workflow.app_generator import WorkflowAppGenerator +from core.app.entities.task_entities import StreamEvent +from core.workflow.human_input_policy import HumanInputSurface +from extensions.ext_database import db +from models.enums import CreatorUserRole +from models.model import App, AppMode, EndUser +from repositories.factory import DifyAPIRepositoryFactory +from services.workflow_event_snapshot_service import build_workflow_event_stream + + +@service_api_ns.route("/workflow//events") +class WorkflowEventsApi(Resource): + """Service API for getting workflow execution events after resume.""" + + @service_api_ns.doc("get_workflow_events") + @service_api_ns.doc(description="Get workflow execution events stream after resume") + @service_api_ns.doc( + params={ + "task_id": "Workflow run ID", + "user": "End user identifier (query param)", + "include_state_snapshot": ( + "Whether to replay from persisted state snapshot, " + 'specify `"true"` to include a status snapshot of executed nodes' + ), + "continue_on_pause": ( + "Whether to keep the stream open across workflow_paused events," + 'specify `"true"` to keep the stream open for `workflow_paused` events.' 
+ ), + } + ) + @service_api_ns.doc( + responses={ + 200: "SSE event stream", + 401: "Unauthorized - invalid API token", + 404: "Workflow run not found", + } + ) + @validate_app_token(fetch_user_arg=FetchUserArg(fetch_from=WhereisUserArg.QUERY, required=True)) + def get(self, app_model: App, end_user: EndUser, task_id: str): + app_mode = AppMode.value_of(app_model.mode) + if app_mode not in {AppMode.WORKFLOW, AppMode.ADVANCED_CHAT}: + raise NotWorkflowAppError() + + session_maker = sessionmaker(db.engine) + repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) + workflow_run = repo.get_workflow_run_by_id_and_tenant_id( + tenant_id=app_model.tenant_id, + run_id=task_id, + ) + + if workflow_run is None: + raise NotFound("Workflow run not found") + + if workflow_run.app_id != app_model.id: + raise NotFound("Workflow run not found") + + if workflow_run.created_by_role != CreatorUserRole.END_USER: + raise NotFound("Workflow run not found") + + if workflow_run.created_by != end_user.id: + raise NotFound("Workflow run not found") + + workflow_run_entity = workflow_run + + if workflow_run_entity.finished_at is not None: + response = WorkflowResponseConverter.workflow_run_result_to_finish_response( + task_id=workflow_run_entity.id, + workflow_run=workflow_run_entity, + creator_user=end_user, + ) + + payload = response.model_dump(mode="json") + payload["event"] = response.event.value + + def _generate_finished_events() -> Generator[str, None, None]: + yield f"data: {json.dumps(payload)}\n\n" + + event_generator = _generate_finished_events + else: + msg_generator = MessageGenerator() + generator: BaseAppGenerator + if app_mode == AppMode.ADVANCED_CHAT: + generator = AdvancedChatAppGenerator() + elif app_mode == AppMode.WORKFLOW: + generator = WorkflowAppGenerator() + else: + raise NotWorkflowAppError() + + include_state_snapshot = request.args.get("include_state_snapshot", "false").lower() == "true" + continue_on_pause = 
request.args.get("continue_on_pause", "false").lower() == "true" + terminal_events: list[StreamEvent] | None = [] if continue_on_pause else None + + def _generate_stream_events(): + if include_state_snapshot: + return generator.convert_to_event_stream( + build_workflow_event_stream( + app_mode=app_mode, + workflow_run=workflow_run_entity, + tenant_id=app_model.tenant_id, + app_id=app_model.id, + session_maker=session_maker, + human_input_surface=HumanInputSurface.SERVICE_API, + close_on_pause=not continue_on_pause, + ) + ) + return generator.convert_to_event_stream( + msg_generator.retrieve_events( + app_mode, + workflow_run_entity.id, + terminal_events=terminal_events, + ), + ) + + event_generator = _generate_stream_events + + return Response( + event_generator(), + mimetype="text/event-stream", + headers={ + "Cache-Control": "no-cache", + "Connection": "keep-alive", + }, + ) diff --git a/api/controllers/service_api/dataset/document.py b/api/controllers/service_api/dataset/document.py index 6db047567f..bc28ecb6b7 100644 --- a/api/controllers/service_api/dataset/document.py +++ b/api/controllers/service_api/dataset/document.py @@ -1,4 +1,12 @@ +"""Service API endpoints for dataset document management. + +The canonical Service API paths use hyphenated route segments. Legacy underscore +aliases remain registered for backward compatibility, but they must stay marked +deprecated in generated API docs so clients migrate toward the canonical paths. 
+""" + import json +from collections.abc import Mapping from contextlib import ExitStack from typing import Self from uuid import UUID @@ -117,12 +125,137 @@ register_schema_models( ) -@service_api_ns.route( - "/datasets//document/create_by_text", - "/datasets//document/create-by-text", -) +def _create_document_by_text(tenant_id: str, dataset_id: UUID) -> tuple[Mapping[str, object], int]: + """Create a document from text for both canonical and legacy routes.""" + payload = DocumentTextCreatePayload.model_validate(service_api_ns.payload or {}) + args = payload.model_dump(exclude_none=True) + + dataset_id_str = str(dataset_id) + tenant_id_str = str(tenant_id) + dataset = db.session.scalar( + select(Dataset).where(Dataset.tenant_id == tenant_id_str, Dataset.id == dataset_id_str).limit(1) + ) + + if not dataset: + raise ValueError("Dataset does not exist.") + + if not dataset.indexing_technique and not args["indexing_technique"]: + raise ValueError("indexing_technique is required.") + + embedding_model_provider = payload.embedding_model_provider + embedding_model = payload.embedding_model + if embedding_model_provider and embedding_model: + DatasetService.check_embedding_model_setting(tenant_id_str, embedding_model_provider, embedding_model) + + retrieval_model = payload.retrieval_model + if ( + retrieval_model + and retrieval_model.reranking_model + and retrieval_model.reranking_model.reranking_provider_name + and retrieval_model.reranking_model.reranking_model_name + ): + DatasetService.check_reranking_model_setting( + tenant_id_str, + retrieval_model.reranking_model.reranking_provider_name, + retrieval_model.reranking_model.reranking_model_name, + ) + + if not current_user: + raise ValueError("current_user is required") + + upload_file = FileService(db.engine).upload_text( + text=payload.text, text_name=payload.name, user_id=current_user.id, tenant_id=tenant_id_str + ) + data_source = { + "type": "upload_file", + "info_list": {"data_source_type": "upload_file", 
"file_info_list": {"file_ids": [upload_file.id]}}, + } + args["data_source"] = data_source + knowledge_config = KnowledgeConfig.model_validate(args) + DocumentService.document_create_args_validate(knowledge_config) + + if not current_user: + raise ValueError("current_user is required") + + try: + documents, batch = DocumentService.save_document_with_dataset_id( + dataset=dataset, + knowledge_config=knowledge_config, + account=current_user, + dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None, + created_from="api", + ) + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + document = documents[0] + + documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch} + return documents_and_batch_fields, 200 + + +def _update_document_by_text(tenant_id: str, dataset_id: UUID, document_id: UUID) -> tuple[Mapping[str, object], int]: + """Update a document from text for both canonical and legacy routes.""" + payload = DocumentTextUpdate.model_validate(service_api_ns.payload or {}) + dataset = db.session.scalar( + select(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == str(dataset_id)).limit(1) + ) + args = payload.model_dump(exclude_none=True) + if not dataset: + raise ValueError("Dataset does not exist.") + + retrieval_model = payload.retrieval_model + if ( + retrieval_model + and retrieval_model.reranking_model + and retrieval_model.reranking_model.reranking_provider_name + and retrieval_model.reranking_model.reranking_model_name + ): + DatasetService.check_reranking_model_setting( + tenant_id, + retrieval_model.reranking_model.reranking_provider_name, + retrieval_model.reranking_model.reranking_model_name, + ) + + # indexing_technique is already set in dataset since this is an update + args["indexing_technique"] = dataset.indexing_technique + + if args.get("text"): + text = args.get("text") + name = args.get("name") + if not current_user: + raise 
ValueError("current_user is required") + upload_file = FileService(db.engine).upload_text( + text=str(text), text_name=str(name), user_id=current_user.id, tenant_id=tenant_id + ) + data_source = { + "type": "upload_file", + "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, + } + args["data_source"] = data_source + + args["original_document_id"] = str(document_id) + knowledge_config = KnowledgeConfig.model_validate(args) + DocumentService.document_create_args_validate(knowledge_config) + + try: + documents, batch = DocumentService.save_document_with_dataset_id( + dataset=dataset, + knowledge_config=knowledge_config, + account=current_user, + dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None, + created_from="api", + ) + except ProviderTokenNotInitError as ex: + raise ProviderNotInitializeError(ex.description) + document = documents[0] + + documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch} + return documents_and_batch_fields, 200 + + +@service_api_ns.route("/datasets//document/create-by-text") class DocumentAddByTextApi(DatasetApiResource): - """Resource for documents.""" + """Resource for the canonical text document creation route.""" @service_api_ns.expect(service_api_ns.models[DocumentTextCreatePayload.__name__]) @service_api_ns.doc("create_document_by_text") @@ -138,81 +271,43 @@ class DocumentAddByTextApi(DatasetApiResource): @cloud_edition_billing_resource_check("vector_space", "dataset") @cloud_edition_billing_resource_check("documents", "dataset") @cloud_edition_billing_rate_limit_check("knowledge", "dataset") - def post(self, tenant_id, dataset_id): + def post(self, tenant_id: str, dataset_id: UUID): """Create document by text.""" - payload = DocumentTextCreatePayload.model_validate(service_api_ns.payload or {}) - args = payload.model_dump(exclude_none=True) + return _create_document_by_text(tenant_id=tenant_id, 
dataset_id=dataset_id) - dataset_id = str(dataset_id) - tenant_id = str(tenant_id) - dataset = db.session.scalar( - select(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == dataset_id).limit(1) + +@service_api_ns.route("/datasets//document/create_by_text") +class DeprecatedDocumentAddByTextApi(DatasetApiResource): + """Deprecated resource alias for text document creation.""" + + @service_api_ns.expect(service_api_ns.models[DocumentTextCreatePayload.__name__]) + @service_api_ns.doc("create_document_by_text_deprecated") + @service_api_ns.doc(deprecated=True) + @service_api_ns.doc( + description=( + "Deprecated legacy alias for creating a new document by providing text content. " + "Use /datasets/{dataset_id}/document/create-by-text instead." ) - - if not dataset: - raise ValueError("Dataset does not exist.") - - if not dataset.indexing_technique and not args["indexing_technique"]: - raise ValueError("indexing_technique is required.") - - embedding_model_provider = payload.embedding_model_provider - embedding_model = payload.embedding_model - if embedding_model_provider and embedding_model: - DatasetService.check_embedding_model_setting(tenant_id, embedding_model_provider, embedding_model) - - retrieval_model = payload.retrieval_model - if ( - retrieval_model - and retrieval_model.reranking_model - and retrieval_model.reranking_model.reranking_provider_name - and retrieval_model.reranking_model.reranking_model_name - ): - DatasetService.check_reranking_model_setting( - tenant_id, - retrieval_model.reranking_model.reranking_provider_name, - retrieval_model.reranking_model.reranking_model_name, - ) - - if not current_user: - raise ValueError("current_user is required") - - upload_file = FileService(db.engine).upload_text( - text=payload.text, text_name=payload.name, user_id=current_user.id, tenant_id=tenant_id - ) - data_source = { - "type": "upload_file", - "info_list": {"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, + 
) + @service_api_ns.doc(params={"dataset_id": "Dataset ID"}) + @service_api_ns.doc( + responses={ + 200: "Document created successfully", + 401: "Unauthorized - invalid API token", + 400: "Bad request - invalid parameters", } - args["data_source"] = data_source - knowledge_config = KnowledgeConfig.model_validate(args) - # validate args - DocumentService.document_create_args_validate(knowledge_config) - - if not current_user: - raise ValueError("current_user is required") - - try: - documents, batch = DocumentService.save_document_with_dataset_id( - dataset=dataset, - knowledge_config=knowledge_config, - account=current_user, - dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None, - created_from="api", - ) - except ProviderTokenNotInitError as ex: - raise ProviderNotInitializeError(ex.description) - document = documents[0] - - documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch} - return documents_and_batch_fields, 200 + ) + @cloud_edition_billing_resource_check("vector_space", "dataset") + @cloud_edition_billing_resource_check("documents", "dataset") + @cloud_edition_billing_rate_limit_check("knowledge", "dataset") + def post(self, tenant_id: str, dataset_id: UUID): + """Create document by text through the deprecated underscore alias.""" + return _create_document_by_text(tenant_id=tenant_id, dataset_id=dataset_id) -@service_api_ns.route( - "/datasets//documents//update_by_text", - "/datasets//documents//update-by-text", -) +@service_api_ns.route("/datasets//documents//update-by-text") class DocumentUpdateByTextApi(DatasetApiResource): - """Resource for update documents.""" + """Resource for the canonical text document update route.""" @service_api_ns.expect(service_api_ns.models[DocumentTextUpdate.__name__]) @service_api_ns.doc("update_document_by_text") @@ -229,62 +324,35 @@ class DocumentUpdateByTextApi(DatasetApiResource): @cloud_edition_billing_rate_limit_check("knowledge", "dataset") 
def post(self, tenant_id: str, dataset_id: UUID, document_id: UUID): """Update document by text.""" - payload = DocumentTextUpdate.model_validate(service_api_ns.payload or {}) - dataset = db.session.scalar( - select(Dataset).where(Dataset.tenant_id == tenant_id, Dataset.id == str(dataset_id)).limit(1) + return _update_document_by_text(tenant_id=tenant_id, dataset_id=dataset_id, document_id=document_id) + + +@service_api_ns.route("/datasets//documents//update_by_text") +class DeprecatedDocumentUpdateByTextApi(DatasetApiResource): + """Deprecated resource alias for text document updates.""" + + @service_api_ns.expect(service_api_ns.models[DocumentTextUpdate.__name__]) + @service_api_ns.doc("update_document_by_text_deprecated") + @service_api_ns.doc(deprecated=True) + @service_api_ns.doc( + description=( + "Deprecated legacy alias for updating an existing document by providing text content. " + "Use /datasets/{dataset_id}/documents/{document_id}/update-by-text instead." ) - args = payload.model_dump(exclude_none=True) - if not dataset: - raise ValueError("Dataset does not exist.") - - retrieval_model = payload.retrieval_model - if ( - retrieval_model - and retrieval_model.reranking_model - and retrieval_model.reranking_model.reranking_provider_name - and retrieval_model.reranking_model.reranking_model_name - ): - DatasetService.check_reranking_model_setting( - tenant_id, - retrieval_model.reranking_model.reranking_provider_name, - retrieval_model.reranking_model.reranking_model_name, - ) - - # indexing_technique is already set in dataset since this is an update - args["indexing_technique"] = dataset.indexing_technique - - if args.get("text"): - text = args.get("text") - name = args.get("name") - if not current_user: - raise ValueError("current_user is required") - upload_file = FileService(db.engine).upload_text( - text=str(text), text_name=str(name), user_id=current_user.id, tenant_id=tenant_id - ) - data_source = { - "type": "upload_file", - "info_list": 
{"data_source_type": "upload_file", "file_info_list": {"file_ids": [upload_file.id]}}, - } - args["data_source"] = data_source - # validate args - args["original_document_id"] = str(document_id) - knowledge_config = KnowledgeConfig.model_validate(args) - DocumentService.document_create_args_validate(knowledge_config) - - try: - documents, batch = DocumentService.save_document_with_dataset_id( - dataset=dataset, - knowledge_config=knowledge_config, - account=current_user, - dataset_process_rule=dataset.latest_process_rule if "process_rule" not in args else None, - created_from="api", - ) - except ProviderTokenNotInitError as ex: - raise ProviderNotInitializeError(ex.description) - document = documents[0] - - documents_and_batch_fields = {"document": marshal(document, document_fields), "batch": batch} - return documents_and_batch_fields, 200 + ) + @service_api_ns.doc(params={"dataset_id": "Dataset ID", "document_id": "Document ID"}) + @service_api_ns.doc( + responses={ + 200: "Document updated successfully", + 401: "Unauthorized - invalid API token", + 404: "Document not found", + } + ) + @cloud_edition_billing_resource_check("vector_space", "dataset") + @cloud_edition_billing_rate_limit_check("knowledge", "dataset") + def post(self, tenant_id: str, dataset_id: UUID, document_id: UUID): + """Update document by text through the deprecated underscore alias.""" + return _update_document_by_text(tenant_id=tenant_id, dataset_id=dataset_id, document_id=document_id) @service_api_ns.route( diff --git a/api/controllers/web/human_input_form.py b/api/controllers/web/human_input_form.py index 44876f8303..1ddf2e0717 100644 --- a/api/controllers/web/human_input_form.py +++ b/api/controllers/web/human_input_form.py @@ -9,11 +9,11 @@ from typing import Any, NotRequired, TypedDict from flask import Response, request from flask_restx import Resource -from pydantic import BaseModel from sqlalchemy import select from werkzeug.exceptions import Forbidden from configs import dify_config 
+from controllers.common.human_input import HumanInputFormSubmitPayload from controllers.web import web_ns from controllers.web.error import NotFoundError, WebFormRateLimitExceededError from controllers.web.site import serialize_app_site_payload @@ -26,11 +26,6 @@ from services.human_input_service import Form, FormNotFoundError, HumanInputServ logger = logging.getLogger(__name__) -class HumanInputFormSubmitPayload(BaseModel): - inputs: dict - action: str - - _FORM_SUBMIT_RATE_LIMITER = RateLimiter( prefix="web_form_submit_rate_limit", max_attempts=dify_config.WEB_FORM_SUBMIT_RATE_LIMIT_MAX_ATTEMPTS, diff --git a/api/core/app/app_config/features/suggested_questions_after_answer/manager.py b/api/core/app/app_config/features/suggested_questions_after_answer/manager.py index 2dddce349c..0c36992c77 100644 --- a/api/core/app/app_config/features/suggested_questions_after_answer/manager.py +++ b/api/core/app/app_config/features/suggested_questions_after_answer/manager.py @@ -1,5 +1,7 @@ from typing import Any +CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH = 1000 + class SuggestedQuestionsAfterAnswerConfigManager: @classmethod @@ -20,7 +22,11 @@ class SuggestedQuestionsAfterAnswerConfigManager: @classmethod def validate_and_set_defaults(cls, config: dict[str, Any]) -> tuple[dict[str, Any], list[str]]: """ - Validate and set defaults for suggested questions feature + Validate and set defaults for suggested questions feature. + + Optional fields: + - prompt: custom instruction prompt. + - model: provider/model configuration for suggested question generation. 
:param config: app model config args """ @@ -39,4 +45,27 @@ class SuggestedQuestionsAfterAnswerConfigManager: if not isinstance(config["suggested_questions_after_answer"]["enabled"], bool): raise ValueError("enabled in suggested_questions_after_answer must be of boolean type") + prompt = config["suggested_questions_after_answer"].get("prompt") + if prompt is not None and not isinstance(prompt, str): + raise ValueError("prompt in suggested_questions_after_answer must be of string type") + if isinstance(prompt, str) and len(prompt) > CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH: + raise ValueError( + f"prompt in suggested_questions_after_answer must be less than or equal to " + f"{CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH} characters" + ) + + if "model" in config["suggested_questions_after_answer"]: + model_config = config["suggested_questions_after_answer"]["model"] + if not isinstance(model_config, dict): + raise ValueError("model in suggested_questions_after_answer must be of object type") + + if "provider" not in model_config or not isinstance(model_config["provider"], str): + raise ValueError("provider in suggested_questions_after_answer.model must be of string type") + + if "name" not in model_config or not isinstance(model_config["name"], str): + raise ValueError("name in suggested_questions_after_answer.model must be of string type") + + if "completion_params" in model_config and not isinstance(model_config["completion_params"], dict): + raise ValueError("completion_params in suggested_questions_after_answer.model must be of object type") + return config, ["suggested_questions_after_answer"] diff --git a/api/core/app/apps/advanced_chat/app_generator.py b/api/core/app/apps/advanced_chat/app_generator.py index 9e64b471cb..b79d5514b4 100644 --- a/api/core/app/apps/advanced_chat/app_generator.py +++ b/api/core/app/apps/advanced_chat/app_generator.py @@ -34,7 +34,11 @@ from core.app.apps.exc import GenerateTaskStoppedError from core.app.apps.message_based_app_generator import 
MessageBasedAppGenerator from core.app.apps.message_based_app_queue_manager import MessageBasedAppQueueManager from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom -from core.app.entities.task_entities import ChatbotAppBlockingResponse, ChatbotAppStreamResponse +from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, + ChatbotAppBlockingResponse, + ChatbotAppStreamResponse, +) from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig, PauseStatePersistenceLayer from core.helper.trace_id_helper import extract_external_trace_id_from_args from core.ops.ops_trace_manager import TraceQueueManager @@ -655,7 +659,11 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator): user: Account | EndUser, draft_var_saver_factory: DraftVariableSaverFactory, stream: bool = False, - ) -> ChatbotAppBlockingResponse | Generator[ChatbotAppStreamResponse, None, None]: + ) -> ( + ChatbotAppBlockingResponse + | AdvancedChatPausedBlockingResponse + | Generator[ChatbotAppStreamResponse, None, None] + ): """ Handle response. 
:param application_generate_entity: application generate entity diff --git a/api/core/app/apps/advanced_chat/generate_response_converter.py b/api/core/app/apps/advanced_chat/generate_response_converter.py index fe2702ed69..7cb0c9a8d3 100644 --- a/api/core/app/apps/advanced_chat/generate_response_converter.py +++ b/api/core/app/apps/advanced_chat/generate_response_converter.py @@ -3,7 +3,7 @@ from typing import Any, cast from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( - AppBlockingResponse, + AdvancedChatPausedBlockingResponse, AppStreamResponse, ChatbotAppBlockingResponse, ChatbotAppStreamResponse, @@ -12,22 +12,40 @@ from core.app.entities.task_entities import ( NodeFinishStreamResponse, NodeStartStreamResponse, PingStreamResponse, + StreamEvent, ) -class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = ChatbotAppBlockingResponse - +class AdvancedChatAppGenerateResponseConverter( + AppGenerateResponseConverter[ChatbotAppBlockingResponse | AdvancedChatPausedBlockingResponse] +): @classmethod - def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_full_response( + cls, blocking_response: ChatbotAppBlockingResponse | AdvancedChatPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking full response. 
:param blocking_response: blocking response :return: """ - blocking_response = cast(ChatbotAppBlockingResponse, blocking_response) + if isinstance(blocking_response, AdvancedChatPausedBlockingResponse): + paused_data = blocking_response.data.model_dump(mode="json") + return { + "event": StreamEvent.WORKFLOW_PAUSED.value, + "task_id": blocking_response.task_id, + "id": blocking_response.data.id, + "message_id": blocking_response.data.message_id, + "conversation_id": blocking_response.data.conversation_id, + "mode": blocking_response.data.mode, + "answer": blocking_response.data.answer, + "metadata": blocking_response.data.metadata, + "created_at": blocking_response.data.created_at, + "workflow_run_id": blocking_response.data.workflow_run_id, + "data": paused_data, + } + response = { - "event": "message", + "event": StreamEvent.MESSAGE.value, "task_id": blocking_response.task_id, "id": blocking_response.data.id, "message_id": blocking_response.data.message_id, @@ -41,7 +59,9 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_simple_response( + cls, blocking_response: ChatbotAppBlockingResponse | AdvancedChatPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking simple response. 
:param blocking_response: blocking response @@ -50,7 +70,8 @@ class AdvancedChatAppGenerateResponseConverter(AppGenerateResponseConverter): response = cls.convert_blocking_full_response(blocking_response) metadata = response.get("metadata", {}) - response["metadata"] = cls._get_simple_metadata(metadata) + if isinstance(metadata, dict): + response["metadata"] = cls._get_simple_metadata(metadata) return response diff --git a/api/core/app/apps/advanced_chat/generate_task_pipeline.py b/api/core/app/apps/advanced_chat/generate_task_pipeline.py index 78b582bdf5..82dbf5381d 100644 --- a/api/core/app/apps/advanced_chat/generate_task_pipeline.py +++ b/api/core/app/apps/advanced_chat/generate_task_pipeline.py @@ -53,14 +53,18 @@ from core.app.entities.queue_entities import ( WorkflowQueueMessage, ) from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, ChatbotAppBlockingResponse, ChatbotAppStreamResponse, ErrorStreamResponse, + HumanInputRequiredPauseReasonPayload, + HumanInputRequiredResponse, MessageAudioEndStreamResponse, MessageAudioStreamResponse, MessageEndStreamResponse, PingStreamResponse, StreamResponse, + WorkflowPauseStreamResponse, WorkflowTaskState, ) from core.app.task_pipeline.based_generate_task_pipeline import BasedGenerateTaskPipeline @@ -210,7 +214,13 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): if message.status == MessageStatus.PAUSED and message.answer: self._task_state.answer = message.answer - def process(self) -> Union[ChatbotAppBlockingResponse, Generator[ChatbotAppStreamResponse, None, None]]: + def process( + self, + ) -> Union[ + ChatbotAppBlockingResponse, + AdvancedChatPausedBlockingResponse, + Generator[ChatbotAppStreamResponse, None, None], + ]: """ Process generate task pipeline. 
:return: @@ -226,14 +236,39 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): else: return self._to_blocking_response(generator) - def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) -> ChatbotAppBlockingResponse: + def _to_blocking_response( + self, generator: Generator[StreamResponse, None, None] + ) -> Union[ChatbotAppBlockingResponse, AdvancedChatPausedBlockingResponse]: """ Process blocking response. :return: """ + human_input_responses: list[HumanInputRequiredResponse] = [] for stream_response in generator: if isinstance(stream_response, ErrorStreamResponse): raise stream_response.err + elif isinstance(stream_response, HumanInputRequiredResponse): + human_input_responses.append(stream_response) + elif isinstance(stream_response, WorkflowPauseStreamResponse): + return AdvancedChatPausedBlockingResponse( + task_id=stream_response.task_id, + data=AdvancedChatPausedBlockingResponse.Data( + id=self._message_id, + mode=self._conversation_mode, + conversation_id=self._conversation_id, + message_id=self._message_id, + workflow_run_id=stream_response.data.workflow_run_id, + answer=self._task_state.answer, + metadata=self._message_end_to_stream_response().metadata, + created_at=self._message_created_at, + paused_nodes=stream_response.data.paused_nodes, + reasons=stream_response.data.reasons, + status=stream_response.data.status, + elapsed_time=stream_response.data.elapsed_time, + total_tokens=stream_response.data.total_tokens, + total_steps=stream_response.data.total_steps, + ), + ) elif isinstance(stream_response, MessageEndStreamResponse): extras = {} if stream_response.metadata: @@ -254,8 +289,41 @@ class AdvancedChatAppGenerateTaskPipeline(GraphRuntimeStateSupport): else: continue + if human_input_responses: + return self._build_paused_blocking_response_from_human_input(human_input_responses) + raise ValueError("queue listening stopped unexpectedly.") + def _build_paused_blocking_response_from_human_input( + self, 
human_input_responses: list[HumanInputRequiredResponse] + ) -> AdvancedChatPausedBlockingResponse: + runtime_state = self._resolve_graph_runtime_state() + paused_nodes = list(dict.fromkeys(response.data.node_id for response in human_input_responses)) + reasons = [ + HumanInputRequiredPauseReasonPayload.from_response_data(response.data).model_dump(mode="json") + for response in human_input_responses + ] + + return AdvancedChatPausedBlockingResponse( + task_id=self._application_generate_entity.task_id, + data=AdvancedChatPausedBlockingResponse.Data( + id=self._message_id, + mode=self._conversation_mode, + conversation_id=self._conversation_id, + message_id=self._message_id, + workflow_run_id=human_input_responses[-1].workflow_run_id, + answer=self._task_state.answer, + metadata=self._message_end_to_stream_response().metadata, + created_at=self._message_created_at, + paused_nodes=paused_nodes, + reasons=reasons, + status=WorkflowExecutionStatus.PAUSED, + elapsed_time=time.perf_counter() - self._base_task_pipeline.start_at, + total_tokens=runtime_state.total_tokens, + total_steps=runtime_state.node_run_steps, + ), + ) + def _to_stream_response( self, generator: Generator[StreamResponse, None, None] ) -> Generator[ChatbotAppStreamResponse, Any, None]: diff --git a/api/core/app/apps/agent_chat/generate_response_converter.py b/api/core/app/apps/agent_chat/generate_response_converter.py index 731c6ee12e..03bc0a9108 100644 --- a/api/core/app/apps/agent_chat/generate_response_converter.py +++ b/api/core/app/apps/agent_chat/generate_response_converter.py @@ -1,6 +1,8 @@ from collections.abc import Generator from typing import Any, cast +from pydantic import JsonValue + from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( AppStreamResponse, @@ -12,11 +14,9 @@ from core.app.entities.task_entities import ( ) -class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): - 
_blocking_response_type = ChatbotAppBlockingResponse - +class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter[ChatbotAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking full response. :param blocking_response: blocking response @@ -37,7 +37,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking simple response. :param blocking_response: blocking response @@ -70,7 +70,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, @@ -101,7 +101,7 @@ class AgentChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, diff --git a/api/core/app/apps/base_app_generate_response_converter.py b/api/core/app/apps/base_app_generate_response_converter.py index d5edfaeb25..abcbb2f943 100644 --- a/api/core/app/apps/base_app_generate_response_converter.py +++ b/api/core/app/apps/base_app_generate_response_converter.py @@ -1,7 +1,9 @@ import logging from abc import ABC, abstractmethod from collections.abc import Generator, Mapping -from typing import Any, Union +from typing import Any, Union, cast + +from pydantic import JsonValue from 
core.app.entities.app_invoke_entities import InvokeFrom from core.app.entities.task_entities import AppBlockingResponse, AppStreamResponse @@ -11,8 +13,10 @@ from graphon.model_runtime.errors.invoke import InvokeError logger = logging.getLogger(__name__) -class AppGenerateResponseConverter(ABC): - _blocking_response_type: type[AppBlockingResponse] +class AppGenerateResponseConverter[TBlockingResponse: AppBlockingResponse](ABC): + @classmethod + def _cast_blocking_response(cls, response: AppBlockingResponse) -> TBlockingResponse: + return cast(TBlockingResponse, response) @classmethod def convert( @@ -20,7 +24,7 @@ class AppGenerateResponseConverter(ABC): ) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]: if invoke_from in {InvokeFrom.DEBUGGER, InvokeFrom.SERVICE_API}: if isinstance(response, AppBlockingResponse): - return cls.convert_blocking_full_response(response) + return cls.convert_blocking_full_response(cls._cast_blocking_response(response)) else: def _generate_full_response() -> Generator[dict[str, Any] | str, Any, None]: @@ -29,7 +33,7 @@ class AppGenerateResponseConverter(ABC): return _generate_full_response() else: if isinstance(response, AppBlockingResponse): - return cls.convert_blocking_simple_response(response) + return cls.convert_blocking_simple_response(cls._cast_blocking_response(response)) else: def _generate_simple_response() -> Generator[dict[str, Any] | str, Any, None]: @@ -39,12 +43,12 @@ class AppGenerateResponseConverter(ABC): @classmethod @abstractmethod - def convert_blocking_full_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_full_response(cls, blocking_response: TBlockingResponse) -> dict[str, Any]: raise NotImplementedError @classmethod @abstractmethod - def convert_blocking_simple_response(cls, blocking_response: AppBlockingResponse) -> dict[str, Any]: + def convert_blocking_simple_response(cls, blocking_response: TBlockingResponse) -> dict[str, Any]: raise 
NotImplementedError @classmethod @@ -106,13 +110,13 @@ class AppGenerateResponseConverter(ABC): return metadata @classmethod - def _error_to_stream_response(cls, e: Exception) -> dict[str, Any]: + def _error_to_stream_response(cls, e: Exception) -> dict[str, JsonValue]: """ Error to stream response. :param e: exception :return: """ - error_responses: dict[type[Exception], dict[str, Any]] = { + error_responses: dict[type[Exception], dict[str, JsonValue]] = { ValueError: {"code": "invalid_param", "status": 400}, ProviderTokenNotInitError: {"code": "provider_not_initialize", "status": 400}, QuotaExceededError: { @@ -126,7 +130,7 @@ class AppGenerateResponseConverter(ABC): } # Determine the response based on the type of exception - data: dict[str, Any] | None = None + data: dict[str, JsonValue] | None = None for k, v in error_responses.items(): if isinstance(e, k): data = v diff --git a/api/core/app/apps/chat/generate_response_converter.py b/api/core/app/apps/chat/generate_response_converter.py index 3d0375151d..26efcbfafd 100644 --- a/api/core/app/apps/chat/generate_response_converter.py +++ b/api/core/app/apps/chat/generate_response_converter.py @@ -1,6 +1,8 @@ from collections.abc import Generator from typing import Any, cast +from pydantic import JsonValue + from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( AppStreamResponse, @@ -12,11 +14,9 @@ from core.app.entities.task_entities import ( ) -class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = ChatbotAppBlockingResponse - +class ChatAppGenerateResponseConverter(AppGenerateResponseConverter[ChatbotAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking full response. 
:param blocking_response: blocking response @@ -37,7 +37,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: ChatbotAppBlockingResponse): """ Convert blocking simple response. :param blocking_response: blocking response @@ -70,7 +70,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, @@ -101,7 +101,7 @@ class ChatAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "conversation_id": chunk.conversation_id, "message_id": chunk.message_id, diff --git a/api/core/app/apps/common/workflow_response_converter.py b/api/core/app/apps/common/workflow_response_converter.py index bd685d5189..7bab3f7bff 100644 --- a/api/core/app/apps/common/workflow_response_converter.py +++ b/api/core/app/apps/common/workflow_response_converter.py @@ -52,6 +52,7 @@ from core.tools.tool_manager import ToolManager from core.trigger.constants import TRIGGER_PLUGIN_NODE_TYPE from core.trigger.trigger_manager import TriggerManager from core.workflow.human_input_forms import load_form_tokens_by_form_id +from core.workflow.human_input_policy import HumanInputSurface, enrich_human_input_pause_reasons from core.workflow.system_variables import SystemVariableKey, system_variables_to_mapping from core.workflow.workflow_entry import WorkflowEntry from extensions.ext_database import db @@ -336,7 +337,26 @@ class WorkflowResponseConverter: except (TypeError, json.JSONDecodeError): definition_payload = {} 
display_in_ui_by_form_id[str(form_id)] = bool(definition_payload.get("display_in_ui")) - form_token_by_form_id = load_form_tokens_by_form_id(human_input_form_ids, session=session) + form_token_by_form_id = load_form_tokens_by_form_id( + human_input_form_ids, + session=session, + surface=( + HumanInputSurface.SERVICE_API + if self._application_generate_entity.invoke_from == InvokeFrom.SERVICE_API + else None + ), + ) + + # Reconnect paths must preserve the same pause-reason contract as live streams; + # otherwise clients see schema drift after resume. + pause_reasons = enrich_human_input_pause_reasons( + pause_reasons, + form_tokens_by_form_id=form_token_by_form_id, + expiration_times_by_form_id={ + form_id: int(expiration_time.timestamp()) + for form_id, expiration_time in expiration_times_by_form_id.items() + }, + ) responses: list[StreamResponse] = [] diff --git a/api/core/app/apps/completion/generate_response_converter.py b/api/core/app/apps/completion/generate_response_converter.py index 71886b39ba..ad978f58e0 100644 --- a/api/core/app/apps/completion/generate_response_converter.py +++ b/api/core/app/apps/completion/generate_response_converter.py @@ -1,6 +1,8 @@ from collections.abc import Generator from typing import Any, cast +from pydantic import JsonValue + from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter from core.app.entities.task_entities import ( AppStreamResponse, @@ -12,17 +14,15 @@ from core.app.entities.task_entities import ( ) -class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = CompletionAppBlockingResponse - +class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter[CompletionAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: CompletionAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: CompletionAppBlockingResponse): """ Convert 
blocking full response. :param blocking_response: blocking response :return: """ - response = { + response: dict[str, Any] = { "event": "message", "task_id": blocking_response.task_id, "id": blocking_response.data.id, @@ -36,7 +36,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): return response @classmethod - def convert_blocking_simple_response(cls, blocking_response: CompletionAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: CompletionAppBlockingResponse): """ Convert blocking simple response. :param blocking_response: blocking response @@ -69,7 +69,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "message_id": chunk.message_id, "created_at": chunk.created_at, @@ -99,7 +99,7 @@ class CompletionAppGenerateResponseConverter(AppGenerateResponseConverter): yield "ping" continue - response_chunk = { + response_chunk: dict[str, JsonValue] = { "event": sub_stream_response.event.value, "message_id": chunk.message_id, "created_at": chunk.created_at, diff --git a/api/core/app/apps/message_generator.py b/api/core/app/apps/message_generator.py index 68631bb230..c04f20c796 100644 --- a/api/core/app/apps/message_generator.py +++ b/api/core/app/apps/message_generator.py @@ -1,6 +1,7 @@ -from collections.abc import Callable, Generator, Mapping +from collections.abc import Callable, Generator, Iterable, Mapping from core.app.apps.streaming_utils import stream_topic_events +from core.app.entities.task_entities import StreamEvent from extensions.ext_redis import get_pubsub_broadcast_channel from libs.broadcast_channel.channel import Topic from models.model import AppMode @@ -26,6 +27,7 @@ class MessageGenerator: idle_timeout=300, ping_interval: float = 10.0, on_subscribe: Callable[[], None] | None = None, + terminal_events: 
Iterable[str | StreamEvent] | None = None, ) -> Generator[Mapping | str, None, None]: topic = cls.get_response_topic(app_mode, workflow_run_id) return stream_topic_events( @@ -33,4 +35,5 @@ class MessageGenerator: idle_timeout=idle_timeout, ping_interval=ping_interval, on_subscribe=on_subscribe, + terminal_events=terminal_events, ) diff --git a/api/core/app/apps/pipeline/generate_response_converter.py b/api/core/app/apps/pipeline/generate_response_converter.py index 02b3160b7c..3913657ae8 100644 --- a/api/core/app/apps/pipeline/generate_response_converter.py +++ b/api/core/app/apps/pipeline/generate_response_converter.py @@ -13,11 +13,9 @@ from core.app.entities.task_entities import ( ) -class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = WorkflowAppBlockingResponse - +class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter[WorkflowAppBlockingResponse]): @classmethod - def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, Any]: # type: ignore[override] + def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: """ Convert blocking full response. :param blocking_response: blocking response @@ -26,7 +24,7 @@ class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): return dict(blocking_response.model_dump()) @classmethod - def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, Any]: # type: ignore[override] + def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: """ Convert blocking simple response. 
:param blocking_response: blocking response diff --git a/api/core/app/apps/pipeline/pipeline_generator.py b/api/core/app/apps/pipeline/pipeline_generator.py index 4b2f17189b..4a76d0809e 100644 --- a/api/core/app/apps/pipeline/pipeline_generator.py +++ b/api/core/app/apps/pipeline/pipeline_generator.py @@ -27,7 +27,11 @@ from core.app.apps.workflow.generate_response_converter import WorkflowAppGenera from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline from core.app.entities.app_invoke_entities import InvokeFrom, RagPipelineGenerateEntity from core.app.entities.rag_pipeline_invoke_entities import RagPipelineInvokeEntity -from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse +from core.app.entities.task_entities import ( + WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, + WorkflowAppStreamResponse, +) from core.datasource.entities.datasource_entities import ( DatasourceProviderType, OnlineDriveBrowseFilesRequest, @@ -627,7 +631,11 @@ class PipelineGenerator(BaseAppGenerator): user: Account | EndUser, draft_var_saver_factory: DraftVariableSaverFactory, stream: bool = False, - ) -> WorkflowAppBlockingResponse | Generator[WorkflowAppStreamResponse, None, None]: + ) -> ( + WorkflowAppBlockingResponse + | WorkflowAppPausedBlockingResponse + | Generator[WorkflowAppStreamResponse, None, None] + ): """ Handle response. 
:param application_generate_entity: application generate entity diff --git a/api/core/app/apps/streaming_utils.py b/api/core/app/apps/streaming_utils.py index af3441aca3..5743bad4b6 100644 --- a/api/core/app/apps/streaming_utils.py +++ b/api/core/app/apps/streaming_utils.py @@ -59,7 +59,7 @@ def stream_topic_events( def _normalize_terminal_events(terminal_events: Iterable[str | StreamEvent] | None) -> set[str]: - if not terminal_events: + if terminal_events is None: return {StreamEvent.WORKFLOW_FINISHED.value, StreamEvent.WORKFLOW_PAUSED.value} values: set[str] = set() for item in terminal_events: diff --git a/api/core/app/apps/workflow/app_generator.py b/api/core/app/apps/workflow/app_generator.py index 6937014a06..e811c2b2e0 100644 --- a/api/core/app/apps/workflow/app_generator.py +++ b/api/core/app/apps/workflow/app_generator.py @@ -25,7 +25,11 @@ from core.app.apps.workflow.app_runner import WorkflowAppRunner from core.app.apps.workflow.generate_response_converter import WorkflowAppGenerateResponseConverter from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity -from core.app.entities.task_entities import WorkflowAppBlockingResponse, WorkflowAppStreamResponse +from core.app.entities.task_entities import ( + WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, + WorkflowAppStreamResponse, +) from core.app.layers.pause_state_persist_layer import PauseStateLayerConfig, PauseStatePersistenceLayer from core.db.session_factory import session_factory from core.helper.trace_id_helper import extract_external_trace_id_from_args @@ -612,7 +616,11 @@ class WorkflowAppGenerator(BaseAppGenerator): user: Account | EndUser, draft_var_saver_factory: DraftVariableSaverFactory, stream: bool = False, - ) -> WorkflowAppBlockingResponse | Generator[WorkflowAppStreamResponse, None, None]: + ) -> ( + WorkflowAppBlockingResponse + | 
WorkflowAppPausedBlockingResponse + | Generator[WorkflowAppStreamResponse, None, None] + ): """ Handle response. :param application_generate_entity: application generate entity diff --git a/api/core/app/apps/workflow/generate_response_converter.py b/api/core/app/apps/workflow/generate_response_converter.py index c69826cbef..4037388798 100644 --- a/api/core/app/apps/workflow/generate_response_converter.py +++ b/api/core/app/apps/workflow/generate_response_converter.py @@ -9,24 +9,29 @@ from core.app.entities.task_entities import ( NodeStartStreamResponse, PingStreamResponse, WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, WorkflowAppStreamResponse, ) -class WorkflowAppGenerateResponseConverter(AppGenerateResponseConverter): - _blocking_response_type = WorkflowAppBlockingResponse - +class WorkflowAppGenerateResponseConverter( + AppGenerateResponseConverter[WorkflowAppBlockingResponse | WorkflowAppPausedBlockingResponse] +): @classmethod - def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse): # type: ignore[override] + def convert_blocking_full_response( + cls, blocking_response: WorkflowAppBlockingResponse | WorkflowAppPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking full response. :param blocking_response: blocking response :return: """ - return blocking_response.model_dump() + return dict(blocking_response.model_dump()) @classmethod - def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse): # type: ignore[override] + def convert_blocking_simple_response( + cls, blocking_response: WorkflowAppBlockingResponse | WorkflowAppPausedBlockingResponse + ) -> dict[str, Any]: """ Convert blocking simple response. 
:param blocking_response: blocking response diff --git a/api/core/app/apps/workflow/generate_task_pipeline.py b/api/core/app/apps/workflow/generate_task_pipeline.py index 15645add57..87d9b73078 100644 --- a/api/core/app/apps/workflow/generate_task_pipeline.py +++ b/api/core/app/apps/workflow/generate_task_pipeline.py @@ -42,12 +42,15 @@ from core.app.entities.queue_entities import ( ) from core.app.entities.task_entities import ( ErrorStreamResponse, + HumanInputRequiredPauseReasonPayload, + HumanInputRequiredResponse, MessageAudioEndStreamResponse, MessageAudioStreamResponse, PingStreamResponse, StreamResponse, TextChunkStreamResponse, WorkflowAppBlockingResponse, + WorkflowAppPausedBlockingResponse, WorkflowAppStreamResponse, WorkflowFinishStreamResponse, WorkflowPauseStreamResponse, @@ -118,7 +121,11 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): ) self._graph_runtime_state: GraphRuntimeState | None = self._base_task_pipeline.queue_manager.graph_runtime_state - def process(self) -> Union[WorkflowAppBlockingResponse, Generator[WorkflowAppStreamResponse, None, None]]: + def process( + self, + ) -> Union[ + WorkflowAppBlockingResponse, WorkflowAppPausedBlockingResponse, Generator[WorkflowAppStreamResponse, None, None] + ]: """ Process generate task pipeline. :return: @@ -129,19 +136,24 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): else: return self._to_blocking_response(generator) - def _to_blocking_response(self, generator: Generator[StreamResponse, None, None]) -> WorkflowAppBlockingResponse: + def _to_blocking_response( + self, generator: Generator[StreamResponse, None, None] + ) -> Union[WorkflowAppBlockingResponse, WorkflowAppPausedBlockingResponse]: """ To blocking response. 
:return: """ + human_input_responses: list[HumanInputRequiredResponse] = [] for stream_response in generator: if isinstance(stream_response, ErrorStreamResponse): raise stream_response.err + elif isinstance(stream_response, HumanInputRequiredResponse): + human_input_responses.append(stream_response) elif isinstance(stream_response, WorkflowPauseStreamResponse): - response = WorkflowAppBlockingResponse( + return WorkflowAppPausedBlockingResponse( task_id=self._application_generate_entity.task_id, workflow_run_id=stream_response.data.workflow_run_id, - data=WorkflowAppBlockingResponse.Data( + data=WorkflowAppPausedBlockingResponse.Data( id=stream_response.data.workflow_run_id, workflow_id=self._workflow.id, status=stream_response.data.status, @@ -152,12 +164,13 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): total_steps=stream_response.data.total_steps, created_at=stream_response.data.created_at, finished_at=None, + paused_nodes=stream_response.data.paused_nodes, + reasons=stream_response.data.reasons, ), ) - return response elif isinstance(stream_response, WorkflowFinishStreamResponse): - response = WorkflowAppBlockingResponse( + return WorkflowAppBlockingResponse( task_id=self._application_generate_entity.task_id, workflow_run_id=stream_response.data.id, data=WorkflowAppBlockingResponse.Data( @@ -174,12 +187,44 @@ class WorkflowAppGenerateTaskPipeline(GraphRuntimeStateSupport): ), ) - return response else: continue + if human_input_responses: + return self._build_paused_blocking_response_from_human_input(human_input_responses) + raise ValueError("queue listening stopped unexpectedly.") + def _build_paused_blocking_response_from_human_input( + self, human_input_responses: list[HumanInputRequiredResponse] + ) -> WorkflowAppPausedBlockingResponse: + runtime_state = self._resolve_graph_runtime_state() + paused_nodes = list(dict.fromkeys(response.data.node_id for response in human_input_responses)) + created_at = int(runtime_state.start_at) + reasons 
= [ + HumanInputRequiredPauseReasonPayload.from_response_data(response.data).model_dump(mode="json") + for response in human_input_responses + ] + + return WorkflowAppPausedBlockingResponse( + task_id=self._application_generate_entity.task_id, + workflow_run_id=human_input_responses[-1].workflow_run_id, + data=WorkflowAppPausedBlockingResponse.Data( + id=human_input_responses[-1].workflow_run_id, + workflow_id=self._workflow.id, + status=WorkflowExecutionStatus.PAUSED, + outputs={}, + error=None, + elapsed_time=time.perf_counter() - self._base_task_pipeline.start_at, + total_tokens=runtime_state.total_tokens, + total_steps=runtime_state.node_run_steps, + created_at=created_at, + finished_at=None, + paused_nodes=paused_nodes, + reasons=reasons, + ), + ) + def _to_stream_response( self, generator: Generator[StreamResponse, None, None] ) -> Generator[WorkflowAppStreamResponse, None, None]: diff --git a/api/core/app/entities/task_entities.py b/api/core/app/entities/task_entities.py index 6e4ca69cf0..ad05566521 100644 --- a/api/core/app/entities/task_entities.py +++ b/api/core/app/entities/task_entities.py @@ -1,12 +1,13 @@ from collections.abc import Mapping, Sequence from enum import StrEnum -from typing import Any +from typing import Any, Literal -from pydantic import BaseModel, ConfigDict, Field +from pydantic import BaseModel, ConfigDict, Field, JsonValue from core.app.entities.agent_strategy import AgentStrategyInfo from core.rag.entities import RetrievalSourceMetadata from graphon.entities import WorkflowStartReason +from graphon.entities.pause_reason import PauseReasonType from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionMetadataKey, WorkflowNodeExecutionStatus from graphon.model_runtime.entities.llm_entities import LLMResult, LLMUsage from graphon.nodes.human_input.entities import FormInput, UserAction @@ -295,6 +296,40 @@ class HumanInputRequiredResponse(StreamResponse): data: Data +class HumanInputRequiredPauseReasonPayload(BaseModel): 
+ """ + Public pause-reason payload used by blocking responses when only + ``human_input_required`` events are available. + """ + + TYPE: Literal[PauseReasonType.HUMAN_INPUT_REQUIRED] = PauseReasonType.HUMAN_INPUT_REQUIRED + form_id: str + node_id: str + node_title: str + form_content: str + inputs: Sequence[FormInput] = Field(default_factory=list) + actions: Sequence[UserAction] = Field(default_factory=list) + display_in_ui: bool = False + form_token: str | None = None + resolved_default_values: Mapping[str, Any] = Field(default_factory=dict) + expiration_time: int + + @classmethod + def from_response_data(cls, data: HumanInputRequiredResponse.Data) -> "HumanInputRequiredPauseReasonPayload": + return cls( + form_id=data.form_id, + node_id=data.node_id, + node_title=data.node_title, + form_content=data.form_content, + inputs=data.inputs, + actions=data.actions, + display_in_ui=data.display_in_ui, + form_token=data.form_token, + resolved_default_values=data.resolved_default_values, + expiration_time=data.expiration_time, + ) + + class HumanInputFormFilledResponse(StreamResponse): class Data(BaseModel): """ @@ -355,7 +390,7 @@ class NodeStartStreamResponse(StreamResponse): workflow_run_id: str data: Data - def to_ignore_detail_dict(self): + def to_ignore_detail_dict(self) -> dict[str, JsonValue]: return { "event": self.event.value, "task_id": self.task_id, @@ -412,7 +447,7 @@ class NodeFinishStreamResponse(StreamResponse): workflow_run_id: str data: Data - def to_ignore_detail_dict(self): + def to_ignore_detail_dict(self) -> dict[str, JsonValue]: return { "event": self.event.value, "task_id": self.task_id, @@ -774,6 +809,34 @@ class ChatbotAppBlockingResponse(AppBlockingResponse): data: Data +class AdvancedChatPausedBlockingResponse(AppBlockingResponse): + """ + ChatbotAppPausedBlockingResponse entity + """ + + class Data(BaseModel): + """ + Data entity + """ + + id: str + mode: str + conversation_id: str + message_id: str + workflow_run_id: str + answer: str + 
metadata: Mapping[str, object] = Field(default_factory=dict) + created_at: int + paused_nodes: Sequence[str] = Field(default_factory=list) + reasons: Sequence[Mapping[str, Any]] = Field(default_factory=list[Mapping[str, Any]]) + status: WorkflowExecutionStatus + elapsed_time: float + total_tokens: int + total_steps: int + + data: Data + + class CompletionAppBlockingResponse(AppBlockingResponse): """ CompletionAppBlockingResponse entity @@ -819,6 +882,33 @@ class WorkflowAppBlockingResponse(AppBlockingResponse): data: Data +class WorkflowAppPausedBlockingResponse(AppBlockingResponse): + """ + WorkflowAppPausedBlockingResponse entity + """ + + class Data(BaseModel): + """ + Data entity + """ + + id: str + workflow_id: str + status: WorkflowExecutionStatus + outputs: Mapping[str, Any] | None = None + error: str | None = None + elapsed_time: float + total_tokens: int + total_steps: int + created_at: int + finished_at: int | None + paused_nodes: Sequence[str] = Field(default_factory=list) + reasons: Sequence[Mapping[str, Any]] = Field(default_factory=list) + + workflow_run_id: str + data: Data + + class AgentLogStreamResponse(StreamResponse): """ AgentLogStreamResponse entity diff --git a/api/core/app/llm/model_access.py b/api/core/app/llm/model_access.py index c49c4eb0ac..5631caa1a5 100644 --- a/api/core/app/llm/model_access.py +++ b/api/core/app/llm/model_access.py @@ -1,5 +1,6 @@ from __future__ import annotations +from copy import deepcopy from typing import Any from core.app.entities.app_invoke_entities import DifyRunContext, ModelConfigWithCredentialsEntity @@ -14,8 +15,21 @@ from graphon.nodes.llm.protocols import CredentialsProvider class DifyCredentialsProvider: + """Resolves and returns LLM credentials for a given provider and model. + + Fetched credentials are stored in :attr:`credentials_cache` and reused for + subsequent ``fetch`` calls for the same ``(provider_name, model_name)``. 
+ Because of that cache, a single instance can return stale credentials after + the tenant or provider configuration changes (e.g. API key rotation). + + Do **not** keep one instance for the lifetime of a process or across + unrelated invocations. Create a new provider per request, workflow run, or + other bounded scope where up-to-date credentials matter. + """ + tenant_id: str provider_manager: ProviderManager + credentials_cache: dict[tuple[str, str], dict[str, Any]] def __init__( self, @@ -30,8 +44,12 @@ class DifyCredentialsProvider: user_id=run_context.user_id, ) self.provider_manager = provider_manager + self.credentials_cache = {} def fetch(self, provider_name: str, model_name: str) -> dict[str, Any]: + if (provider_name, model_name) in self.credentials_cache: + return deepcopy(self.credentials_cache[(provider_name, model_name)]) + provider_configurations = self.provider_manager.get_configurations(self.tenant_id) provider_configuration = provider_configurations.get(provider_name) if not provider_configuration: @@ -46,6 +64,7 @@ class DifyCredentialsProvider: if credentials is None: raise ProviderTokenNotInitError(f"Model {model_name} credentials is not initialized.") + self.credentials_cache[(provider_name, model_name)] = deepcopy(credentials) return credentials @@ -65,7 +84,8 @@ class DifyModelFactory: provider_manager=create_plugin_provider_manager( tenant_id=run_context.tenant_id, user_id=run_context.user_id, - ) + ), + enable_credentials_cache=True, ) self.model_manager = model_manager @@ -84,7 +104,7 @@ def build_dify_model_access(run_context: DifyRunContext) -> tuple[CredentialsPro tenant_id=run_context.tenant_id, user_id=run_context.user_id, ) - model_manager = ModelManager(provider_manager=provider_manager) + model_manager = ModelManager(provider_manager=provider_manager, enable_credentials_cache=True) return ( DifyCredentialsProvider(run_context=run_context, provider_manager=provider_manager), diff --git a/api/core/helper/creators.py 
b/api/core/helper/creators.py new file mode 100644 index 0000000000..b01e16f18a --- /dev/null +++ b/api/core/helper/creators.py @@ -0,0 +1,41 @@ +""" +Helper module for Creators Platform integration. + +Provides functionality to upload DSL files to the Creators Platform +and generate redirect URLs with OAuth authorization codes. +""" + +import logging +from urllib.parse import urlencode + +import httpx +from yarl import URL + +from configs import dify_config + +logger = logging.getLogger(__name__) + +creators_platform_api_url = URL(str(dify_config.CREATORS_PLATFORM_API_URL)) + + +def upload_dsl(dsl_file_bytes: bytes, filename: str = "template.yaml") -> str: + url = str(creators_platform_api_url / "api/v1/templates/anonymous-upload") + response = httpx.post(url, files={"file": (filename, dsl_file_bytes)}, timeout=30) + response.raise_for_status() + data = response.json() + claim_code = data.get("data", {}).get("claim_code") + if not claim_code: + raise ValueError("Creators Platform did not return a valid claim_code") + return claim_code + + +def get_redirect_url(user_account_id: str, claim_code: str) -> str: + base_url = str(dify_config.CREATORS_PLATFORM_API_URL).rstrip("/") + params: dict[str, str] = {"dsl_claim_code": claim_code} + client_id = str(dify_config.CREATORS_PLATFORM_OAUTH_CLIENT_ID or "") + if client_id: + from services.oauth_server import OAuthServerService + + oauth_code = OAuthServerService.sign_oauth_authorization_code(client_id, user_account_id) + params["oauth_code"] = oauth_code + return f"{base_url}?{urlencode(params)}" diff --git a/api/core/llm_generator/llm_generator.py b/api/core/llm_generator/llm_generator.py index 348526b0ef..af2611bb0b 100644 --- a/api/core/llm_generator/llm_generator.py +++ b/api/core/llm_generator/llm_generator.py @@ -2,7 +2,7 @@ import json import logging import re from collections.abc import Sequence -from typing import Any, Protocol, TypedDict, cast +from typing import Any, NotRequired, Protocol, TypedDict, cast 
import json_repair from sqlalchemy import select @@ -18,8 +18,6 @@ from core.llm_generator.prompts import ( LLM_MODIFY_CODE_SYSTEM, LLM_MODIFY_PROMPT_SYSTEM, PYTHON_CODE_GENERATOR_PROMPT_TEMPLATE, - SUGGESTED_QUESTIONS_MAX_TOKENS, - SUGGESTED_QUESTIONS_TEMPERATURE, SYSTEM_STRUCTURED_OUTPUT_GENERATE, WORKFLOW_RULE_CONFIG_PROMPT_GENERATE_TEMPLATE, ) @@ -41,6 +39,36 @@ from models.workflow import Workflow logger = logging.getLogger(__name__) +class SuggestedQuestionsModelConfig(TypedDict): + provider: str + name: str + completion_params: NotRequired[dict[str, object]] + + +def _normalize_completion_params(completion_params: dict[str, object]) -> tuple[dict[str, object], list[str]]: + """ + Normalize raw completion params into invocation parameters and stop sequences. + + This mirrors the app-model access path by separating ``stop`` from provider + parameters before invocation, then drops non-positive token limits because + some plugin-backed models reject ``0`` after mapping ``max_tokens`` to their + provider-specific output-token field. 
+ """ + normalized_parameters = dict(completion_params) + stop_value = normalized_parameters.pop("stop", []) + if isinstance(stop_value, list) and all(isinstance(item, str) for item in stop_value): + stop = stop_value + else: + stop = [] + + for token_limit_key in ("max_tokens", "max_output_tokens"): + token_limit = normalized_parameters.get(token_limit_key) + if isinstance(token_limit, int | float) and token_limit <= 0: + normalized_parameters.pop(token_limit_key, None) + + return normalized_parameters, stop + + class WorkflowServiceInterface(Protocol): def get_draft_workflow(self, app_model: App, workflow_id: str | None = None) -> Workflow | None: pass @@ -123,8 +151,15 @@ class LLMGenerator: return name @classmethod - def generate_suggested_questions_after_answer(cls, tenant_id: str, histories: str) -> Sequence[str]: - output_parser = SuggestedQuestionsAfterAnswerOutputParser() + def generate_suggested_questions_after_answer( + cls, + tenant_id: str, + histories: str, + *, + instruction_prompt: str | None = None, + model_config: object | None = None, + ) -> Sequence[str]: + output_parser = SuggestedQuestionsAfterAnswerOutputParser(instruction_prompt=instruction_prompt) format_instructions = output_parser.get_format_instructions() prompt_template = PromptTemplateParser(template="{{histories}}\n{{format_instructions}}\nquestions:\n") @@ -133,10 +168,36 @@ class LLMGenerator: try: model_manager = ModelManager.for_tenant(tenant_id=tenant_id) - model_instance = model_manager.get_default_model_instance( - tenant_id=tenant_id, - model_type=ModelType.LLM, - ) + configured_model = cast(dict[str, object], model_config) if isinstance(model_config, dict) else {} + provider = configured_model.get("provider") + model_name = configured_model.get("name") + use_configured_model = False + + if isinstance(provider, str) and provider and isinstance(model_name, str) and model_name: + try: + model_instance = model_manager.get_model_instance( + tenant_id=tenant_id, + 
model_type=ModelType.LLM, + provider=provider, + model=model_name, + ) + use_configured_model = True + except Exception: + logger.warning( + "Failed to use configured suggested-questions model %s/%s, fallback to default model", + provider, + model_name, + exc_info=True, + ) + model_instance = model_manager.get_default_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM, + ) + else: + model_instance = model_manager.get_default_model_instance( + tenant_id=tenant_id, + model_type=ModelType.LLM, + ) except InvokeAuthorizationError: return [] @@ -145,19 +206,29 @@ class LLMGenerator: questions: Sequence[str] = [] try: + configured_completion_params = configured_model.get("completion_params") + if use_configured_model and isinstance(configured_completion_params, dict): + model_parameters, stop = _normalize_completion_params(configured_completion_params) + elif use_configured_model: + model_parameters = {} + stop = [] + else: + # Default-model generation keeps the built-in suggested-questions tuning. 
+ model_parameters = { + "max_tokens": 2560, + "temperature": 0.0, + } + stop = [] + response: LLMResult = model_instance.invoke_llm( prompt_messages=list(prompt_messages), - model_parameters={ - "max_tokens": SUGGESTED_QUESTIONS_MAX_TOKENS, - "temperature": SUGGESTED_QUESTIONS_TEMPERATURE, - }, + model_parameters=model_parameters, + stop=stop, stream=False, ) text_content = response.message.get_text_content() questions = output_parser.parse(text_content) if text_content else [] - except InvokeError: - questions = [] except Exception: logger.exception("Failed to generate suggested questions after answer") questions = [] diff --git a/api/core/llm_generator/output_parser/suggested_questions_after_answer.py b/api/core/llm_generator/output_parser/suggested_questions_after_answer.py index eec771181f..7ac340926d 100644 --- a/api/core/llm_generator/output_parser/suggested_questions_after_answer.py +++ b/api/core/llm_generator/output_parser/suggested_questions_after_answer.py @@ -3,17 +3,28 @@ import logging import re from collections.abc import Sequence -from core.llm_generator.prompts import SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT +from core.llm_generator.prompts import DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT logger = logging.getLogger(__name__) class SuggestedQuestionsAfterAnswerOutputParser: + def __init__(self, instruction_prompt: str | None = None) -> None: + self._instruction_prompt = self._build_instruction_prompt(instruction_prompt) + + @staticmethod + def _build_instruction_prompt(instruction_prompt: str | None) -> str: + if not instruction_prompt or not instruction_prompt.strip(): + return DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT + + return f'{instruction_prompt}\nYou must output a JSON array like ["question1", "question2", "question3"].' 
+ def get_format_instructions(self) -> str: - return SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT + return self._instruction_prompt def parse(self, text: str) -> Sequence[str]: - action_match = re.search(r"\[.*?\]", text.strip(), re.DOTALL) + stripped_text = text.strip() + action_match = re.search(r"\[.*?\]", stripped_text, re.DOTALL) questions: list[str] = [] if action_match is not None: try: @@ -23,4 +34,6 @@ class SuggestedQuestionsAfterAnswerOutputParser: else: if isinstance(json_obj, list): questions = [question for question in json_obj if isinstance(question, str)] + elif stripped_text: + logger.warning("Failed to find suggested questions payload array in text: %r", stripped_text[:200]) return questions diff --git a/api/core/llm_generator/prompts.py b/api/core/llm_generator/prompts.py index ee9a016c95..3c6f8c468a 100644 --- a/api/core/llm_generator/prompts.py +++ b/api/core/llm_generator/prompts.py @@ -1,5 +1,4 @@ # Written by YORKI MINAKO🤡, Edited by Xiaoyi, Edited by yasu-oh -import os CONVERSATION_TITLE_PROMPT = """You are asked to generate a concise chat title by decomposing the user’s input into two parts: “Intention” and “Subject”. @@ -96,8 +95,8 @@ JAVASCRIPT_CODE_GENERATOR_PROMPT_TEMPLATE = ( ) -# Default prompt for suggested questions (can be overridden by environment variable) -_DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_PROMPT = ( +# Default prompt and model parameters for suggested questions. +DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = ( "Please help me predict the three most likely questions that human would ask, " "and keep each question under 20 characters.\n" "MAKE SURE your output is the SAME language as the Assistant's latest response. 
" @@ -105,15 +104,6 @@ _DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_PROMPT = ( '["question1","question2","question3"]\n' ) -# Environment variable override for suggested questions prompt -SUGGESTED_QUESTIONS_AFTER_ANSWER_INSTRUCTION_PROMPT = os.getenv( - "SUGGESTED_QUESTIONS_PROMPT", _DEFAULT_SUGGESTED_QUESTIONS_AFTER_ANSWER_PROMPT -) - -# Configurable LLM parameters for suggested questions (can be overridden by environment variables) -SUGGESTED_QUESTIONS_MAX_TOKENS = int(os.getenv("SUGGESTED_QUESTIONS_MAX_TOKENS", "256")) -SUGGESTED_QUESTIONS_TEMPERATURE = float(os.getenv("SUGGESTED_QUESTIONS_TEMPERATURE", "0")) - GENERATOR_QA_PROMPT = ( " The user will send a long text. Generate a Question and Answer pairs only using the knowledge" " in the long text. Please think step by step." diff --git a/api/core/model_manager.py b/api/core/model_manager.py index 86d0e3baaa..457c888e33 100644 --- a/api/core/model_manager.py +++ b/api/core/model_manager.py @@ -1,5 +1,6 @@ import logging from collections.abc import Callable, Generator, Iterable, Mapping, Sequence +from copy import deepcopy from typing import IO, Any, Literal, Optional, ParamSpec, TypeVar, Union, cast, overload from configs import dify_config @@ -36,11 +37,13 @@ class ModelInstance: Model instance class. """ - def __init__(self, provider_model_bundle: ProviderModelBundle, model: str): + def __init__(self, provider_model_bundle: ProviderModelBundle, model: str, credentials: dict | None = None) -> None: self.provider_model_bundle = provider_model_bundle self.model_name = model self.provider = provider_model_bundle.configuration.provider.provider - self.credentials = self._fetch_credentials_from_bundle(provider_model_bundle, model) + if credentials is None: + credentials = self._fetch_credentials_from_bundle(provider_model_bundle, model) + self.credentials = credentials # Runtime LLM invocation fields. 
self.parameters: Mapping[str, Any] = {} self.stop: Sequence[str] = () @@ -434,8 +437,30 @@ class ModelInstance: class ModelManager: - def __init__(self, provider_manager: ProviderManager): + """Resolves :class:`ModelInstance` objects for a tenant and provider. + + When ``enable_credentials_cache`` is ``True``, resolved credentials for each + ``(tenant_id, provider, model_type, model)`` are stored in + ``_credentials_cache`` and reused. That can return **stale** credentials after + API keys or provider settings change, so a manager constructed with + ``enable_credentials_cache=True`` should not be kept for the lifetime of a + process or shared across unrelated work. Prefer a new manager per request, + workflow run, or similar bounded scope. + + The default is ``enable_credentials_cache=False``; in that mode the internal + credential cache is not populated, and each ``get_model_instance`` call + loads credentials from the current provider configuration. + """ + + def __init__( + self, + provider_manager: ProviderManager, + *, + enable_credentials_cache: bool = False, + ) -> None: self._provider_manager = provider_manager + self._credentials_cache: dict[tuple[str, str, str, str], Any] = {} + self._enable_credentials_cache = enable_credentials_cache @classmethod def for_tenant(cls, tenant_id: str, user_id: str | None = None) -> "ModelManager": @@ -463,8 +488,19 @@ class ModelManager: tenant_id=tenant_id, provider=provider, model_type=model_type ) - model_instance = ModelInstance(provider_model_bundle, model) - return model_instance + cred_cache_key = (tenant_id, provider, model_type.value, model) + + if cred_cache_key in self._credentials_cache: + return ModelInstance( + provider_model_bundle, + model, + deepcopy(self._credentials_cache[cred_cache_key]), + ) + + ret = ModelInstance(provider_model_bundle, model) + if self._enable_credentials_cache: + self._credentials_cache[cred_cache_key] = deepcopy(ret.credentials) + return ret def 
get_default_provider_model_name(self, tenant_id: str, model_type: ModelType) -> tuple[str | None, str | None]: """ diff --git a/api/core/rag/datasource/keyword/jieba/jieba.py b/api/core/rag/datasource/keyword/jieba/jieba.py index 242da520c1..392af351b6 100644 --- a/api/core/rag/datasource/keyword/jieba/jieba.py +++ b/api/core/rag/datasource/keyword/jieba/jieba.py @@ -156,7 +156,8 @@ class Jieba(BaseKeyword): if dataset_keyword_table: keyword_table_dict = dataset_keyword_table.keyword_table_dict if keyword_table_dict: - return dict(keyword_table_dict["__data__"]["table"]) + data: Any = keyword_table_dict["__data__"] + return dict(data["table"]) else: keyword_data_source_type = dify_config.KEYWORD_DATA_SOURCE_TYPE dataset_keyword_table = DatasetKeywordTable( diff --git a/api/core/rag/datasource/keyword/jieba/jieba_keyword_table_handler.py b/api/core/rag/datasource/keyword/jieba/jieba_keyword_table_handler.py index 1ca6303af6..2af8238cc4 100644 --- a/api/core/rag/datasource/keyword/jieba/jieba_keyword_table_handler.py +++ b/api/core/rag/datasource/keyword/jieba/jieba_keyword_table_handler.py @@ -109,7 +109,7 @@ class JiebaKeywordTableHandler: """Extract keywords with JIEBA tfidf.""" keywords = self._tfidf.extract_tags( sentence=text, - topK=max_keywords_per_chunk, + topK=max_keywords_per_chunk or 10, ) # jieba.analyse.extract_tags returns an untyped list when withFlag is False by default. 
keywords = cast(list[str], keywords) diff --git a/api/core/rag/retrieval/router/multi_dataset_function_call_router.py b/api/core/rag/retrieval/router/multi_dataset_function_call_router.py index 426d1b67dc..dd17545c86 100644 --- a/api/core/rag/retrieval/router/multi_dataset_function_call_router.py +++ b/api/core/rag/retrieval/router/multi_dataset_function_call_router.py @@ -31,7 +31,7 @@ class FunctionCallMultiDatasetRouter: result: LLMResult = model_instance.invoke_llm( # pyright: ignore[reportCallIssue, reportArgumentType] prompt_messages=prompt_messages, tools=dataset_tools, - stream=False, + stream=False, # pyright: ignore[reportArgumentType] model_parameters={"temperature": 0.2, "top_p": 0.3, "max_tokens": 1500}, ) usage = result.usage or LLMUsage.empty_usage() diff --git a/api/core/tools/utils/system_oauth_encryption.py b/api/core/tools/utils/system_encryption.py similarity index 57% rename from api/core/tools/utils/system_oauth_encryption.py rename to api/core/tools/utils/system_encryption.py index 6b7007842d..ca7e6a13fe 100644 --- a/api/core/tools/utils/system_oauth_encryption.py +++ b/api/core/tools/utils/system_encryption.py @@ -14,23 +14,23 @@ from configs import dify_config logger = logging.getLogger(__name__) -class OAuthEncryptionError(Exception): - """OAuth encryption/decryption specific error""" +class EncryptionError(Exception): + """Encryption/decryption specific error""" pass -class SystemOAuthEncrypter: +class SystemEncrypter: """ - A simple OAuth parameters encrypter using AES-CBC encryption. + A simple parameters encrypter using AES-CBC encryption. - This class provides methods to encrypt and decrypt OAuth parameters + This class provides methods to encrypt and decrypt parameters using AES-CBC mode with a key derived from the application's SECRET_KEY. """ def __init__(self, secret_key: str | None = None): """ - Initialize the OAuth encrypter. + Initialize the encrypter. Args: secret_key: Optional secret key. 
If not provided, uses dify_config.SECRET_KEY @@ -43,19 +43,19 @@ class SystemOAuthEncrypter: # Generate a fixed 256-bit key using SHA-256 self.key = hashlib.sha256(secret_key.encode()).digest() - def encrypt_oauth_params(self, oauth_params: Mapping[str, Any]) -> str: + def encrypt_params(self, params: Mapping[str, Any]) -> str: """ - Encrypt OAuth parameters. + Encrypt parameters. Args: - oauth_params: OAuth parameters dictionary, e.g., {"client_id": "xxx", "client_secret": "xxx"} + params: Parameters dictionary, e.g., {"client_id": "xxx", "client_secret": "xxx"} Returns: Base64-encoded encrypted string Raises: - OAuthEncryptionError: If encryption fails - ValueError: If oauth_params is invalid + EncryptionError: If encryption fails + ValueError: If params is invalid """ try: @@ -66,7 +66,7 @@ class SystemOAuthEncrypter: cipher = AES.new(self.key, AES.MODE_CBC, iv) # Encrypt data - padded_data = pad(TypeAdapter(dict).dump_json(dict(oauth_params)), AES.block_size) + padded_data = pad(TypeAdapter(dict).dump_json(dict(params)), AES.block_size) encrypted_data = cipher.encrypt(padded_data) # Combine IV and encrypted data @@ -76,20 +76,20 @@ class SystemOAuthEncrypter: return base64.b64encode(combined).decode() except Exception as e: - raise OAuthEncryptionError(f"Encryption failed: {str(e)}") from e + raise EncryptionError(f"Encryption failed: {str(e)}") from e - def decrypt_oauth_params(self, encrypted_data: str) -> Mapping[str, Any]: + def decrypt_params(self, encrypted_data: str) -> Mapping[str, Any]: """ - Decrypt OAuth parameters. + Decrypt parameters. 
# Factory function for creating encrypter instances
def create_system_encrypter(secret_key: str | None = None) -> SystemEncrypter:
    """Build a fresh :class:`SystemEncrypter`.

    Args:
        secret_key: Optional secret key; defaults to ``dify_config.SECRET_KEY``.

    Returns:
        SystemEncrypter instance
    """
    return SystemEncrypter(secret_key=secret_key)


# Lazily-created global encrypter instance (kept for backward compatibility).
_encrypter: SystemEncrypter | None = None


def get_system_encrypter() -> SystemEncrypter:
    """Return the process-wide encrypter, creating it on first use.

    NOTE(review): first-use creation is not lock-protected; concurrent first
    calls could each build an instance. Harmless here since the key derivation
    is deterministic, but confirm if strict singleton semantics matter.
    """
    global _encrypter
    if _encrypter is None:
        _encrypter = SystemEncrypter()
    return _encrypter


def encrypt_system_params(params: Mapping[str, Any]) -> str:
    """Encrypt ``params`` with the shared global encrypter.

    Returns:
        Base64-encoded encrypted string
    """
    return get_system_encrypter().encrypt_params(params)


def decrypt_system_params(encrypted_data: str) -> Mapping[str, Any]:
    """Decrypt a base64 payload produced by :func:`encrypt_system_params`.

    Returns:
        Decrypted parameters dictionary
    """
    return get_system_encrypter().decrypt_params(encrypted_data)
dict[str, list[tuple[RecipientType, str]]] = {} stmt = select(HumanInputFormRecipient).where(HumanInputFormRecipient.form_id.in_(form_ids)) for recipient in session.scalars(stmt): - priority = _FORM_TOKEN_PRIORITY.get(recipient.recipient_type) - if priority is None or not recipient.access_token: + if not recipient.access_token: continue + recipients_by_form_id.setdefault(recipient.form_id, []).append( + (recipient.recipient_type, recipient.access_token) + ) - candidate = (priority, recipient.access_token) - current = tokens_by_form_id.get(recipient.form_id) - if current is None or candidate[0] < current[0]: - tokens_by_form_id[recipient.form_id] = candidate + tokens_by_form_id: dict[str, str] = {} + for form_id, recipients in recipients_by_form_id.items(): + token = _get_surface_form_token(recipients, surface=surface) + if token is not None: + tokens_by_form_id[form_id] = token + return tokens_by_form_id - return {form_id: token for form_id, (_, token) in tokens_by_form_id.items()} + +def _get_surface_form_token( + recipients: Sequence[tuple[RecipientType, str]], + *, + surface: HumanInputSurface | None, +) -> str | None: + if surface == HumanInputSurface.SERVICE_API: + for recipient_type, token in recipients: + if recipient_type == RecipientType.STANDALONE_WEB_APP and token: + return token + + return get_preferred_form_token(recipients) diff --git a/api/core/workflow/human_input_policy.py b/api/core/workflow/human_input_policy.py new file mode 100644 index 0000000000..798eb8723f --- /dev/null +++ b/api/core/workflow/human_input_policy.py @@ -0,0 +1,73 @@ +from __future__ import annotations + +from collections.abc import Mapping, Sequence +from enum import StrEnum +from typing import Any + +from graphon.entities.pause_reason import PauseReasonType +from models.human_input import RecipientType + + +class HumanInputSurface(StrEnum): + SERVICE_API = "service_api" + CONSOLE = "console" + + +# Service API is intentionally narrower than other surfaces: app-token callers 
def is_recipient_type_allowed_for_surface(
    recipient_type: RecipientType | None,
    surface: HumanInputSurface,
) -> bool:
    """Whether callers on ``surface`` may act on recipients of ``recipient_type``."""
    if recipient_type is None:
        return False
    return recipient_type in _ALLOWED_RECIPIENT_TYPES_BY_SURFACE[surface]


def get_preferred_form_token(
    recipients: Sequence[tuple[RecipientType, str]],
) -> str | None:
    """Pick the token of the highest-priority recipient.

    Priority comes from ``_RECIPIENT_TOKEN_PRIORITY``; recipients with an
    unknown type or an empty token are ignored. Among equal priorities the
    earliest recipient wins. Returns ``None`` when nothing qualifies.
    """
    best: tuple[int, str] | None = None
    for recipient_type, token in recipients:
        rank = _RECIPIENT_TOKEN_PRIORITY.get(recipient_type)
        if rank is None or not token:
            continue
        if best is None or rank < best[0]:
            best = (rank, token)
    return best[1] if best is not None else None


def enrich_human_input_pause_reasons(
    reasons: Sequence[Mapping[str, Any]],
    *,
    form_tokens_by_form_id: Mapping[str, str],
    expiration_times_by_form_id: Mapping[str, int],
) -> list[dict[str, Any]]:
    """Copy ``reasons``, attaching form token / expiration to human-input entries."""
    enriched: list[dict[str, Any]] = []
    for reason in reasons:
        entry = dict(reason)
        # NOTE(review): the discriminator key is the uppercase "TYPE" — confirm
        # this matches how pause reasons are serialized (not lowercase "type").
        if entry.get("TYPE") == PauseReasonType.HUMAN_INPUT_REQUIRED:
            form_id = entry.get("form_id")
            if isinstance(form_id, str):
                entry["form_token"] = form_tokens_by_form_id.get(form_id)
                expiration = expiration_times_by_form_id.get(form_id)
                if expiration is not None:
                    entry["expiration_time"] = expiration
        enriched.append(entry)
    return enriched
= ( + SpecTarget(route="/console/api/swagger.json", filename="console-swagger.json"), + SpecTarget(route="/api/swagger.json", filename="web-swagger.json"), + SpecTarget(route="/v1/swagger.json", filename="service-swagger.json"), +) + +_ORIGINAL_REGISTER_MODEL = Swagger.register_model +_ORIGINAL_REGISTER_FIELD = Swagger.register_field + + +def _apply_runtime_defaults() -> None: + """Force the small config surface required for Swagger generation.""" + + os.environ.setdefault("SECRET_KEY", "spec-export") + os.environ.setdefault("STORAGE_TYPE", "local") + os.environ.setdefault("STORAGE_LOCAL_PATH", "/tmp/dify-storage") + os.environ.setdefault("SWAGGER_UI_ENABLED", "true") + + from configs import dify_config + + dify_config.SECRET_KEY = os.environ["SECRET_KEY"] + dify_config.STORAGE_TYPE = "local" + dify_config.STORAGE_LOCAL_PATH = os.environ["STORAGE_LOCAL_PATH"] + dify_config.SWAGGER_UI_ENABLED = os.environ["SWAGGER_UI_ENABLED"].lower() == "true" + + +def _patch_swagger_for_inline_nested_dicts() -> None: + """Teach Flask-RESTX Swagger generation to tolerate inline nested field maps. + + Some existing controllers use `fields.Nested({...})` with a raw field mapping + instead of a named `api.model(...)`. Flask-RESTX crashes on those anonymous + dicts during schema registration, so this helper upgrades them into temporary + named models at export time. 
+ """ + + if getattr(Swagger, "_dify_inline_nested_dict_patch", False): + return + + def get_or_create_inline_model(self: Swagger, nested_fields: dict[object, object]) -> object: + anonymous_models = getattr(self, "_anonymous_inline_models", None) + if anonymous_models is None: + anonymous_models = {} + self._anonymous_inline_models = anonymous_models + + anonymous_name = anonymous_models.get(id(nested_fields)) + if anonymous_name is None: + anonymous_name = f"_AnonymousInlineModel{len(anonymous_models) + 1}" + anonymous_models[id(nested_fields)] = anonymous_name + self.api.model(anonymous_name, nested_fields) + + return self.api.models[anonymous_name] + + def register_model_with_inline_dict_support(self: Swagger, model: object) -> dict[str, str]: + if isinstance(model, dict): + model = get_or_create_inline_model(self, model) + + return _ORIGINAL_REGISTER_MODEL(self, model) + + def register_field_with_inline_dict_support(self: Swagger, field: object) -> None: + nested = getattr(field, "nested", None) + if isinstance(nested, dict): + field.model = get_or_create_inline_model(self, nested) # type: ignore + + _ORIGINAL_REGISTER_FIELD(self, field) + + Swagger.register_model = register_model_with_inline_dict_support + Swagger.register_field = register_field_with_inline_dict_support + Swagger._dify_inline_nested_dict_patch = True + + +def create_spec_app() -> Flask: + """Build a minimal Flask app that only mounts the Swagger-producing blueprints.""" + + _apply_runtime_defaults() + _patch_swagger_for_inline_nested_dicts() + + app = Flask(__name__) + + from controllers.console import bp as console_bp + from controllers.service_api import bp as service_api_bp + from controllers.web import bp as web_bp + + app.register_blueprint(console_bp) + app.register_blueprint(web_bp) + app.register_blueprint(service_api_bp) + + return app + + +def generate_specs(output_dir: Path) -> list[Path]: + """Write all Swagger specs to `output_dir` and return the written paths.""" + + 
def parse_args() -> argparse.Namespace:
    """Parse CLI options for the spec exporter."""
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-o",
        "--output-dir",
        type=Path,
        default=Path("openapi"),
        help="Directory where the Swagger JSON files will be written.",
    )
    return parser.parse_args()


def main() -> int:
    """Entry point: export every Swagger spec, then report the written paths.

    NOTE(review): paths are reported at DEBUG level, so a default run prints
    nothing — confirm this quiet behavior is intended (e.g. for CI use).
    """
    options = parse_args()
    for spec_path in generate_specs(options.output_dir):
        logger.debug(spec_path)
    return 0


if __name__ == "__main__":
    raise SystemExit(main())
Mapped[float] = mapped_column(sa.Float) @@ -149,7 +149,7 @@ class WorkflowCommentReply(Base): Index("comment_replies_created_at_idx", "created_at"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuidv7()")) + id: Mapped[str] = mapped_column(StringUUID, default=gen_uuidv7_string) comment_id: Mapped[str] = mapped_column( StringUUID, sa.ForeignKey("workflow_comments.id", ondelete="CASCADE"), nullable=False ) @@ -194,7 +194,7 @@ class WorkflowCommentMention(Base): Index("comment_mentions_user_idx", "mentioned_user_id"), ) - id: Mapped[str] = mapped_column(StringUUID, server_default=sa.text("uuidv7()")) + id: Mapped[str] = mapped_column(StringUUID, default=gen_uuidv7_string) comment_id: Mapped[str] = mapped_column( StringUUID, sa.ForeignKey("workflow_comments.id", ondelete="CASCADE"), nullable=False ) diff --git a/api/models/model.py b/api/models/model.py index a1117fc43a..a632735f39 100644 --- a/api/models/model.py +++ b/api/models/model.py @@ -91,6 +91,19 @@ class EnabledConfig(TypedDict): enabled: bool +class SuggestedQuestionsAfterAnswerModelConfig(TypedDict): + provider: str + name: str + mode: NotRequired[str] + completion_params: NotRequired[dict[str, Any]] + + +class SuggestedQuestionsAfterAnswerConfig(TypedDict): + enabled: bool + model: NotRequired[SuggestedQuestionsAfterAnswerModelConfig] + prompt: NotRequired[str] + + class EmbeddingModelInfo(TypedDict): embedding_provider_name: str embedding_model_name: str @@ -220,7 +233,7 @@ class ModelConfig(TypedDict): class AppModelConfigDict(TypedDict): opening_statement: str | None suggested_questions: list[str] - suggested_questions_after_answer: EnabledConfig + suggested_questions_after_answer: SuggestedQuestionsAfterAnswerConfig speech_to_text: EnabledConfig text_to_speech: EnabledConfig retriever_resource: EnabledConfig @@ -680,8 +693,13 @@ class AppModelConfig(TypeBase): return cast(EnabledConfig, json.loads(value) if value else {"enabled": default_enabled}) @property - def 
suggested_questions_after_answer_dict(self) -> EnabledConfig: - return self._get_enabled_config(self.suggested_questions_after_answer) + def suggested_questions_after_answer_dict(self) -> SuggestedQuestionsAfterAnswerConfig: + return cast( + SuggestedQuestionsAfterAnswerConfig, + json.loads(self.suggested_questions_after_answer) + if self.suggested_questions_after_answer + else {"enabled": False}, + ) @property def speech_to_text_dict(self) -> EnabledConfig: diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py index 286dda419c..ac09060e9d 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/data_exporter/test_traceclient.py @@ -225,8 +225,10 @@ class TestSpanBuilder: span = builder.build_span(span_data) assert isinstance(span, ReadableSpan) assert span.name == "test-span" + assert span.context is not None assert span.context.trace_id == 123 assert span.context.span_id == 456 + assert span.parent is not None assert span.parent.span_id == 789 assert span.resource == resource assert span.attributes == {"attr1": "val1"} diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py index 38d33dd21b..a6808fec0a 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/entities/test_aliyun_trace_entity.py @@ -64,12 +64,13 @@ class TestSpanData: def test_span_data_missing_required_fields(self): with pytest.raises(ValidationError): - SpanData( - trace_id=123, - # span_id missing - name="test_span", - start_time=1000, - 
end_time=2000, + SpanData.model_validate( + { + "trace_id": 123, + "name": "test_span", + "start_time": 1000, + "end_time": 2000, + } ) def test_span_data_arbitrary_types_allowed(self): diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py index c1b11c9186..fa00829653 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace.py @@ -2,12 +2,14 @@ from __future__ import annotations from datetime import UTC, datetime from types import SimpleNamespace +from typing import cast from unittest.mock import MagicMock import dify_trace_aliyun.aliyun_trace as aliyun_trace_module import pytest from dify_trace_aliyun.aliyun_trace import AliyunDataTrace from dify_trace_aliyun.config import AliyunConfig +from dify_trace_aliyun.entities.aliyun_trace_entity import SpanData, TraceMetadata from dify_trace_aliyun.entities.semconv import ( GEN_AI_COMPLETION, GEN_AI_INPUT_MESSAGE, @@ -44,7 +46,7 @@ class RecordingTraceClient: self.endpoint = endpoint self.added_spans: list[object] = [] - def add_span(self, span) -> None: + def add_span(self, span: object) -> None: self.added_spans.append(span) def api_check(self) -> bool: @@ -63,11 +65,35 @@ def _make_link(trace_id: int = 1, span_id: int = 2) -> Link: trace_id=trace_id, span_id=span_id, is_remote=False, - trace_flags=TraceFlags.SAMPLED, + trace_flags=TraceFlags(TraceFlags.SAMPLED), ) return Link(context) +def _make_trace_metadata( + trace_id: int = 1, + workflow_span_id: int = 2, + session_id: str = "s", + user_id: str = "u", + links: list[Link] | None = None, +) -> TraceMetadata: + return TraceMetadata( + trace_id=trace_id, + workflow_span_id=workflow_span_id, + session_id=session_id, + user_id=user_id, + links=[] if links is None else links, + ) + + +def 
_recording_trace_client(trace_instance: AliyunDataTrace) -> RecordingTraceClient: + return cast(RecordingTraceClient, trace_instance.trace_client) + + +def _recorded_span_data(trace_instance: AliyunDataTrace) -> list[SpanData]: + return cast(list[SpanData], _recording_trace_client(trace_instance).added_spans) + + def _make_workflow_trace_info(**overrides) -> WorkflowTraceInfo: defaults = { "workflow_id": "workflow-id", @@ -263,20 +289,20 @@ def test_workflow_trace_adds_workflow_and_node_spans(trace_instance: AliyunDataT trace_instance.workflow_trace(trace_info) add_workflow_span.assert_called_once() - passed_trace_metadata = add_workflow_span.call_args.args[1] + passed_trace_metadata = cast(TraceMetadata, add_workflow_span.call_args.args[1]) assert passed_trace_metadata.trace_id == 111 assert passed_trace_metadata.workflow_span_id == 222 assert passed_trace_metadata.session_id == "c" assert passed_trace_metadata.user_id == "u" assert passed_trace_metadata.links == [] - assert trace_instance.trace_client.added_spans == ["span-1", "span-2"] + assert _recording_trace_client(trace_instance).added_spans == ["span-1", "span-2"] def test_message_trace_returns_early_if_no_message_data(trace_instance: AliyunDataTrace): trace_info = _make_message_trace_info(message_data=None) trace_instance.message_trace(trace_info) - assert trace_instance.trace_client.added_spans == [] + assert _recording_trace_client(trace_instance).added_spans == [] def test_message_trace_creates_message_and_llm_spans(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): @@ -302,8 +328,9 @@ def test_message_trace_creates_message_and_llm_spans(trace_instance: AliyunDataT ) trace_instance.message_trace(trace_info) - assert len(trace_instance.trace_client.added_spans) == 2 - message_span, llm_span = trace_instance.trace_client.added_spans + spans = _recorded_span_data(trace_instance) + assert len(spans) == 2 + message_span, llm_span = spans assert message_span.name == "message" assert 
message_span.trace_id == 10 @@ -324,7 +351,7 @@ def test_message_trace_creates_message_and_llm_spans(trace_instance: AliyunDataT def test_dataset_retrieval_trace_returns_early_if_no_message_data(trace_instance: AliyunDataTrace): trace_info = _make_dataset_retrieval_trace_info(message_data=None) trace_instance.dataset_retrieval_trace(trace_info) - assert trace_instance.trace_client.added_spans == [] + assert _recording_trace_client(trace_instance).added_spans == [] def test_dataset_retrieval_trace_creates_span(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): @@ -338,8 +365,9 @@ def test_dataset_retrieval_trace_creates_span(trace_instance: AliyunDataTrace, m monkeypatch.setattr(aliyun_trace_module, "extract_retrieval_documents", lambda _: [{"doc": "d"}]) trace_instance.dataset_retrieval_trace(_make_dataset_retrieval_trace_info(inputs="query")) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "dataset_retrieval" assert span.attributes[RETRIEVAL_QUERY] == "query" assert span.attributes[RETRIEVAL_DOCUMENT] == '[{"doc": "d"}]' @@ -348,7 +376,7 @@ def test_dataset_retrieval_trace_creates_span(trace_instance: AliyunDataTrace, m def test_tool_trace_returns_early_if_no_message_data(trace_instance: AliyunDataTrace): trace_info = _make_tool_trace_info(message_data=None) trace_instance.tool_trace(trace_info) - assert trace_instance.trace_client.added_spans == [] + assert _recording_trace_client(trace_instance).added_spans == [] def test_tool_trace_creates_span(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): @@ -371,8 +399,9 @@ def test_tool_trace_creates_span(trace_instance: AliyunDataTrace, monkeypatch: p ) ) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert 
len(spans) == 1 + span = spans[0] assert span.name == "my-tool" assert span.status == status assert span.attributes[TOOL_NAME] == "my-tool" @@ -409,7 +438,7 @@ def test_get_workflow_node_executions_builds_repo_and_fetches( def test_build_workflow_node_span_routes_llm_type(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_llm_span", MagicMock(return_value="llm")) @@ -422,7 +451,7 @@ def test_build_workflow_node_span_routes_knowledge_retrieval_type( ): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_retrieval_span", MagicMock(return_value="retrieval")) @@ -433,7 +462,7 @@ def test_build_workflow_node_span_routes_knowledge_retrieval_type( def test_build_workflow_node_span_routes_tool_type(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_tool_span", MagicMock(return_value="tool")) @@ -444,7 +473,7 @@ def test_build_workflow_node_span_routes_tool_type(trace_instance: AliyunDataTra def test_build_workflow_node_span_routes_code_type(trace_instance: AliyunDataTrace, monkeypatch: pytest.MonkeyPatch): node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_task_span", MagicMock(return_value="task")) @@ -457,7 +486,7 @@ def test_build_workflow_node_span_handles_errors( ): 
node_execution = MagicMock(spec=WorkflowNodeExecution) trace_info = _make_workflow_trace_info() - trace_metadata = MagicMock() + trace_metadata = _make_trace_metadata() monkeypatch.setattr(trace_instance, "build_workflow_task_span", MagicMock(side_effect=RuntimeError("boom"))) node_execution.node_type = BuiltinNodeTypes.CODE @@ -472,7 +501,7 @@ def test_build_workflow_task_span(trace_instance: AliyunDataTrace, monkeypatch: status = Status(StatusCode.OK) monkeypatch.setattr(aliyun_trace_module, "get_workflow_node_status", lambda _: status) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "title" @@ -494,7 +523,7 @@ def test_build_workflow_tool_span(trace_instance: AliyunDataTrace, monkeypatch: status = Status(StatusCode.OK) monkeypatch.setattr(aliyun_trace_module, "get_workflow_node_status", lambda _: status) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[_make_link()]) + trace_metadata = _make_trace_metadata(links=[_make_link()]) node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "my-tool" @@ -527,7 +556,7 @@ def test_build_workflow_retrieval_span(trace_instance: AliyunDataTrace, monkeypa aliyun_trace_module, "format_retrieval_documents", lambda docs: [{"formatted": True}] if docs else [] ) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "retrieval" @@ -556,7 +585,7 @@ def test_build_workflow_llm_span(trace_instance: AliyunDataTrace, monkeypatch: p monkeypatch.setattr(aliyun_trace_module, "format_input_messages", lambda _: "in") 
monkeypatch.setattr(aliyun_trace_module, "format_output_messages", lambda _: "out") - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() node_execution = MagicMock(spec=WorkflowNodeExecution) node_execution.id = "node-id" node_execution.title = "llm" @@ -594,7 +623,7 @@ def test_add_workflow_span(trace_instance: AliyunDataTrace, monkeypatch: pytest. status = Status(StatusCode.OK) monkeypatch.setattr(aliyun_trace_module, "create_status_from_error", lambda _: status) - trace_metadata = SimpleNamespace(trace_id=1, workflow_span_id=2, session_id="s", user_id="u", links=[]) + trace_metadata = _make_trace_metadata() # CASE 1: With message_id trace_info = _make_workflow_trace_info( @@ -602,9 +631,11 @@ def test_add_workflow_span(trace_instance: AliyunDataTrace, monkeypatch: pytest. ) trace_instance.add_workflow_span(trace_info, trace_metadata) - assert len(trace_instance.trace_client.added_spans) == 2 - message_span = trace_instance.trace_client.added_spans[0] - workflow_span = trace_instance.trace_client.added_spans[1] + client = _recording_trace_client(trace_instance) + spans = _recorded_span_data(trace_instance) + assert len(spans) == 2 + message_span = spans[0] + workflow_span = spans[1] assert message_span.name == "message" assert message_span.span_kind == SpanKind.SERVER @@ -614,13 +645,14 @@ def test_add_workflow_span(trace_instance: AliyunDataTrace, monkeypatch: pytest. 
assert workflow_span.span_kind == SpanKind.INTERNAL assert workflow_span.parent_span_id == 20 - trace_instance.trace_client.added_spans.clear() + client.added_spans.clear() # CASE 2: Without message_id trace_info_no_msg = _make_workflow_trace_info(message_id=None) trace_instance.add_workflow_span(trace_info_no_msg, trace_metadata) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "workflow" assert span.span_kind == SpanKind.SERVER assert span.parent_span_id is None @@ -641,7 +673,8 @@ def test_suggested_question_trace(trace_instance: AliyunDataTrace, monkeypatch: trace_info = _make_suggested_question_trace_info(suggested_question=["how?"]) trace_instance.suggested_question_trace(trace_info) - assert len(trace_instance.trace_client.added_spans) == 1 - span = trace_instance.trace_client.added_spans[0] + spans = _recorded_span_data(trace_instance) + assert len(spans) == 1 + span = spans[0] assert span.name == "suggested_question" assert span.attributes[GEN_AI_COMPLETION] == '["how?"]' diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py index a9e7b80c2a..1b97746dea 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/aliyun_trace/test_aliyun_trace_utils.py @@ -1,4 +1,6 @@ import json +from collections.abc import Mapping +from typing import Any, cast from unittest.mock import MagicMock from dify_trace_aliyun.entities.semconv import ( @@ -170,7 +172,7 @@ def test_create_common_span_attributes(): def test_format_retrieval_documents(): # Not a list - assert format_retrieval_documents("not a list") == [] + assert format_retrieval_documents(cast(list[object], "not 
a list")) == [] # Valid list docs = [ @@ -211,7 +213,7 @@ def test_format_retrieval_documents(): def test_format_input_messages(): # Not a dict - assert format_input_messages(None) == serialize_json_data([]) + assert format_input_messages(cast(Mapping[str, Any], None)) == serialize_json_data([]) # No prompts assert format_input_messages({}) == serialize_json_data([]) @@ -244,7 +246,7 @@ def test_format_input_messages(): def test_format_output_messages(): # Not a dict - assert format_output_messages(None) == serialize_json_data([]) + assert format_output_messages(cast(Mapping[str, Any], None)) == serialize_json_data([]) # No text assert format_output_messages({"finish_reason": "stop"}) == serialize_json_data([]) diff --git a/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py index 1b24ee7421..8068ee1328 100644 --- a/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-aliyun/tests/unit_tests/test_config_entity.py @@ -25,13 +25,13 @@ class TestAliyunConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - AliyunConfig() + AliyunConfig.model_validate({}) with pytest.raises(ValidationError): - AliyunConfig(license_key="test_license") + AliyunConfig.model_validate({"license_key": "test_license"}) with pytest.raises(ValidationError): - AliyunConfig(endpoint="https://tracing-analysis-dc-hz.aliyuncs.com") + AliyunConfig.model_validate({"endpoint": "https://tracing-analysis-dc-hz.aliyuncs.com"}) def test_app_name_validation_empty(self): """Test app_name validation with empty value""" diff --git a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py index b0691a87ea..e9ecc2e083 100644 --- 
a/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py +++ b/api/providers/trace/trace-arize-phoenix/tests/unit_tests/arize_phoenix_trace/test_arize_phoenix_trace.py @@ -1,4 +1,5 @@ from datetime import UTC, datetime, timedelta +from typing import cast from unittest.mock import MagicMock, patch import pytest @@ -129,7 +130,7 @@ def test_set_span_status(): return "SilentErrorRepr" span.reset_mock() - set_span_status(span, SilentError()) + set_span_status(span, cast(Exception | str | None, SilentError())) assert span.add_event.call_args[1]["attributes"][OTELSpanAttributes.EXCEPTION_MESSAGE] == "SilentErrorRepr" diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py index 103d888eef..0c3c3fc81e 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/test_config_entity.py @@ -28,13 +28,13 @@ class TestLangfuseConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - LangfuseConfig() + LangfuseConfig.model_validate({}) with pytest.raises(ValidationError): - LangfuseConfig(public_key="public") + LangfuseConfig.model_validate({"public_key": "public"}) with pytest.raises(ValidationError): - LangfuseConfig(secret_key="secret") + LangfuseConfig.model_validate({"secret_key": "secret"}) def test_host_validation_empty(self): """Test host validation with empty value""" diff --git a/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py b/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py index 0340ffb669..82d69b6180 100644 --- a/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py +++ b/api/providers/trace/trace-langfuse/tests/unit_tests/test_langfuse_trace.py @@ -2,6 +2,7 @@ from datetime import 
datetime, timedelta from types import SimpleNamespace +from typing import cast from unittest.mock import MagicMock, patch from dify_trace_langfuse.config import LangfuseConfig @@ -134,4 +135,4 @@ class TestLangFuseDataTraceCompletionStartTime: assert trace._get_completion_start_time(start_time, None) is None assert trace._get_completion_start_time(start_time, -1) is None - assert trace._get_completion_start_time(start_time, "invalid") is None + assert trace._get_completion_start_time(start_time, cast(float | int | None, "invalid")) is None diff --git a/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py index 37efaf69cf..bd226c9f1a 100644 --- a/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-langsmith/tests/unit_tests/test_config_entity.py @@ -21,13 +21,13 @@ class TestLangSmithConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - LangSmithConfig() + LangSmithConfig.model_validate({}) with pytest.raises(ValidationError): - LangSmithConfig(api_key="key") + LangSmithConfig.model_validate({"api_key": "key"}) with pytest.raises(ValidationError): - LangSmithConfig(project="project") + LangSmithConfig.model_validate({"project": "project"}) def test_endpoint_validation_https_only(self): """Test endpoint validation only allows HTTPS""" diff --git a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py index 20211456e3..46c9750a5d 100644 --- a/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py +++ b/api/providers/trace/trace-mlflow/tests/unit_tests/mlflow_trace/test_mlflow_trace.py @@ -599,7 +599,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span 
mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_instance.message_trace(_make_message_trace_info()) mock_tracing["start"].assert_called_once() @@ -609,7 +608,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_info = _make_message_trace_info(error="something broke") trace_instance.message_trace(trace_info) @@ -620,7 +618,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None monkeypatch.setenv("FILES_URL", "http://files.test") file_data = SimpleNamespace(url="path/to/file.png") @@ -638,7 +635,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_info = _make_message_trace_info(file_list=None, message_file_data=None) trace_instance.message_trace(trace_info) @@ -651,7 +647,6 @@ class TestMessageTrace: end_user = MagicMock() end_user.session_id = "session-xyz" - mock_db.session.query.return_value.where.return_value.first.return_value = end_user trace_info = _make_message_trace_info( metadata={"from_end_user_id": "eu-1", "conversation_id": "c1"}, @@ -664,7 +659,6 @@ class TestMessageTrace: span = MagicMock() mock_tracing["start"].return_value = span mock_tracing["set"].return_value = "token" - mock_db.session.query.return_value.where.return_value.first.return_value = None trace_info = _make_message_trace_info( metadata={"from_account_id": "acc-1"}, diff --git a/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py b/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py index 
fba290f5b8..2e0796c291 100644 --- a/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py +++ b/api/providers/trace/trace-opik/tests/unit_tests/test_opik_trace.py @@ -12,6 +12,7 @@ from __future__ import annotations import uuid from datetime import datetime +from typing import cast from unittest.mock import MagicMock, patch from dify_trace_opik.opik_trace import OpikDataTrace, _seed_to_uuid4, prepare_opik_uuid @@ -69,6 +70,14 @@ def _make_opik_trace_instance() -> OpikDataTrace: return instance +def _add_trace_mock(instance: OpikDataTrace) -> MagicMock: + return cast(MagicMock, instance.add_trace) + + +def _add_span_mock(instance: OpikDataTrace) -> MagicMock: + return cast(MagicMock, instance.add_span) + + # --------------------------------------------------------------------------- # _seed_to_uuid4 # --------------------------------------------------------------------------- @@ -155,21 +164,21 @@ class TestWorkflowTraceWithoutMessageId: def test_root_span_is_created(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - assert instance.add_span.called + assert _add_span_mock(instance).called def test_root_span_id_matches_expected(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) expected = self._expected_root_span_id(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["id"] == expected def test_root_span_has_no_parent(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["parent_span_id"] is None def test_trace_name_is_workflow_trace(self): @@ -177,21 +186,21 @@ class TestWorkflowTraceWithoutMessageId: trace_info = 
_make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - trace_kwargs = instance.add_trace.call_args_list[0][0][0] + trace_kwargs = _add_trace_mock(instance).call_args_list[0][0][0] assert trace_kwargs["name"] == TraceTaskName.WORKFLOW_TRACE def test_root_span_name_is_workflow_trace(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["name"] == TraceTaskName.WORKFLOW_TRACE def test_root_span_has_workflow_tag(self): trace_info = _make_workflow_trace_info(message_id=None) instance = self._run(trace_info) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert "workflow" in root_span_kwargs["tags"] def test_node_execution_spans_are_parented_to_root(self): @@ -214,8 +223,9 @@ class TestWorkflowTraceWithoutMessageId: instance = self._run(trace_info, node_executions=[node_exec]) # call_args_list[0] = root span, [1] = node execution span - assert instance.add_span.call_count == 2 - node_span_kwargs = instance.add_span.call_args_list[1][0][0] + add_span = _add_span_mock(instance) + assert add_span.call_count == 2 + node_span_kwargs = add_span.call_args_list[1][0][0] assert node_span_kwargs["parent_span_id"] == expected_root_span_id def test_node_span_not_parented_to_workflow_app_log_id(self): @@ -240,7 +250,7 @@ class TestWorkflowTraceWithoutMessageId: instance = self._run(trace_info, node_executions=[node_exec]) old_parent_id = prepare_opik_uuid(trace_info.start_time, trace_info.workflow_app_log_id) - node_span_kwargs = instance.add_span.call_args_list[1][0][0] + node_span_kwargs = _add_span_mock(instance).call_args_list[1][0][0] assert node_span_kwargs["parent_span_id"] != old_parent_id def test_root_span_id_differs_from_trace_id(self): @@ -283,7 +293,7 @@ class 
TestWorkflowTraceWithMessageId: trace_info = _make_workflow_trace_info(message_id=self._MESSAGE_ID) instance = self._run(trace_info) - trace_kwargs = instance.add_trace.call_args_list[0][0][0] + trace_kwargs = _add_trace_mock(instance).call_args_list[0][0][0] assert trace_kwargs["name"] == TraceTaskName.MESSAGE_TRACE def test_root_span_uses_workflow_run_id_directly(self): @@ -292,7 +302,7 @@ class TestWorkflowTraceWithMessageId: instance = self._run(trace_info) expected_root_span_id = prepare_opik_uuid(trace_info.start_time, trace_info.workflow_run_id) - root_span_kwargs = instance.add_span.call_args_list[0][0][0] + root_span_kwargs = _add_span_mock(instance).call_args_list[0][0][0] assert root_span_kwargs["id"] == expected_root_span_id def test_root_span_id_differs_from_no_message_id_case(self): @@ -326,5 +336,5 @@ class TestWorkflowTraceWithMessageId: instance = self._run(trace_info, node_executions=[node_exec]) - node_span_kwargs = instance.add_span.call_args_list[1][0][0] + node_span_kwargs = _add_span_mock(instance).call_args_list[1][0][0] assert node_span_kwargs["parent_span_id"] == expected_root_span_id diff --git a/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py b/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py index 1e656e2462..3cd918f408 100644 --- a/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py +++ b/api/providers/trace/trace-tencent/tests/unit_tests/tencent_trace/test_client.py @@ -5,6 +5,7 @@ from __future__ import annotations import sys import types from types import SimpleNamespace +from typing import Any, TypedDict, cast from unittest.mock import MagicMock import pytest @@ -12,7 +13,7 @@ from dify_trace_tencent import client as client_module from dify_trace_tencent.client import TencentTraceClient, _get_opentelemetry_sdk_version from dify_trace_tencent.entities.tencent_trace_entity import SpanData from opentelemetry.sdk.trace import Event -from 
opentelemetry.trace import Status, StatusCode +from opentelemetry.trace import SpanContext, Status, StatusCode, TraceFlags metric_reader_instances: list[DummyMetricReader] = [] meter_provider_instances: list[DummyMeterProvider] = [] @@ -80,6 +81,16 @@ class DummyJsonMetricExporterNoTemporality: self.kwargs = kwargs +class PatchedCoreComponents(TypedDict): + span_exporter: MagicMock + span_processor: MagicMock + tracer: MagicMock + span: MagicMock + tracer_provider: MagicMock + logger: MagicMock + trace_api: Any + + def _add_stub_modules(monkeypatch: pytest.MonkeyPatch) -> None: """Drop fake metric modules into sys.modules so the client imports resolve.""" @@ -118,7 +129,7 @@ def stub_metric_modules(monkeypatch: pytest.MonkeyPatch) -> None: @pytest.fixture(autouse=True) -def patch_core_components(monkeypatch: pytest.MonkeyPatch) -> dict[str, object]: +def patch_core_components(monkeypatch: pytest.MonkeyPatch) -> PatchedCoreComponents: span_exporter = MagicMock(name="span_exporter") monkeypatch.setattr(client_module, "OTLPSpanExporter", MagicMock(return_value=span_exporter)) @@ -168,6 +179,15 @@ def patch_core_components(monkeypatch: pytest.MonkeyPatch) -> dict[str, object]: } +def _make_span_context(trace_id: int = 1, span_id: int = 2) -> SpanContext: + return SpanContext( + trace_id=trace_id, + span_id=span_id, + is_remote=False, + trace_flags=TraceFlags(TraceFlags.SAMPLED), + ) + + def _build_client() -> TencentTraceClient: return TencentTraceClient( service_name="service", @@ -208,7 +228,7 @@ def test_resolve_grpc_target_parsable_variants(endpoint: str, expected: tuple[st def test_resolve_grpc_target_handles_errors() -> None: - assert TencentTraceClient._resolve_grpc_target(123) == ("localhost:4317", True, "localhost", 4317) + assert TencentTraceClient._resolve_grpc_target(cast(str, 123)) == ("localhost:4317", True, "localhost", 4317) @pytest.mark.parametrize( @@ -248,7 +268,7 @@ def test_record_methods_skip_when_histogram_missing() -> None: 
client.record_trace_duration(0.5) -def test_record_llm_duration_handles_exceptions(patch_core_components: dict[str, object]) -> None: +def test_record_llm_duration_handles_exceptions(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() client.hist_llm_duration = MagicMock(name="hist_llm_duration") client.hist_llm_duration.record.side_effect = RuntimeError("boom") @@ -258,10 +278,11 @@ def test_record_llm_duration_handles_exceptions(patch_core_components: dict[str, logger.debug.assert_called() -def test_create_and_export_span_sets_attributes(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_sets_attributes(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span = patch_core_components["span"] - span.get_span_context.return_value = "ctx" + ctx = _make_span_context(span_id=2) + span.get_span_context.return_value = ctx data = SpanData( trace_id=1, @@ -280,14 +301,15 @@ def test_create_and_export_span_sets_attributes(patch_core_components: dict[str, span.add_event.assert_called_once() span.set_status.assert_called_once() span.end.assert_called_once_with(end_time=20) - assert client.span_contexts[2] == "ctx" + assert client.span_contexts[2] == ctx -def test_create_and_export_span_uses_parent_context(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_uses_parent_context(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() - client.span_contexts[10] = "existing" + existing_context = _make_span_context(span_id=10) + client.span_contexts[10] = existing_context span = patch_core_components["span"] - span.get_span_context.return_value = "child" + span.get_span_context.return_value = _make_span_context(span_id=11) data = SpanData( trace_id=1, @@ -302,14 +324,14 @@ def test_create_and_export_span_uses_parent_context(patch_core_components: dict[ client._create_and_export_span(data) trace_api = patch_core_components["trace_api"] - 
trace_api.NonRecordingSpan.assert_called_once_with("existing") + trace_api.NonRecordingSpan.assert_called_once_with(existing_context) trace_api.set_span_in_context.assert_called_once() -def test_create_and_export_span_exception_logs_error(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_exception_logs_error(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span = patch_core_components["span"] - span.get_span_context.return_value = "ctx" + span.get_span_context.return_value = _make_span_context(span_id=2) client.tracer.start_span.side_effect = RuntimeError("boom") client._create_and_export_span( @@ -385,7 +407,7 @@ def test_get_project_url() -> None: assert client.get_project_url() == "https://console.cloud.tencent.com/apm" -def test_shutdown_flushes_all_components(patch_core_components: dict[str, object]) -> None: +def test_shutdown_flushes_all_components(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span_processor = patch_core_components["span_processor"] tracer_provider = patch_core_components["tracer_provider"] @@ -401,10 +423,11 @@ def test_shutdown_flushes_all_components(patch_core_components: dict[str, object metric_reader.shutdown.assert_called_once() -def test_shutdown_logs_when_meter_provider_fails(patch_core_components: dict[str, object]) -> None: +def test_shutdown_logs_when_meter_provider_fails(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() meter_provider = meter_provider_instances[-1] meter_provider.shutdown.side_effect = RuntimeError("boom") + assert client.metric_reader is not None client.metric_reader.shutdown.side_effect = RuntimeError("boom") client.shutdown() @@ -433,7 +456,7 @@ def test_metrics_initialization_failure_sets_histogram_attributes(monkeypatch: p assert client.metric_reader is None -def test_add_span_logs_exception(monkeypatch: pytest.MonkeyPatch, patch_core_components: dict[str, object]) -> None: +def 
test_add_span_logs_exception(monkeypatch: pytest.MonkeyPatch, patch_core_components: PatchedCoreComponents) -> None: client = _build_client() monkeypatch.setattr(client, "_create_and_export_span", MagicMock(side_effect=RuntimeError("boom"))) @@ -454,10 +477,10 @@ def test_add_span_logs_exception(monkeypatch: pytest.MonkeyPatch, patch_core_com logger.exception.assert_called_once() -def test_create_and_export_span_converts_attribute_types(patch_core_components: dict[str, object]) -> None: +def test_create_and_export_span_converts_attribute_types(patch_core_components: PatchedCoreComponents) -> None: client = _build_client() span = patch_core_components["span"] - span.get_span_context.return_value = "ctx" + span.get_span_context.return_value = _make_span_context(span_id=2) data = SpanData.model_construct( trace_id=1, @@ -485,7 +508,7 @@ def test_record_llm_duration_converts_attributes() -> None: hist_mock = MagicMock(name="hist_llm_duration") client.hist_llm_duration = hist_mock - client.record_llm_duration(0.3, {"foo": object(), "bar": 2}) + client.record_llm_duration(0.3, cast(dict[str, str], {"foo": object(), "bar": 2})) _, attrs = hist_mock.record.call_args.args assert isinstance(attrs["foo"], str) assert attrs["bar"] == 2 @@ -496,7 +519,7 @@ def test_record_trace_duration_converts_attributes() -> None: hist_mock = MagicMock(name="hist_trace_duration") client.hist_trace_duration = hist_mock - client.record_trace_duration(1.0, {"meta": object(), "ok": True}) + client.record_trace_duration(1.0, cast(dict[str, str], {"meta": object(), "ok": True})) _, attrs = hist_mock.record.call_args.args assert isinstance(attrs["meta"], str) assert attrs["ok"] is True @@ -512,7 +535,7 @@ def test_record_trace_duration_converts_attributes() -> None: ], ) def test_record_methods_handle_exceptions( - method: str, attr_name: str, args: tuple[object, ...], patch_core_components: dict[str, object] + method: str, attr_name: str, args: tuple[object, ...], patch_core_components: 
PatchedCoreComponents ) -> None: client = _build_client() hist_mock = MagicMock(name=attr_name) @@ -527,35 +550,38 @@ def test_record_methods_handle_exceptions( def test_metrics_initializes_grpc_metric_exporter() -> None: client = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyGrpcMetricExporter, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyGrpcMetricExporter) + assert isinstance(exporter, DummyGrpcMetricExporter) assert metric_reader.export_interval_millis == client.metrics_export_interval_sec * 1000 - assert metric_reader.exporter.kwargs["endpoint"] == "trace.example.com:4317" - assert metric_reader.exporter.kwargs["insecure"] is False - assert metric_reader.exporter.kwargs["headers"]["authorization"] == "Bearer token" + assert exporter.kwargs["endpoint"] == "trace.example.com:4317" + assert exporter.kwargs["insecure"] is False + assert cast(dict[str, dict[str, str]], exporter.kwargs)["headers"]["authorization"] == "Bearer token" def test_metrics_initializes_http_protobuf_metric_exporter(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("OTEL_EXPORTER_OTLP_PROTOCOL", "http/protobuf") client = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyHttpMetricExporter, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyHttpMetricExporter) + assert isinstance(exporter, DummyHttpMetricExporter) assert metric_reader.export_interval_millis == client.metrics_export_interval_sec * 1000 - assert metric_reader.exporter.kwargs["endpoint"] == client.endpoint - assert metric_reader.exporter.kwargs["headers"]["authorization"] == "Bearer token" + assert exporter.kwargs["endpoint"] == client.endpoint + assert cast(dict[str, dict[str, str]], exporter.kwargs)["headers"]["authorization"] == "Bearer token" def test_metrics_initializes_http_json_metric_exporter(monkeypatch: pytest.MonkeyPatch) -> None: monkeypatch.setenv("OTEL_EXPORTER_OTLP_PROTOCOL", 
"http/json") client = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyJsonMetricExporter, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyJsonMetricExporter) + assert isinstance(exporter, DummyJsonMetricExporter) assert metric_reader.export_interval_millis == client.metrics_export_interval_sec * 1000 - assert metric_reader.exporter.kwargs["endpoint"] == client.endpoint - assert metric_reader.exporter.kwargs["headers"]["authorization"] == "Bearer token" - assert "preferred_temporality" in metric_reader.exporter.kwargs + assert exporter.kwargs["endpoint"] == client.endpoint + assert cast(dict[str, dict[str, str]], exporter.kwargs)["headers"]["authorization"] == "Bearer token" + assert "preferred_temporality" in exporter.kwargs def test_metrics_http_json_metric_exporter_falls_back_without_temporality(monkeypatch: pytest.MonkeyPatch) -> None: @@ -564,9 +590,10 @@ def test_metrics_http_json_metric_exporter_falls_back_without_temporality(monkey monkeypatch.setattr(exporter_module, "OTLPMetricExporter", DummyJsonMetricExporterNoTemporality) _ = _build_client() metric_reader = metric_reader_instances[-1] + exporter = cast(DummyJsonMetricExporterNoTemporality, metric_reader.exporter) - assert isinstance(metric_reader.exporter, DummyJsonMetricExporterNoTemporality) - assert "preferred_temporality" not in metric_reader.exporter.kwargs + assert isinstance(exporter, DummyJsonMetricExporterNoTemporality) + assert "preferred_temporality" not in exporter.kwargs def test_metrics_http_json_uses_http_fallback_when_no_json_exporter(monkeypatch: pytest.MonkeyPatch) -> None: diff --git a/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py b/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py index eeb1fe1d87..377c768198 100644 --- a/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py +++ b/api/providers/trace/trace-weave/tests/unit_tests/test_config_entity.py @@ -31,13 
+31,13 @@ class TestWeaveConfig: def test_missing_required_fields(self): """Test that required fields are enforced""" with pytest.raises(ValidationError): - WeaveConfig() + WeaveConfig.model_validate({}) with pytest.raises(ValidationError): - WeaveConfig(api_key="key") + WeaveConfig.model_validate({"api_key": "key"}) with pytest.raises(ValidationError): - WeaveConfig(project="project") + WeaveConfig.model_validate({"project": "project"}) def test_endpoint_validation_https_only(self): """Test endpoint validation only allows HTTPS""" diff --git a/api/providers/vdb/vdb-couchbase/src/dify_vdb_couchbase/couchbase_vector.py b/api/providers/vdb/vdb-couchbase/src/dify_vdb_couchbase/couchbase_vector.py index 815ac30c0b..bab176e285 100644 --- a/api/providers/vdb/vdb-couchbase/src/dify_vdb_couchbase/couchbase_vector.py +++ b/api/providers/vdb/vdb-couchbase/src/dify_vdb_couchbase/couchbase_vector.py @@ -59,7 +59,7 @@ class CouchbaseVector(BaseVector): auth = PasswordAuthenticator(config.user, config.password) options = ClusterOptions(auth) - self._cluster = Cluster(config.connection_string, options) + self._cluster = Cluster(config.connection_string, options) # pyright: ignore[reportArgumentType] self._bucket = self._cluster.bucket(config.bucket_name) self._scope = self._bucket.scope(config.scope_name) self._bucket_name = config.bucket_name @@ -306,7 +306,7 @@ class CouchbaseVector(BaseVector): def search_by_full_text(self, query: str, **kwargs: Any) -> list[Document]: top_k = kwargs.get("top_k", 4) try: - CBrequest = search.SearchRequest.create(search.QueryStringQuery("text:" + query)) + CBrequest = search.SearchRequest.create(search.QueryStringQuery("text:" + query)) # pyright: ignore[reportCallIssue] search_iter = self._scope.search( self._collection_name + "_search", CBrequest, SearchOptions(limit=top_k, fields=["*"]) ) diff --git a/api/providers/vdb/vdb-milvus/src/dify_vdb_milvus/milvus_vector.py b/api/providers/vdb/vdb-milvus/src/dify_vdb_milvus/milvus_vector.py index 
46f3224a95..823b877707 100644 --- a/api/providers/vdb/vdb-milvus/src/dify_vdb_milvus/milvus_vector.py +++ b/api/providers/vdb/vdb-milvus/src/dify_vdb_milvus/milvus_vector.py @@ -1,6 +1,6 @@ import json import logging -from typing import Any, TypedDict +from typing import Any, TypedDict, cast from packaging import version from pydantic import BaseModel, model_validator @@ -92,7 +92,7 @@ class MilvusVector(BaseVector): def _load_collection_fields(self, fields: list[str] | None = None): if fields is None: # Load collection fields from remote server - collection_info = self._client.describe_collection(self._collection_name) + collection_info = cast(dict[str, Any], self._client.describe_collection(self._collection_name)) fields = [field["name"] for field in collection_info["fields"]] # Since primary field is auto-id, no need to track it self._fields = [f for f in fields if f != Field.PRIMARY_KEY] @@ -106,7 +106,8 @@ class MilvusVector(BaseVector): return False try: - milvus_version = self._client.get_server_version() + milvus_version_raw = self._client.get_server_version() + milvus_version = milvus_version_raw if isinstance(milvus_version_raw, str) else str(milvus_version_raw) # Check if it's Zilliz Cloud - it supports full-text search with Milvus 2.5 compatibility if "Zilliz Cloud" in milvus_version: return True diff --git a/api/providers/vdb/vdb-oracle/src/dify_vdb_oracle/oraclevector.py b/api/providers/vdb/vdb-oracle/src/dify_vdb_oracle/oraclevector.py index 70377c82c8..5d9ab38529 100644 --- a/api/providers/vdb/vdb-oracle/src/dify_vdb_oracle/oraclevector.py +++ b/api/providers/vdb/vdb-oracle/src/dify_vdb_oracle/oraclevector.py @@ -3,7 +3,7 @@ import json import logging import re import uuid -from typing import Any +from typing import Any, TypedDict import jieba.posseg as pseg # type: ignore import numpy @@ -25,6 +25,18 @@ logger = logging.getLogger(__name__) oracledb.defaults.fetch_lobs = False +class _OraclePoolParams(TypedDict, total=False): + user: str + password: 
str + dsn: str + min: int + max: int + increment: int + config_dir: str | None + wallet_location: str | None + wallet_password: str | None + + class OracleVectorConfig(BaseModel): user: str password: str @@ -127,22 +139,18 @@ class OracleVector(BaseVector): return connection def _create_connection_pool(self, config: OracleVectorConfig): - pool_params = { - "user": config.user, - "password": config.password, - "dsn": config.dsn, - "min": 1, - "max": 5, - "increment": 1, - } + pool_params = _OraclePoolParams( + user=config.user, + password=config.password, + dsn=config.dsn, + min=1, + max=5, + increment=1, + ) if config.is_autonomous: - pool_params.update( - { - "config_dir": config.config_dir, - "wallet_location": config.wallet_location, - "wallet_password": config.wallet_password, - } - ) + pool_params["config_dir"] = config.config_dir + pool_params["wallet_location"] = config.wallet_location + pool_params["wallet_password"] = config.wallet_password return oracledb.create_pool(**pool_params) def create(self, texts: list[Document], embeddings: list[list[float]], **kwargs): diff --git a/api/pyproject.toml b/api/pyproject.toml index fbd5d394ad..31a6ea115c 100644 --- a/api/pyproject.toml +++ b/api/pyproject.toml @@ -173,7 +173,7 @@ dev = [ # "locust>=2.40.4", # Temporarily removed due to compatibility issues. Uncomment when resolved. 
"pytest-timeout>=2.4.0", "pytest-xdist>=3.8.0", - "pyrefly>=0.61.1", + "pyrefly>=0.62.0", "xinference-client>=2.5.0", ] diff --git a/api/repositories/sqlalchemy_api_workflow_run_repository.py b/api/repositories/sqlalchemy_api_workflow_run_repository.py index 474b200fc5..71a2554a60 100644 --- a/api/repositories/sqlalchemy_api_workflow_run_repository.py +++ b/api/repositories/sqlalchemy_api_workflow_run_repository.py @@ -42,7 +42,7 @@ from libs.helper import convert_datetime_to_date from libs.infinite_scroll_pagination import InfiniteScrollPagination from libs.time_parser import get_time_threshold from models.enums import WorkflowRunTriggeredFrom -from models.human_input import HumanInputForm +from models.human_input import HumanInputForm, HumanInputFormRecipient from models.workflow import WorkflowAppLog, WorkflowArchiveLog, WorkflowPause, WorkflowPauseReason, WorkflowRun from repositories.api_workflow_run_repository import APIWorkflowRunRepository, RunsWithRelatedCountsDict from repositories.entities.workflow_pause import WorkflowPauseEntity @@ -63,6 +63,7 @@ class _WorkflowRunError(Exception): def _build_human_input_required_reason( reason_model: WorkflowPauseReason, form_model: HumanInputForm | None, + recipients: Sequence[HumanInputFormRecipient] = (), ) -> HumanInputRequired: form_content = "" inputs = [] @@ -89,7 +90,7 @@ def _build_human_input_required_reason( resolved_default_values = dict(definition.default_values) node_title = definition.node_title or node_title - return HumanInputRequired( + reason = HumanInputRequired( form_id=form_id, form_content=form_content, inputs=inputs, @@ -98,6 +99,7 @@ def _build_human_input_required_reason( node_title=node_title, resolved_default_values=resolved_default_values, ) + return reason class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): @@ -804,12 +806,23 @@ class DifyAPISQLAlchemyWorkflowRunRepository(APIWorkflowRunRepository): form_stmt = 
select(HumanInputForm).where(HumanInputForm.id.in_(form_ids)) for form in session.scalars(form_stmt).all(): form_models[form.id] = form + recipients_by_form_id: dict[str, list[HumanInputFormRecipient]] = {} + if form_ids: + recipient_stmt = select(HumanInputFormRecipient).where(HumanInputFormRecipient.form_id.in_(form_ids)) + for recipient in session.scalars(recipient_stmt).all(): + recipients_by_form_id.setdefault(recipient.form_id, []).append(recipient) pause_reasons: list[PauseReason] = [] for reason in pause_reason_models: if reason.type_ == PauseReasonType.HUMAN_INPUT_REQUIRED: form_model = form_models.get(reason.form_id) - pause_reasons.append(_build_human_input_required_reason(reason, form_model)) + pause_reasons.append( + _build_human_input_required_reason( + reason, + form_model, + recipients_by_form_id.get(reason.form_id, ()), + ) + ) else: pause_reasons.append(reason.to_entity()) return pause_reasons diff --git a/api/services/app_generate_service.py b/api/services/app_generate_service.py index 5e8c7aa337..8ff53d143b 100644 --- a/api/services/app_generate_service.py +++ b/api/services/app_generate_service.py @@ -162,6 +162,7 @@ class AppGenerateService: invoke_from=invoke_from, streaming=True, call_depth=0, + workflow_run_id=str(uuid.uuid4()), ) payload_json = payload.model_dump_json() @@ -183,6 +184,10 @@ class AppGenerateService: else: # Blocking mode: run synchronously and return JSON instead of SSE # Keep behaviour consistent with WORKFLOW blocking branch. 
+ pause_config = PauseStateLayerConfig( + session_factory=session_factory.get_session_maker(), + state_owner_user_id=workflow.created_by, + ) advanced_generator = AdvancedChatAppGenerator() return rate_limit.generate( advanced_generator.convert_to_event_stream( @@ -194,6 +199,7 @@ class AppGenerateService: invoke_from=invoke_from, workflow_run_id=str(uuid.uuid4()), streaming=False, + pause_state_config=pause_config, ) ), request_id=request_id, diff --git a/api/services/enterprise/enterprise_service.py b/api/services/enterprise/enterprise_service.py index 5040fcc7e3..bd7758f1c0 100644 --- a/api/services/enterprise/enterprise_service.py +++ b/api/services/enterprise/enterprise_service.py @@ -5,6 +5,7 @@ import uuid from datetime import datetime from typing import TYPE_CHECKING +from cachetools.func import ttl_cache from pydantic import BaseModel, ConfigDict, Field, model_validator from configs import dify_config @@ -99,6 +100,7 @@ def try_join_default_workspace(account_id: str) -> None: class EnterpriseService: @classmethod + @ttl_cache(ttl=5) def get_info(cls): return EnterpriseRequest.send_request("GET", "/info") diff --git a/api/services/feature_service.py b/api/services/feature_service.py index e18eb096c9..38518378f7 100644 --- a/api/services/feature_service.py +++ b/api/services/feature_service.py @@ -177,6 +177,7 @@ class SystemFeatureModel(BaseModel): enable_change_email: bool = True plugin_manager: PluginManagerModel = PluginManagerModel() trial_models: list[str] = [] + enable_creators_platform: bool = False enable_trial_app: bool = False enable_explore_banner: bool = False @@ -241,6 +242,9 @@ class FeatureService: if dify_config.MARKETPLACE_ENABLED: system_features.enable_marketplace = True + if dify_config.CREATORS_PLATFORM_FEATURES_ENABLED: + system_features.enable_creators_platform = True + return system_features @classmethod diff --git a/api/services/message_service.py b/api/services/message_service.py index 98f24dd6a6..8f5e028d4d 100644 --- 
a/api/services/message_service.py +++ b/api/services/message_service.py @@ -1,4 +1,6 @@ +import logging from collections.abc import Sequence +from typing import cast from pydantic import TypeAdapter from sqlalchemy import select @@ -17,7 +19,16 @@ from graphon.model_runtime.entities.model_entities import ModelType from libs.infinite_scroll_pagination import InfiniteScrollPagination from models import Account from models.enums import FeedbackFromSource, FeedbackRating -from models.model import App, AppMode, AppModelConfig, AppModelConfigDict, EndUser, Message, MessageFeedback +from models.model import ( + App, + AppMode, + AppModelConfig, + AppModelConfigDict, + EndUser, + Message, + MessageFeedback, + SuggestedQuestionsAfterAnswerConfig, +) from repositories.execution_extra_content_repository import ExecutionExtraContentRepository from repositories.sqlalchemy_execution_extra_content_repository import ( SQLAlchemyExecutionExtraContentRepository, @@ -32,6 +43,7 @@ from services.errors.message import ( from services.workflow_service import WorkflowService _app_model_config_adapter: TypeAdapter[AppModelConfigDict] = TypeAdapter(AppModelConfigDict) +logger = logging.getLogger(__name__) def _create_execution_extra_content_repository() -> ExecutionExtraContentRepository: @@ -252,6 +264,7 @@ class MessageService: ) model_manager = ModelManager.for_tenant(tenant_id=app_model.tenant_id) + suggested_questions_after_answer_config: SuggestedQuestionsAfterAnswerConfig = {"enabled": False} if app_model.mode == AppMode.ADVANCED_CHAT: workflow_service = WorkflowService() @@ -271,9 +284,11 @@ class MessageService: if not app_config.additional_features.suggested_questions_after_answer: raise SuggestedQuestionsAfterAnswerDisabledError() - model_instance = model_manager.get_default_model_instance( - tenant_id=app_model.tenant_id, model_type=ModelType.LLM - ) + suggested_questions_after_answer = workflow.features_dict.get("suggested_questions_after_answer") + if 
isinstance(suggested_questions_after_answer, dict): + suggested_questions_after_answer_config = cast( + SuggestedQuestionsAfterAnswerConfig, suggested_questions_after_answer + ) else: if not conversation.override_model_configs: app_model_config = db.session.scalar( @@ -293,16 +308,14 @@ class MessageService: if not app_model_config: raise ValueError("did not find app model config") - suggested_questions_after_answer = app_model_config.suggested_questions_after_answer_dict - if suggested_questions_after_answer.get("enabled", False) is False: + suggested_questions_after_answer_config = app_model_config.suggested_questions_after_answer_dict + if suggested_questions_after_answer_config.get("enabled", False) is False: raise SuggestedQuestionsAfterAnswerDisabledError() - model_instance = model_manager.get_model_instance( - tenant_id=app_model.tenant_id, - provider=app_model_config.model_dict["provider"], - model_type=ModelType.LLM, - model=app_model_config.model_dict["name"], - ) + model_instance = model_manager.get_default_model_instance( + tenant_id=app_model.tenant_id, + model_type=ModelType.LLM, + ) # get memory of conversation (read-only) memory = TokenBufferMemory(conversation=conversation, model_instance=model_instance) @@ -312,9 +325,17 @@ class MessageService: message_limit=3, ) + instruction_prompt = suggested_questions_after_answer_config.get("prompt") + if not isinstance(instruction_prompt, str) or not instruction_prompt.strip(): + instruction_prompt = None + + configured_model = suggested_questions_after_answer_config.get("model") with measure_time() as timer: questions_sequence = LLMGenerator.generate_suggested_questions_after_answer( - tenant_id=app_model.tenant_id, histories=histories + tenant_id=app_model.tenant_id, + histories=histories, + instruction_prompt=instruction_prompt, + model_config=configured_model, ) questions: list[str] = list(questions_sequence) diff --git a/api/services/tools/builtin_tools_manage_service.py 
b/api/services/tools/builtin_tools_manage_service.py index 7bd056b8a0..b8242ab3a5 100644 --- a/api/services/tools/builtin_tools_manage_service.py +++ b/api/services/tools/builtin_tools_manage_service.py @@ -26,7 +26,7 @@ from core.tools.plugin_tool.provider import PluginToolProviderController from core.tools.tool_label_manager import ToolLabelManager from core.tools.tool_manager import ToolManager from core.tools.utils.encryption import create_provider_encrypter -from core.tools.utils.system_oauth_encryption import decrypt_system_oauth_params +from core.tools.utils.system_encryption import decrypt_system_params from extensions.ext_database import db from extensions.ext_redis import redis_client from models.provider_ids import ToolProviderID @@ -521,7 +521,7 @@ class BuiltinToolManageService: ) if system_client: try: - oauth_params = decrypt_system_oauth_params(system_client.encrypted_oauth_params) + oauth_params = decrypt_system_params(system_client.encrypted_oauth_params) except Exception as e: raise ValueError(f"Error decrypting system oauth params: {e}") diff --git a/api/services/trigger/trigger_provider_service.py b/api/services/trigger/trigger_provider_service.py index 6e14d996ea..b8a76e4945 100644 --- a/api/services/trigger/trigger_provider_service.py +++ b/api/services/trigger/trigger_provider_service.py @@ -14,7 +14,7 @@ from core.helper.provider_cache import NoOpProviderCredentialCache from core.helper.provider_encryption import ProviderConfigEncrypter, create_provider_encrypter from core.plugin.entities.plugin_daemon import CredentialType from core.plugin.impl.oauth import OAuthHandler -from core.tools.utils.system_oauth_encryption import decrypt_system_oauth_params +from core.tools.utils.system_encryption import decrypt_system_params from core.trigger.entities.api_entities import ( TriggerProviderApiEntity, TriggerProviderSubscriptionApiEntity, @@ -635,7 +635,7 @@ class TriggerProviderService: if system_client: try: - oauth_params = 
decrypt_system_oauth_params(system_client.encrypted_oauth_params) + oauth_params = decrypt_system_params(system_client.encrypted_oauth_params) except Exception as e: raise ValueError(f"Error decrypting system oauth params: {e}") diff --git a/api/services/workflow_event_snapshot_service.py b/api/services/workflow_event_snapshot_service.py index 5fca444723..94f88f8c49 100644 --- a/api/services/workflow_event_snapshot_service.py +++ b/api/services/workflow_event_snapshot_service.py @@ -14,6 +14,7 @@ from sqlalchemy.orm import Session, sessionmaker from core.app.apps.message_generator import MessageGenerator from core.app.entities.task_entities import ( + HumanInputRequiredResponse, MessageReplaceStreamResponse, NodeFinishStreamResponse, NodeStartStreamResponse, @@ -22,10 +23,14 @@ from core.app.entities.task_entities import ( WorkflowStartStreamResponse, ) from core.app.layers.pause_state_persist_layer import WorkflowResumptionContext +from core.workflow.human_input_forms import load_form_tokens_by_form_id +from core.workflow.human_input_policy import HumanInputSurface, enrich_human_input_pause_reasons from graphon.entities import WorkflowStartReason +from graphon.entities.pause_reason import PauseReasonType from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus from graphon.runtime import GraphRuntimeState from graphon.workflow_type_encoder import WorkflowRuntimeTypeConverter +from models.human_input import HumanInputForm from models.model import AppMode, Message from models.workflow import WorkflowNodeExecutionTriggeredFrom, WorkflowRun from repositories.api_workflow_node_execution_repository import WorkflowNodeExecutionSnapshot @@ -59,8 +64,10 @@ def build_workflow_event_stream( tenant_id: str, app_id: str, session_maker: sessionmaker[Session], + human_input_surface: HumanInputSurface | None = None, idle_timeout: float = 300, ping_interval: float = 10.0, + close_on_pause: bool = True, ) -> Generator[Mapping[str, Any] | str, None, None]: 
topic = MessageGenerator.get_response_topic(app_mode, workflow_run.id) workflow_run_repo = DifyAPIRepositoryFactory.create_api_workflow_run_repository(session_maker) @@ -115,13 +122,15 @@ def build_workflow_event_stream( message_context=message_context, pause_entity=pause_entity, resumption_context=resumption_context, + session_maker=session_maker, + human_input_surface=human_input_surface, ) for event in snapshot_events: last_msg_time = time.time() last_ping_time = last_msg_time yield event - if _is_terminal_event(event, include_paused=True): + if _is_terminal_event(event, close_on_pause=close_on_pause): return while True: @@ -146,7 +155,7 @@ def build_workflow_event_stream( last_msg_time = time.time() last_ping_time = last_msg_time yield event - if _is_terminal_event(event, include_paused=True): + if _is_terminal_event(event, close_on_pause=close_on_pause): return finally: buffer_state.stop_event.set() @@ -207,6 +216,8 @@ def _build_snapshot_events( message_context: MessageContext | None, pause_entity: WorkflowPauseEntity | None, resumption_context: WorkflowResumptionContext | None, + session_maker: sessionmaker[Session] | None = None, + human_input_surface: HumanInputSurface | None = None, ) -> list[Mapping[str, Any]]: events: list[Mapping[str, Any]] = [] @@ -241,12 +252,24 @@ def _build_snapshot_events( events.append(node_finished) if workflow_run.status == WorkflowExecutionStatus.PAUSED and pause_entity is not None: + for human_input_event in _build_human_input_required_events( + workflow_run_id=workflow_run.id, + task_id=task_id, + pause_entity=pause_entity, + session_maker=session_maker, + human_input_surface=human_input_surface, + ): + _apply_message_context(human_input_event, message_context) + events.append(human_input_event) + pause_event = _build_pause_event( workflow_run=workflow_run, workflow_run_id=workflow_run.id, task_id=task_id, pause_entity=pause_entity, resumption_context=resumption_context, + session_maker=session_maker, + 
human_input_surface=human_input_surface, ) if pause_event is not None: _apply_message_context(pause_event, message_context) @@ -314,6 +337,97 @@ def _build_node_started_event( return response.to_ignore_detail_dict() +def _build_human_input_required_events( + *, + workflow_run_id: str, + task_id: str, + pause_entity: WorkflowPauseEntity, + session_maker: sessionmaker[Session] | None, + human_input_surface: HumanInputSurface | None, +) -> list[dict[str, Any]]: + reasons = [reason.model_dump(mode="json") for reason in pause_entity.get_pause_reasons()] + human_input_form_ids = [ + form_id + for reason in reasons + if reason.get("TYPE") == PauseReasonType.HUMAN_INPUT_REQUIRED + for form_id in [reason.get("form_id")] + if isinstance(form_id, str) + ] + + expiration_times_by_form_id: dict[str, int] = {} + display_in_ui_by_form_id: dict[str, bool] = {} + form_tokens_by_form_id: dict[str, str] = {} + if human_input_form_ids and session_maker is not None: + stmt = select(HumanInputForm.id, HumanInputForm.expiration_time, HumanInputForm.form_definition).where( + HumanInputForm.id.in_(human_input_form_ids) + ) + with session_maker() as session: + for form_id, expiration_time, form_definition in session.execute(stmt): + expiration_times_by_form_id[str(form_id)] = int(expiration_time.timestamp()) + try: + definition_payload = json.loads(form_definition) if form_definition else {} + except (TypeError, json.JSONDecodeError): + definition_payload = {} + display_in_ui_by_form_id[str(form_id)] = bool(definition_payload.get("display_in_ui")) + form_tokens_by_form_id = load_form_tokens_by_form_id( + human_input_form_ids, + session=session, + surface=human_input_surface, + ) + + events: list[dict[str, Any]] = [] + for reason in reasons: + if reason.get("TYPE") != PauseReasonType.HUMAN_INPUT_REQUIRED: + continue + + form_id_raw = reason.get("form_id") + node_id_raw = reason.get("node_id") + node_title_raw = reason.get("node_title") + form_content_raw = reason.get("form_content") + if not 
isinstance(form_id_raw, str): + continue + if not isinstance(node_id_raw, str): + continue + if not isinstance(node_title_raw, str): + continue + if not isinstance(form_content_raw, str): + continue + form_id = form_id_raw + node_id = node_id_raw + node_title = node_title_raw + form_content = form_content_raw + + inputs = reason.get("inputs") + actions = reason.get("actions") + resolved_default_values = reason.get("resolved_default_values") + + expiration_time = expiration_times_by_form_id.get(form_id) + if expiration_time is None: + continue + + response = HumanInputRequiredResponse( + task_id=task_id, + workflow_run_id=workflow_run_id, + data=HumanInputRequiredResponse.Data( + form_id=form_id, + node_id=node_id, + node_title=node_title, + form_content=form_content, + inputs=inputs if isinstance(inputs, list) else [], + actions=actions if isinstance(actions, list) else [], + display_in_ui=display_in_ui_by_form_id.get(form_id, False), + form_token=form_tokens_by_form_id.get(form_id), + resolved_default_values=(resolved_default_values if isinstance(resolved_default_values, dict) else {}), + expiration_time=expiration_time, + ), + ) + payload = response.model_dump(mode="json") + payload["event"] = response.event.value + events.append(payload) + + return events + + def _build_node_finished_event( *, workflow_run_id: str, @@ -356,6 +470,8 @@ def _build_pause_event( task_id: str, pause_entity: WorkflowPauseEntity, resumption_context: WorkflowResumptionContext | None, + session_maker: sessionmaker[Session] | None, + human_input_surface: HumanInputSurface | None = None, ) -> dict[str, Any] | None: paused_nodes: list[str] = [] outputs: dict[str, Any] = {} @@ -365,6 +481,36 @@ def _build_pause_event( outputs = dict(WorkflowRuntimeTypeConverter().to_json_encodable(state.outputs or {})) reasons = [reason.model_dump(mode="json") for reason in pause_entity.get_pause_reasons()] + human_input_form_ids = [ + form_id + for reason in reasons + if reason.get("TYPE") == 
PauseReasonType.HUMAN_INPUT_REQUIRED + for form_id in [reason.get("form_id")] + if isinstance(form_id, str) + ] + form_tokens_by_form_id: dict[str, str] = {} + expiration_times_by_form_id: dict[str, int] = {} + if human_input_form_ids and session_maker is not None: + with session_maker() as session: + form_tokens_by_form_id = load_form_tokens_by_form_id( + human_input_form_ids, + session=session, + surface=human_input_surface, + ) + stmt = select(HumanInputForm.id, HumanInputForm.expiration_time).where( + HumanInputForm.id.in_(human_input_form_ids) + ) + for row in session.execute(stmt): + form_id, expiration_time, *_rest = row + expiration_times_by_form_id[str(form_id)] = int(expiration_time.timestamp()) + # Reconnect paths must preserve the same pause-reason contract as live streams; + # otherwise clients see schema drift after resume. + reasons = enrich_human_input_pause_reasons( + reasons, + form_tokens_by_form_id=form_tokens_by_form_id, + expiration_times_by_form_id=expiration_times_by_form_id, + ) + response = WorkflowPauseStreamResponse( task_id=task_id, workflow_run_id=workflow_run_id, @@ -449,12 +595,19 @@ def _parse_event_message(message: bytes) -> Mapping[str, Any] | None: return event -def _is_terminal_event(event: Mapping[str, Any] | str, include_paused=False) -> bool: +def _is_terminal_event( + event: Mapping[str, Any] | str, + close_on_pause: bool = True, + *, + include_paused: bool | None = None, +) -> bool: + if include_paused is not None: + close_on_pause = include_paused if not isinstance(event, Mapping): return False event_type = event.get("event") if event_type == StreamEvent.WORKFLOW_FINISHED.value: return True - if include_paused: + if close_on_pause: return event_type == StreamEvent.WORKFLOW_PAUSED.value return False diff --git a/api/tasks/app_generate/workflow_execute_task.py b/api/tasks/app_generate/workflow_execute_task.py index c22e7e9918..5ceeb302c8 100644 --- a/api/tasks/app_generate/workflow_execute_task.py +++ 
b/api/tasks/app_generate/workflow_execute_task.py @@ -399,6 +399,8 @@ def _resume_advanced_chat( workflow_run_id: str, workflow_run: WorkflowRun, ) -> None: + resumed_generate_entity = generate_entity.model_copy(update={"stream": True}) + try: triggered_from = WorkflowRunTriggeredFrom(workflow_run.triggered_from) except ValueError: @@ -426,7 +428,7 @@ def _resume_advanced_chat( user=user, conversation=conversation, message=message, - application_generate_entity=generate_entity, + application_generate_entity=resumed_generate_entity, workflow_execution_repository=workflow_execution_repository, workflow_node_execution_repository=workflow_node_execution_repository, graph_runtime_state=graph_runtime_state, @@ -436,9 +438,8 @@ def _resume_advanced_chat( logger.exception("Failed to resume chatflow execution for workflow run %s", workflow_run_id) raise - if generate_entity.stream: - assert isinstance(response, Generator) - _publish_streaming_response(response, workflow_run_id, AppMode.ADVANCED_CHAT) + assert isinstance(response, Generator) + _publish_streaming_response(response, workflow_run_id, AppMode.ADVANCED_CHAT) def _resume_workflow( @@ -455,6 +456,8 @@ def _resume_workflow( workflow_run_repo, pause_entity, ) -> None: + resumed_generate_entity = generate_entity.model_copy(update={"stream": True}) + try: triggered_from = WorkflowRunTriggeredFrom(workflow_run.triggered_from) except ValueError: @@ -480,7 +483,7 @@ def _resume_workflow( app_model=app_model, workflow=workflow, user=user, - application_generate_entity=generate_entity, + application_generate_entity=resumed_generate_entity, graph_runtime_state=graph_runtime_state, workflow_execution_repository=workflow_execution_repository, workflow_node_execution_repository=workflow_node_execution_repository, @@ -490,11 +493,18 @@ def _resume_workflow( logger.exception("Failed to resume workflow execution for workflow run %s", workflow_run_id) raise - if generate_entity.stream: - assert isinstance(response, Generator) - 
_publish_streaming_response(response, workflow_run_id, AppMode.WORKFLOW) + assert isinstance(response, Generator) + _publish_streaming_response(response, workflow_run_id, AppMode.WORKFLOW) - workflow_run_repo.delete_workflow_pause(pause_entity) + try: + workflow_run_repo.delete_workflow_pause(pause_entity) + except Exception as exc: + if exc.__class__.__name__ != "_WorkflowRunError" or "WorkflowPause not found" not in str(exc): + raise + logger.info( + "Skipped deleting workflow pause %s after resume because it was already replaced or removed", + pause_entity.id, + ) @shared_task(queue=WORKFLOW_BASED_APP_EXECUTION_QUEUE, name="resume_app_execution") diff --git a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py index d10e5ed13c..3b5e822b90 100644 --- a/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py +++ b/api/tests/integration_tests/controllers/console/app/test_chat_message_permissions.py @@ -171,35 +171,13 @@ class TestChatMessageApiPermissions: parent_message_id=None, ) - class MockQuery: - def __init__(self, model): - self.model = model - - def where(self, *args, **kwargs): - return self - - def first(self): - if getattr(self.model, "__name__", "") == "Conversation": - return mock_conversation - return None - - def order_by(self, *args, **kwargs): - return self - - def limit(self, *_): - return self - - def all(self): - if getattr(self.model, "__name__", "") == "Message": - return [mock_message] - return [] - mock_session = mock.Mock() - mock_session.query.side_effect = MockQuery - mock_session.scalar.return_value = False + mock_session.scalar.return_value = mock_conversation + mock_session.scalars.return_value.all.return_value = [mock_message] monkeypatch.setattr(message_api, "db", SimpleNamespace(session=mock_session)) monkeypatch.setattr(message_api, "current_user", mock_account) + 
monkeypatch.setattr(message_api, "attach_message_extra_contents", mock.Mock()) class DummyPagination: def __init__(self, data, limit, has_more): diff --git a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py index f14b2c0ae5..635cfee2da 100644 --- a/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py +++ b/api/tests/test_containers_integration_tests/controllers/web/test_web_forgot_password.py @@ -24,7 +24,6 @@ def _patch_wraps(): patch("controllers.console.wraps.dify_config", dify_settings), patch("controllers.console.wraps.FeatureService.get_system_features", return_value=wraps_features), ): - mock_db.session.query.return_value.first.return_value = MagicMock() yield diff --git a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py index aebe87839c..d9828e19c5 100644 --- a/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py +++ b/api/tests/test_containers_integration_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py @@ -2,6 +2,7 @@ from __future__ import annotations +import secrets from dataclasses import dataclass, field from datetime import datetime, timedelta from unittest.mock import Mock @@ -11,6 +12,7 @@ import pytest from sqlalchemy import Engine, delete, select from sqlalchemy.orm import Session, sessionmaker +from core.workflow.human_input_adapter import DeliveryMethodType from extensions.ext_storage import storage from graphon.entities import WorkflowExecution from graphon.entities.pause_reason import HumanInputRequired, PauseReasonType @@ -20,9 +22,11 @@ from graphon.nodes.human_input.enums import FormInputType, HumanInputFormStatus from libs.datetime_utils import 
naive_utc_now from models.enums import CreatorUserRole, WorkflowRunTriggeredFrom from models.human_input import ( + BackstageRecipientPayload, HumanInputDelivery, HumanInputForm, HumanInputFormRecipient, + RecipientType, ) from models.workflow import WorkflowAppLog, WorkflowAppLogCreatedFrom, WorkflowPause, WorkflowPauseReason, WorkflowRun from repositories.entities.workflow_pause import WorkflowPauseEntity @@ -628,12 +632,12 @@ class TestPrivateWorkflowPauseEntity: class TestBuildHumanInputRequiredReason: """Integration tests for _build_human_input_required_reason using real DB models.""" - def test_builds_reason_from_form_definition( + def test_prefers_standalone_web_app_token_when_available( self, db_session_with_containers: Session, test_scope: _TestScope, ) -> None: - """Build the graph pause reason from the stored form definition.""" + """Use the public standalone web-app token for service API payloads.""" expiration_time = naive_utc_now() form_definition = FormDefinition( @@ -660,6 +664,40 @@ class TestBuildHumanInputRequiredReason: db_session_with_containers.add(form_model) db_session_with_containers.flush() + delivery = HumanInputDelivery( + form_id=form_model.id, + delivery_method_type=DeliveryMethodType.WEBAPP, + channel_payload="{}", + ) + db_session_with_containers.add(delivery) + db_session_with_containers.flush() + + backstage_access_token = secrets.token_urlsafe(8) + backstage_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.BACKSTAGE, + recipient_payload=BackstageRecipientPayload().model_dump_json(), + access_token=backstage_access_token, + ) + console_access_token = secrets.token_urlsafe(8) + console_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.CONSOLE, + recipient_payload="{}", + access_token=console_access_token, + ) + web_app_access_token = secrets.token_urlsafe(8) + web_app_recipient = 
HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.STANDALONE_WEB_APP, + recipient_payload="{}", + access_token=web_app_access_token, + ) + db_session_with_containers.add_all([backstage_recipient, console_recipient, web_app_recipient]) + db_session_with_containers.flush() # Create a pause so the reason has a valid pause_id workflow_run = _create_workflow_run( db_session_with_containers, @@ -688,8 +726,15 @@ class TestBuildHumanInputRequiredReason: # Refresh to ensure we have DB-round-tripped objects db_session_with_containers.refresh(form_model) db_session_with_containers.refresh(reason_model) + db_session_with_containers.refresh(backstage_recipient) + db_session_with_containers.refresh(console_recipient) + db_session_with_containers.refresh(web_app_recipient) - reason = _build_human_input_required_reason(reason_model, form_model) + reason = _build_human_input_required_reason( + reason_model, + form_model, + [backstage_recipient, console_recipient, web_app_recipient], + ) assert isinstance(reason, HumanInputRequired) assert reason.node_title == "Ask Name" @@ -697,3 +742,92 @@ class TestBuildHumanInputRequiredReason: assert reason.inputs[0].output_variable_name == "name" assert reason.actions[0].id == "approve" assert reason.resolved_default_values == {"name": "Alice"} + assert not hasattr(reason, "form_token") + + def test_falls_back_to_console_token_when_web_app_token_missing( + self, + db_session_with_containers: Session, + test_scope: _TestScope, + ) -> None: + """Use the console token only when no standalone web-app token exists.""" + + expiration_time = naive_utc_now() + form_definition = FormDefinition( + form_content="content", + inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], + user_actions=[UserAction(id="approve", title="Approve")], + rendered_content="rendered", + expiration_time=expiration_time, + default_values={"name": "Alice"}, + node_title="Ask Name", + 
display_in_ui=True, + ) + + form_model = HumanInputForm( + tenant_id=test_scope.tenant_id, + app_id=test_scope.app_id, + workflow_run_id=str(uuid4()), + node_id="node-1", + form_definition=form_definition.model_dump_json(), + rendered_content="rendered", + status=HumanInputFormStatus.WAITING, + expiration_time=expiration_time, + ) + db_session_with_containers.add(form_model) + db_session_with_containers.flush() + + delivery = HumanInputDelivery( + form_id=form_model.id, + delivery_method_type=DeliveryMethodType.WEBAPP, + channel_payload="{}", + ) + db_session_with_containers.add(delivery) + db_session_with_containers.flush() + + backstage_access_token = secrets.token_urlsafe(8) + backstage_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.BACKSTAGE, + recipient_payload=BackstageRecipientPayload().model_dump_json(), + access_token=backstage_access_token, + ) + console_access_token = secrets.token_urlsafe(8) + console_recipient = HumanInputFormRecipient( + form_id=form_model.id, + delivery_id=delivery.id, + recipient_type=RecipientType.CONSOLE, + recipient_payload="{}", + access_token=console_access_token, + ) + db_session_with_containers.add_all([backstage_recipient, console_recipient]) + db_session_with_containers.flush() + + workflow_run = _create_workflow_run( + db_session_with_containers, + test_scope, + status=WorkflowExecutionStatus.RUNNING, + ) + pause = WorkflowPause( + workflow_id=test_scope.workflow_id, + workflow_run_id=workflow_run.id, + state_object_key=f"workflow-state-{uuid4()}.json", + ) + db_session_with_containers.add(pause) + db_session_with_containers.flush() + test_scope.state_keys.add(pause.state_object_key) + + reason_model = WorkflowPauseReason( + pause_id=pause.id, + type_=PauseReasonType.HUMAN_INPUT_REQUIRED, + form_id=form_model.id, + node_id="node-1", + message="", + ) + db_session_with_containers.add(reason_model) + db_session_with_containers.commit() + + reason = 
_build_human_input_required_reason(reason_model, form_model, [backstage_recipient, console_recipient]) + + assert isinstance(reason, HumanInputRequired) + assert not hasattr(reason, "form_token") diff --git a/api/tests/test_containers_integration_tests/services/test_feedback_service.py b/api/tests/test_containers_integration_tests/services/test_feedback_service.py index d82933ccb9..3dcd6586e2 100644 --- a/api/tests/test_containers_integration_tests/services/test_feedback_service.py +++ b/api/tests/test_containers_integration_tests/services/test_feedback_service.py @@ -13,6 +13,12 @@ from models.model import App, Conversation, Message from services.feedback_service import FeedbackService +def _execute_result(rows): + result = mock.Mock() + result.all.return_value = rows + return result + + class TestFeedbackService: """Test FeedbackService methods.""" @@ -81,25 +87,17 @@ class TestFeedbackService: def test_export_feedbacks_csv_format(self, mock_db_session, sample_data): """Test exporting feedback data in CSV format.""" - - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["user_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["user_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["user_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["user_feedback"].from_account, + ) + ] + ) # Test CSV export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="csv") @@ -120,25 +118,17 @@ class TestFeedbackService: def 
test_export_feedbacks_json_format(self, mock_db_session, sample_data): """Test exporting feedback data in JSON format.""" - - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["admin_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["admin_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["admin_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["admin_feedback"].from_account, + ) + ] + ) # Test JSON export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="json") @@ -157,25 +147,17 @@ class TestFeedbackService: def test_export_feedbacks_with_filters(self, mock_db_session, sample_data): """Test exporting feedback with various filters.""" - - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["admin_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["admin_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["admin_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["admin_feedback"].from_account, + ) + ] + ) # Test with filters 
result = FeedbackService.export_feedbacks( @@ -193,17 +175,7 @@ class TestFeedbackService: def test_export_feedbacks_no_data(self, mock_db_session, sample_data): """Test exporting feedback when no data exists.""" - - # Setup mock query result with no data - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result([]) result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="csv") @@ -251,24 +223,17 @@ class TestFeedbackService: created_at=datetime(2024, 1, 1, 10, 0, 0), ) - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["user_feedback"], - long_message, - sample_data["conversation"], - sample_data["app"], - sample_data["user_feedback"].from_account, - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["user_feedback"], + long_message, + sample_data["conversation"], + sample_data["app"], + sample_data["user_feedback"].from_account, + ) + ] + ) # Test export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="json") @@ -309,24 +274,17 @@ class TestFeedbackService: created_at=datetime(2024, 1, 1, 10, 0, 0), ) - # Setup mock query result - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query 
- mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - chinese_feedback, - chinese_message, - sample_data["conversation"], - sample_data["app"], - None, # No account for user feedback - ) - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + chinese_feedback, + chinese_message, + sample_data["conversation"], + sample_data["app"], + None, + ) + ] + ) # Test export result = FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="csv") @@ -339,32 +297,24 @@ class TestFeedbackService: def test_export_feedbacks_emoji_ratings(self, mock_db_session, sample_data): """Test that rating emojis are properly formatted in export.""" - - # Setup mock query result with both like and dislike feedback - mock_query = mock.Mock() - mock_query.join.return_value = mock_query - mock_query.outerjoin.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.filter.return_value = mock_query - mock_query.order_by.return_value = mock_query - mock_query.all.return_value = [ - ( - sample_data["user_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["user_feedback"].from_account, - ), - ( - sample_data["admin_feedback"], - sample_data["message"], - sample_data["conversation"], - sample_data["app"], - sample_data["admin_feedback"].from_account, - ), - ] - - mock_db_session.execute.return_value = mock_query + mock_db_session.execute.return_value = _execute_result( + [ + ( + sample_data["user_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["user_feedback"].from_account, + ), + ( + sample_data["admin_feedback"], + sample_data["message"], + sample_data["conversation"], + sample_data["app"], + sample_data["admin_feedback"].from_account, + ), + ] + ) # Test export result = 
FeedbackService.export_feedbacks(app_id=sample_data["app"].id, format_type="json") diff --git a/api/tests/unit_tests/commands/test_generate_swagger_specs.py b/api/tests/unit_tests/commands/test_generate_swagger_specs.py new file mode 100644 index 0000000000..e77e875081 --- /dev/null +++ b/api/tests/unit_tests/commands/test_generate_swagger_specs.py @@ -0,0 +1,37 @@ +"""Unit tests for the standalone Swagger export helper.""" + +import importlib.util +import json +import sys +from pathlib import Path + + +def _load_generate_swagger_specs_module(): + api_dir = Path(__file__).resolve().parents[3] + script_path = api_dir / "dev" / "generate_swagger_specs.py" + + spec = importlib.util.spec_from_file_location("generate_swagger_specs", script_path) + assert spec + assert spec.loader + + module = importlib.util.module_from_spec(spec) + sys.modules[spec.name] = module + spec.loader.exec_module(module) # type: ignore[attr-defined] + return module + + +def test_generate_specs_writes_console_web_and_service_swagger_files(tmp_path): + module = _load_generate_swagger_specs_module() + + written_paths = module.generate_specs(tmp_path) + + assert [path.name for path in written_paths] == [ + "console-swagger.json", + "web-swagger.json", + "service-swagger.json", + ] + + for path in written_paths: + payload = json.loads(path.read_text(encoding="utf-8")) + assert payload["swagger"] == "2.0" + assert "paths" in payload diff --git a/api/tests/unit_tests/conftest.py b/api/tests/unit_tests/conftest.py index 55873b06a8..7174530e97 100644 --- a/api/tests/unit_tests/conftest.py +++ b/api/tests/unit_tests/conftest.py @@ -121,33 +121,32 @@ def _configure_session_factory(_unit_test_engine): configure_session_factory(_unit_test_engine, expire_on_commit=False) -def setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account): +def setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_owner): """ - Helper to set up the mock DB execute chain for tenant/account authentication. 
+ Helper to stub the tenant-owner execute result for service API app authentication. - This configures the mock to return (tenant, account) for the - db.session.execute(select(...).join().join().where()).one_or_none() - query used by validate_app_token decorator. + The validate_app_token decorator currently resolves the active tenant owner + via db.session.execute(select(Tenant, Account)...).one_or_none(). Args: mock_db: The mocked db object mock_tenant: Mock tenant object to return - mock_account: Mock account object to return + mock_owner: Mock owner object to return from the execute result """ - mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_account) + mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_owner) -def setup_mock_dataset_tenant_query(mock_db, mock_tenant, mock_ta): +def setup_mock_dataset_owner_execute_result(mock_db, mock_tenant, mock_tenant_account_join): """ - Helper to set up the mock DB execute chain for dataset tenant authentication. + Helper to stub the tenant-owner execute result for dataset token authentication. - This configures the mock to return (tenant, tenant_account) for the - db.session.execute(select(...).where().where().where().where()).one_or_none() - query used by validate_dataset_token decorator. + The validate_dataset_token decorator currently resolves the owner mapping via + db.session.execute(select(Tenant, TenantAccountJoin)...).one_or_none(), and + then loads the Account separately via db.session.get(...). 
Args: mock_db: The mocked db object mock_tenant: Mock tenant object to return - mock_ta: Mock tenant account object to return + mock_tenant_account_join: Mock tenant-account join object to return """ - mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_ta) + mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_tenant_account_join) diff --git a/api/tests/unit_tests/controllers/console/app/test_annotation_security.py b/api/tests/unit_tests/controllers/console/app/test_annotation_security.py index 9f1ff9b40f..bfa4048191 100644 --- a/api/tests/unit_tests/controllers/console/app/test_annotation_security.py +++ b/api/tests/unit_tests/controllers/console/app/test_annotation_security.py @@ -208,8 +208,6 @@ class TestAnnotationImportServiceValidation: file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with patch("services.annotation_service.current_account_with_tenant") as mock_auth: mock_auth.return_value = (MagicMock(id="user_id"), "tenant_id") @@ -230,8 +228,6 @@ class TestAnnotationImportServiceValidation: file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with patch("services.annotation_service.current_account_with_tenant") as mock_auth: mock_auth.return_value = (MagicMock(id="user_id"), "tenant_id") @@ -248,8 +244,6 @@ class TestAnnotationImportServiceValidation: csv_content = 'invalid,csv,format\nwith,unbalanced,quotes,and"stuff' file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with ( patch("services.annotation_service.current_account_with_tenant") as mock_auth, 
patch("services.annotation_service.pd.read_csv", side_effect=ParserError("malformed CSV")), @@ -269,8 +263,6 @@ class TestAnnotationImportServiceValidation: file = FileStorage(stream=io.BytesIO(csv_content.encode()), filename="test.csv", content_type="text/csv") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_app - with patch("services.annotation_service.current_account_with_tenant") as mock_auth: mock_auth.return_value = (MagicMock(id="user_id"), "tenant_id") diff --git a/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py b/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py index cb4fe40944..17bee94c52 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py +++ b/api/tests/unit_tests/controllers/console/auth/test_authentication_security.py @@ -43,7 +43,6 @@ class TestAuthenticationSecurity: mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = services.errors.account.AccountPasswordError("Invalid email or password.") - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists mock_features.return_value.is_allow_register = True # Act @@ -76,7 +75,6 @@ class TestAuthenticationSecurity: mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = services.errors.account.AccountPasswordError("Wrong password") - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists # Act with self.app.test_request_context( @@ -109,7 +107,6 @@ class TestAuthenticationSecurity: mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = services.errors.account.AccountPasswordError("Invalid email or password.") - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists mock_features.return_value.is_allow_register = 
False # Act @@ -135,7 +132,6 @@ class TestAuthenticationSecurity: def test_reset_password_with_existing_account(self, mock_send_email, mock_get_user, mock_features, mock_db): """Test that reset password returns success with token for existing accounts.""" # Mock the setup check - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists # Test with existing account mock_get_user.return_value = MagicMock(email="existing@example.com") diff --git a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py index 9929a71120..b7bc73da5f 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_email_verification.py +++ b/api/tests/unit_tests/controllers/console/auth/test_email_verification.py @@ -65,7 +65,6 @@ class TestEmailCodeLoginSendEmailApi: - IP rate limiting is checked """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = mock_account mock_send_email.return_value = "email_token_123" @@ -98,7 +97,6 @@ class TestEmailCodeLoginSendEmailApi: - Registration is allowed by system features """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = None mock_get_features.return_value.is_allow_register = True @@ -130,7 +128,6 @@ class TestEmailCodeLoginSendEmailApi: - Registration is blocked by system features """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = None mock_get_features.return_value.is_allow_register = False @@ -152,7 +149,6 @@ class TestEmailCodeLoginSendEmailApi: - Prevents spam and abuse """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = True # Act & Assert @@ -172,7 +168,6 @@ class 
TestEmailCodeLoginSendEmailApi: - AccountInFreezeError is raised for frozen accounts """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.side_effect = AccountRegisterError("Account frozen") @@ -213,7 +208,6 @@ class TestEmailCodeLoginSendEmailApi: - Defaults to en-US when not specified """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_ip_limit.return_value = False mock_get_user.return_value = mock_account mock_send_email.return_value = "token" @@ -286,7 +280,6 @@ class TestEmailCodeLoginApi: - User is logged in with token pair """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [MagicMock()] @@ -335,7 +328,6 @@ class TestEmailCodeLoginApi: - User is logged in after account creation """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "newuser@example.com", "code": "123456"} mock_get_user.return_value = None mock_create_account.return_value = mock_account @@ -369,7 +361,6 @@ class TestEmailCodeLoginApi: - InvalidTokenError is raised for invalid/expired tokens """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = None # Act & Assert @@ -392,7 +383,6 @@ class TestEmailCodeLoginApi: - InvalidEmailError is raised when email doesn't match token """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "original@example.com", "code": "123456"} # Act & Assert @@ -415,7 +405,6 @@ class TestEmailCodeLoginApi: - EmailCodeError is raised for wrong verification code """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = 
{"email": "test@example.com", "code": "123456"} # Act & Assert @@ -453,7 +442,6 @@ class TestEmailCodeLoginApi: - User is added as owner of new workspace """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [] @@ -496,7 +484,6 @@ class TestEmailCodeLoginApi: - WorkspacesLimitExceeded is raised when limit reached """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [] @@ -538,7 +525,6 @@ class TestEmailCodeLoginApi: - NotAllowedCreateWorkspace is raised when creation disabled """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_data.return_value = {"email": "test@example.com", "code": "123456"} mock_get_user.return_value = mock_account mock_get_tenants.return_value = [] diff --git a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py index 0cf97da878..d089be8905 100644 --- a/api/tests/unit_tests/controllers/console/auth/test_login_logout.py +++ b/api/tests/unit_tests/controllers/console/auth/test_login_logout.py @@ -110,7 +110,6 @@ class TestLoginApi: - Rate limit is reset after successful login """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.return_value = mock_account @@ -162,7 +161,6 @@ class TestLoginApi: - Authentication proceeds with invitation token """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = {"data": {"email": "test@example.com"}} 
mock_authenticate.return_value = mock_account @@ -199,7 +197,6 @@ class TestLoginApi: - EmailPasswordLoginLimitError is raised when limit exceeded """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = True mock_get_invitation.return_value = None @@ -228,7 +225,6 @@ class TestLoginApi: - AccountInFreezeError is raised for frozen accounts """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_frozen.return_value = True # Act & Assert @@ -268,7 +264,6 @@ class TestLoginApi: - Generic error message prevents user enumeration """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = AccountPasswordError("Invalid password") @@ -305,7 +300,6 @@ class TestLoginApi: - Login is prevented even with valid credentials """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = AccountLoginError("Account is banned") @@ -351,7 +345,6 @@ class TestLoginApi: - User cannot login without an assigned workspace """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.return_value = mock_account @@ -383,7 +376,6 @@ class TestLoginApi: - Security check prevents invitation token abuse """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_is_rate_limit.return_value = False mock_get_invitation.return_value = {"data": {"email": "invited@example.com"}} @@ -425,7 +417,6 @@ class TestLoginApi: mock_token_pair, ): """Test that login retries with lowercase email when uppercase lookup fails.""" - mock_db.session.query.return_value.first.return_value = MagicMock() 
mock_is_rate_limit.return_value = False mock_get_invitation.return_value = None mock_authenticate.side_effect = [AccountPasswordError("Invalid"), mock_account] @@ -459,7 +450,6 @@ class TestLoginApi: mock_db, app, ): - mock_db.session.query.return_value.first.return_value = MagicMock() mock_get_token_data.return_value = {"email": "User@Example.com", "code": "123456"} mock_get_account.side_effect = Unauthorized("Account is banned.") @@ -513,7 +503,6 @@ class TestLogoutApi: - Success response is returned """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() mock_current_account.return_value = (mock_account, MagicMock()) # Act @@ -539,7 +528,6 @@ class TestLogoutApi: - Success response is returned """ # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() # Create a mock anonymous user that will pass isinstance check anonymous_user = MagicMock() mock_flask_login.AnonymousUserMixin = type("AnonymousUserMixin", (), {}) diff --git a/api/tests/unit_tests/controllers/console/billing/test_billing.py b/api/tests/unit_tests/controllers/console/billing/test_billing.py index c80758c857..810f1b94fc 100644 --- a/api/tests/unit_tests/controllers/console/billing/test_billing.py +++ b/api/tests/unit_tests/controllers/console/billing/test_billing.py @@ -46,7 +46,6 @@ class TestPartnerTenants: patch("libs.login.dify_config.LOGIN_DISABLED", False), patch("libs.login.check_csrf_token") as mock_csrf, ): - mock_db.session.query.return_value.first.return_value = MagicMock() # Mock setup exists mock_csrf.return_value = None yield {"db": mock_db, "csrf": mock_csrf} diff --git a/api/tests/unit_tests/controllers/console/tag/test_tags.py b/api/tests/unit_tests/controllers/console/tag/test_tags.py index 2be5a21f28..6405558bb4 100644 --- a/api/tests/unit_tests/controllers/console/tag/test_tags.py +++ b/api/tests/unit_tests/controllers/console/tag/test_tags.py @@ -8,8 +8,10 @@ from werkzeug.exceptions import Forbidden import 
controllers.console.tag.tags as module from controllers.console import console_ns from controllers.console.tag.tags import ( - TagBindingCreateApi, - TagBindingDeleteApi, + DeprecatedTagBindingCreateApi, + DeprecatedTagBindingRemoveApi, + TagBindingCollectionApi, + TagBindingItemApi, TagListApi, TagUpdateDeleteApi, ) @@ -205,9 +207,9 @@ class TestTagUpdateDeleteApi: assert status == 204 -class TestTagBindingCreateApi: +class TestTagBindingCollectionApi: def test_create_success(self, app, admin_user, payload_patch): - api = TagBindingCreateApi() + api = TagBindingCollectionApi() method = unwrap(api.post) payload = { @@ -232,7 +234,7 @@ class TestTagBindingCreateApi: assert result["result"] == "success" def test_create_forbidden(self, app, readonly_user, payload_patch): - api = TagBindingCreateApi() + api = TagBindingCollectionApi() method = unwrap(api.post) with app.test_request_context("/", json={}): @@ -247,9 +249,78 @@ class TestTagBindingCreateApi: method(api) -class TestTagBindingDeleteApi: +class TestDeprecatedTagBindingCreateApi: + def test_create_success(self, app, admin_user, payload_patch): + api = DeprecatedTagBindingCreateApi() + method = unwrap(api.post) + + payload = { + "tag_ids": ["tag-1"], + "target_id": "target-1", + "type": "knowledge", + } + + with app.test_request_context("/", json=payload): + with ( + patch( + "controllers.console.tag.tags.current_account_with_tenant", + return_value=(admin_user, None), + ), + payload_patch(payload), + patch("controllers.console.tag.tags.TagService.save_tag_binding") as save_mock, + ): + result, status = method(api) + + save_mock.assert_called_once() + assert status == 200 + assert result["result"] == "success" + + +class TestTagBindingItemApi: + def test_delete_success(self, app, admin_user, payload_patch): + api = TagBindingItemApi() + method = unwrap(api.delete) + + payload = { + "target_id": "target-1", + "type": "knowledge", + } + + with app.test_request_context("/", json=payload): + with ( + patch( + 
"controllers.console.tag.tags.current_account_with_tenant", + return_value=(admin_user, None), + ), + payload_patch(payload), + patch("controllers.console.tag.tags.TagService.delete_tag_binding") as delete_mock, + ): + result, status = method(api, "tag-1") + + delete_mock.assert_called_once() + delete_payload = delete_mock.call_args.args[0] + assert delete_payload.tag_id == "tag-1" + assert delete_payload.target_id == "target-1" + assert delete_payload.type == TagType.KNOWLEDGE + assert status == 200 + assert result["result"] == "success" + + def test_delete_forbidden(self, app, readonly_user): + api = TagBindingItemApi() + method = unwrap(api.delete) + + with app.test_request_context("/"): + with patch( + "controllers.console.tag.tags.current_account_with_tenant", + return_value=(readonly_user, None), + ): + with pytest.raises(Forbidden): + method(api, "tag-1") + + +class TestDeprecatedTagBindingRemoveApi: def test_remove_success(self, app, admin_user, payload_patch): - api = TagBindingDeleteApi() + api = DeprecatedTagBindingRemoveApi() method = unwrap(api.post) payload = { @@ -274,7 +345,7 @@ class TestTagBindingDeleteApi: assert result["result"] == "success" def test_remove_forbidden(self, app, readonly_user, payload_patch): - api = TagBindingDeleteApi() + api = DeprecatedTagBindingRemoveApi() method = unwrap(api.post) with app.test_request_context("/", json={}): @@ -297,3 +368,35 @@ class TestTagResponseModel: assert payload["type"] == "knowledge" assert payload["binding_count"] == "1" + + +class TestTagBindingRouteMetadata: + def test_legacy_write_routes_are_marked_deprecated(self): + assert DeprecatedTagBindingCreateApi.post.__apidoc__["deprecated"] is True + assert DeprecatedTagBindingRemoveApi.post.__apidoc__["deprecated"] is True + assert TagBindingCollectionApi.post.__apidoc__.get("deprecated") is not True + assert TagBindingItemApi.delete.__apidoc__.get("deprecated") is not True + + def test_write_routes_have_stable_operation_ids(self): + assert 
TagBindingCollectionApi.post.__apidoc__["id"] == "create_tag_binding" + assert TagBindingItemApi.delete.__apidoc__["id"] == "delete_tag_binding" + assert DeprecatedTagBindingCreateApi.post.__apidoc__["id"] == "create_tag_binding_deprecated" + assert DeprecatedTagBindingRemoveApi.post.__apidoc__["id"] == "delete_tag_binding_deprecated" + + def test_canonical_and_legacy_write_routes_are_registered(self): + route_map = { + resource.__name__: urls + for resource, urls, _route_doc, _kwargs in console_ns.resources + if resource.__name__ + in { + "TagBindingCollectionApi", + "TagBindingItemApi", + "DeprecatedTagBindingCreateApi", + "DeprecatedTagBindingRemoveApi", + } + } + + assert route_map["TagBindingCollectionApi"] == ("/tag-bindings",) + assert route_map["TagBindingItemApi"] == ("/tag-bindings/",) + assert route_map["DeprecatedTagBindingCreateApi"] == ("/tag-bindings/create",) + assert route_map["DeprecatedTagBindingRemoveApi"] == ("/tag-bindings/remove",) diff --git a/api/tests/unit_tests/controllers/console/test_human_input_form.py b/api/tests/unit_tests/controllers/console/test_human_input_form.py index 232b6eee79..ebf803cac9 100644 --- a/api/tests/unit_tests/controllers/console/test_human_input_form.py +++ b/api/tests/unit_tests/controllers/console/test_human_input_form.py @@ -122,6 +122,35 @@ def test_post_form_invalid_recipient_type(app, monkeypatch: pytest.MonkeyPatch) handler(api, form_token="token") +def test_post_form_rejects_webapp_recipient_type(app, monkeypatch: pytest.MonkeyPatch) -> None: + form = SimpleNamespace(tenant_id="tenant-1", recipient_type=RecipientType.STANDALONE_WEB_APP) + + class _ServiceStub: + def __init__(self, *_args, **_kwargs): + pass + + def get_form_by_token(self, _token): + return form + + monkeypatch.setattr("controllers.console.human_input_form.HumanInputService", _ServiceStub) + monkeypatch.setattr( + "controllers.console.human_input_form.current_account_with_tenant", + lambda: (SimpleNamespace(id="user-1"), "tenant-1"), + ) + 
monkeypatch.setattr("controllers.console.human_input_form.db", SimpleNamespace(engine=object())) + + api = ConsoleHumanInputFormApi() + handler = _unwrap(api.post) + + with app.test_request_context( + "/console/api/form/human_input/token", + method="POST", + json={"inputs": {"content": "ok"}, "action": "approve"}, + ): + with pytest.raises(NotFoundError): + handler(api, form_token="token") + + def test_post_form_success(app, monkeypatch: pytest.MonkeyPatch) -> None: submit_mock = Mock() form = SimpleNamespace(tenant_id="tenant-1", recipient_type=RecipientType.CONSOLE) diff --git a/api/tests/unit_tests/controllers/console/test_workspace_account.py b/api/tests/unit_tests/controllers/console/test_workspace_account.py index 26ff264f18..0b1a32581a 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_account.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_account.py @@ -24,10 +24,6 @@ def app(): return app -def _mock_wraps_db(mock_db): - mock_db.session.query.return_value.first.return_value = MagicMock() - - def _build_account(email: str, account_id: str = "acc", tenant: object | None = None) -> Account: tenant_obj = tenant if tenant is not None else SimpleNamespace(id="tenant-id") account = Account(name=account_id, email=email) @@ -64,7 +60,6 @@ class TestChangeEmailSend: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("current@example.com", "acc1") mock_current_account.return_value = (mock_account, None) @@ -117,7 +112,6 @@ class TestChangeEmailSend: """GHSA-4q3w-q5mc-45rq: a phase-1 token must not unlock the new-email send step.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("current@example.com", "acc1") mock_current_account.return_value = (mock_account, None) @@ -163,7 +157,6 @@ class 
TestChangeEmailValidity: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_account = _build_account("user@example.com", "acc2") mock_current_account.return_value = (mock_account, None) @@ -223,7 +216,6 @@ class TestChangeEmailValidity: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = (_build_account("old@example.com", "acc"), None) mock_is_rate_limit.return_value = False @@ -280,7 +272,6 @@ class TestChangeEmailValidity: """A token whose phase marker is a string but not a known transition must be rejected.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = (_build_account("old@example.com", "acc"), None) mock_is_rate_limit.return_value = False @@ -330,7 +321,6 @@ class TestChangeEmailValidity: """A token minted without a phase marker (e.g. 
a hand-crafted token) must not validate.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) mock_current_account.return_value = (_build_account("old@example.com", "acc"), None) mock_is_rate_limit.return_value = False @@ -378,7 +368,6 @@ class TestChangeEmailReset: mock_db, app, ): - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") mock_current_account.return_value = (current_user, None) @@ -434,7 +423,6 @@ class TestChangeEmailReset: """GHSA-4q3w-q5mc-45rq PoC: phase-1 token must not be usable against /reset.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") mock_current_account.return_value = (current_user, None) @@ -488,7 +476,6 @@ class TestChangeEmailReset: """A verified token for address A must not be replayed to change to address B.""" from controllers.console.auth.error import InvalidTokenError - _mock_wraps_db(mock_db) mock_features.return_value = SimpleNamespace(enable_change_email=True) current_user = _build_account("old@example.com", "acc3") mock_current_account.return_value = (current_user, None) @@ -561,7 +548,6 @@ class TestAccountDeletionFeedback: @patch("controllers.console.wraps.db") @patch("controllers.console.workspace.account.BillingService.update_account_deletion_feedback") def test_should_normalize_feedback_email(self, mock_update, mock_db, app): - _mock_wraps_db(mock_db) with app.test_request_context( "/account/delete/feedback", method="POST", @@ -578,7 +564,6 @@ class TestCheckEmailUnique: @patch("controllers.console.workspace.account.AccountService.check_email_unique") @patch("controllers.console.workspace.account.AccountService.is_account_in_freeze") def 
test_should_normalize_email(self, mock_is_freeze, mock_check_unique, mock_db, app): - _mock_wraps_db(mock_db) mock_is_freeze.return_value = False mock_check_unique.return_value = True diff --git a/api/tests/unit_tests/controllers/console/test_workspace_members.py b/api/tests/unit_tests/controllers/console/test_workspace_members.py index 239fec8430..811bf5b1e7 100644 --- a/api/tests/unit_tests/controllers/console/test_workspace_members.py +++ b/api/tests/unit_tests/controllers/console/test_workspace_members.py @@ -1,5 +1,5 @@ from types import SimpleNamespace -from unittest.mock import MagicMock, patch +from unittest.mock import patch import pytest from flask import Flask, g @@ -16,10 +16,6 @@ def app(): return flask_app -def _mock_wraps_db(mock_db): - mock_db.session.query.return_value.first.return_value = MagicMock() - - def _build_feature_flags(): placeholder_quota = SimpleNamespace(limit=0, size=0) workspace_members = SimpleNamespace(is_available=lambda count: True) @@ -49,7 +45,6 @@ class TestMemberInviteEmailApi: mock_get_features, app, ): - _mock_wraps_db(mock_db) mock_get_features.return_value = _build_feature_flags() mock_invite_member.return_value = "token-abc" diff --git a/api/tests/unit_tests/controllers/console/test_wraps.py b/api/tests/unit_tests/controllers/console/test_wraps.py index f6e096a97b..aa4973851a 100644 --- a/api/tests/unit_tests/controllers/console/test_wraps.py +++ b/api/tests/unit_tests/controllers/console/test_wraps.py @@ -310,7 +310,6 @@ class TestSystemSetup: def test_should_allow_when_setup_complete(self, mock_db): """Test that requests are allowed when setup is complete""" # Arrange - mock_db.session.query.return_value.first.return_value = MagicMock() # Setup exists @setup_required def admin_view(): diff --git a/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py b/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py index 51f76af172..0b3d7ef6d7 100644 --- 
a/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_endpoint.py @@ -2,14 +2,17 @@ from unittest.mock import MagicMock, patch import pytest +from controllers.console import console_ns from controllers.console.workspace.endpoint import ( - EndpointCreateApi, - EndpointDeleteApi, + DeprecatedEndpointCreateApi, + DeprecatedEndpointDeleteApi, + DeprecatedEndpointUpdateApi, + EndpointCollectionApi, EndpointDisableApi, EndpointEnableApi, + EndpointItemApi, EndpointListApi, EndpointListForSinglePluginApi, - EndpointUpdateApi, ) from core.plugin.impl.exc import PluginPermissionDeniedError @@ -35,9 +38,9 @@ def patch_current_account(user_and_tenant): @pytest.mark.usefixtures("patch_current_account") -class TestEndpointCreateApi: +class TestEndpointCollectionApi: def test_create_success(self, app): - api = EndpointCreateApi() + api = EndpointCollectionApi() method = unwrap(api.post) payload = { @@ -55,7 +58,7 @@ class TestEndpointCreateApi: assert result["success"] is True def test_create_permission_denied(self, app): - api = EndpointCreateApi() + api = EndpointCollectionApi() method = unwrap(api.post) payload = { @@ -75,7 +78,7 @@ class TestEndpointCreateApi: method(api) def test_create_validation_error(self, app): - api = EndpointCreateApi() + api = EndpointCollectionApi() method = unwrap(api.post) payload = { @@ -91,6 +94,27 @@ class TestEndpointCreateApi: method(api) +@pytest.mark.usefixtures("patch_current_account") +class TestDeprecatedEndpointCreateApi: + def test_create_success(self, app): + api = DeprecatedEndpointCreateApi() + method = unwrap(api.post) + + payload = { + "plugin_unique_identifier": "plugin-1", + "name": "endpoint", + "settings": {"a": 1}, + } + + with ( + app.test_request_context("/", json=payload), + patch("controllers.console.workspace.endpoint.EndpointService.create_endpoint", return_value=True), + ): + result = method(api) + + assert result["success"] is True + + 
@pytest.mark.usefixtures("patch_current_account") class TestEndpointListApi: def test_list_success(self, app): @@ -146,9 +170,96 @@ class TestEndpointListForSinglePluginApi: @pytest.mark.usefixtures("patch_current_account") -class TestEndpointDeleteApi: +class TestEndpointItemApi: def test_delete_success(self, app): - api = EndpointDeleteApi() + api = EndpointItemApi() + method = unwrap(api.delete) + + with ( + app.test_request_context("/", method="DELETE"), + patch( + "controllers.console.workspace.endpoint.EndpointService.delete_endpoint", + return_value=True, + ) as mock_delete, + ): + result = method(api, "e1") + + assert result["success"] is True + mock_delete.assert_called_once_with(tenant_id="t1", user_id="u1", endpoint_id="e1") + + def test_delete_service_failure(self, app): + api = EndpointItemApi() + method = unwrap(api.delete) + + with ( + app.test_request_context("/", method="DELETE"), + patch("controllers.console.workspace.endpoint.EndpointService.delete_endpoint", return_value=False), + ): + result = method(api, "e1") + + assert result["success"] is False + + def test_update_success(self, app): + api = EndpointItemApi() + method = unwrap(api.patch) + + payload = { + "name": "new-name", + "settings": {"x": 1}, + } + + with ( + app.test_request_context("/", method="PATCH", json=payload), + patch( + "controllers.console.workspace.endpoint.EndpointService.update_endpoint", + return_value=True, + ) as mock_update, + ): + result = method(api, "e1") + + assert result["success"] is True + mock_update.assert_called_once_with( + tenant_id="t1", + user_id="u1", + endpoint_id="e1", + name="new-name", + settings={"x": 1}, + ) + + def test_update_validation_error(self, app): + api = EndpointItemApi() + method = unwrap(api.patch) + + payload = {"settings": {}} + + with ( + app.test_request_context("/", method="PATCH", json=payload), + ): + with pytest.raises(ValueError): + method(api, "e1") + + def test_update_service_failure(self, app): + api = EndpointItemApi() + 
method = unwrap(api.patch) + + payload = { + "name": "n", + "settings": {}, + } + + with ( + app.test_request_context("/", method="PATCH", json=payload), + patch("controllers.console.workspace.endpoint.EndpointService.update_endpoint", return_value=False), + ): + result = method(api, "e1") + + assert result["success"] is False + + +@pytest.mark.usefixtures("patch_current_account") +class TestDeprecatedEndpointDeleteApi: + def test_delete_success(self, app): + api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) payload = {"endpoint_id": "e1"} @@ -162,7 +273,7 @@ class TestEndpointDeleteApi: assert result["success"] is True def test_delete_invalid_payload(self, app): - api = EndpointDeleteApi() + api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) with ( @@ -172,7 +283,7 @@ class TestEndpointDeleteApi: method(api) def test_delete_service_failure(self, app): - api = EndpointDeleteApi() + api = DeprecatedEndpointDeleteApi() method = unwrap(api.post) payload = {"endpoint_id": "e1"} @@ -187,9 +298,9 @@ class TestEndpointDeleteApi: @pytest.mark.usefixtures("patch_current_account") -class TestEndpointUpdateApi: +class TestDeprecatedEndpointUpdateApi: def test_update_success(self, app): - api = EndpointUpdateApi() + api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) payload = { @@ -207,7 +318,7 @@ class TestEndpointUpdateApi: assert result["success"] is True def test_update_validation_error(self, app): - api = EndpointUpdateApi() + api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) payload = {"endpoint_id": "e1", "settings": {}} @@ -219,7 +330,7 @@ class TestEndpointUpdateApi: method(api) def test_update_service_failure(self, app): - api = EndpointUpdateApi() + api = DeprecatedEndpointUpdateApi() method = unwrap(api.post) payload = { @@ -237,6 +348,36 @@ class TestEndpointUpdateApi: assert result["success"] is False +class TestEndpointRouteMetadata: + def test_legacy_write_routes_are_marked_deprecated(self): + assert 
DeprecatedEndpointCreateApi.post.__apidoc__["deprecated"] is True + assert DeprecatedEndpointDeleteApi.post.__apidoc__["deprecated"] is True + assert DeprecatedEndpointUpdateApi.post.__apidoc__["deprecated"] is True + assert EndpointCollectionApi.post.__apidoc__.get("deprecated") is not True + assert EndpointItemApi.delete.__apidoc__.get("deprecated") is not True + assert EndpointItemApi.patch.__apidoc__.get("deprecated") is not True + + def test_canonical_and_legacy_write_routes_are_registered(self): + route_map = { + resource.__name__: urls + for resource, urls, _route_doc, _kwargs in console_ns.resources + if resource.__name__ + in { + "EndpointCollectionApi", + "EndpointItemApi", + "DeprecatedEndpointCreateApi", + "DeprecatedEndpointDeleteApi", + "DeprecatedEndpointUpdateApi", + } + } + + assert route_map["EndpointCollectionApi"] == ("/workspaces/current/endpoints",) + assert route_map["EndpointItemApi"] == ("/workspaces/current/endpoints/",) + assert route_map["DeprecatedEndpointCreateApi"] == ("/workspaces/current/endpoints/create",) + assert route_map["DeprecatedEndpointDeleteApi"] == ("/workspaces/current/endpoints/delete",) + assert route_map["DeprecatedEndpointUpdateApi"] == ("/workspaces/current/endpoints/update",) + + @pytest.mark.usefixtures("patch_current_account") class TestEndpointEnableApi: def test_enable_success(self, app): diff --git a/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py b/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py index 44feacf2ad..1422f29849 100644 --- a/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py +++ b/api/tests/unit_tests/controllers/console/workspace/test_tool_providers.py @@ -22,7 +22,7 @@ _WRAPS_MODULE: ModuleType | None = None @contextmanager def _mock_db(): - mock_session = SimpleNamespace(query=lambda *args, **kwargs: SimpleNamespace(first=lambda: True)) + mock_session = SimpleNamespace(scalar=lambda *args, **kwargs: True) with 
patch("extensions.ext_database.db.session", mock_session): yield diff --git a/api/tests/unit_tests/controllers/service_api/app/test_app.py b/api/tests/unit_tests/controllers/service_api/app/test_app.py index f48ace427d..f5d93b5ac3 100644 --- a/api/tests/unit_tests/controllers/service_api/app/test_app.py +++ b/api/tests/unit_tests/controllers/service_api/app/test_app.py @@ -12,7 +12,7 @@ from controllers.service_api.app.app import AppInfoApi, AppMetaApi, AppParameter from controllers.service_api.app.error import AppUnavailableError from models.account import TenantStatus from models.model import App, AppMode -from tests.unit_tests.conftest import setup_mock_tenant_account_query +from tests.unit_tests.conftest import setup_mock_tenant_owner_execute_result class TestAppParameterApi: @@ -74,7 +74,7 @@ class TestAppParameterApi: # Mock tenant owner info for login mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -120,7 +120,7 @@ class TestAppParameterApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -161,7 +161,7 @@ class TestAppParameterApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act & Assert with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -200,7 +200,7 @@ class TestAppParameterApi: 
mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act & Assert with app.test_request_context("/parameters", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -263,7 +263,7 @@ class TestAppMetaApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/meta", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -331,7 +331,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -388,7 +388,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -434,7 +434,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): @@ -486,7 +486,7 @@ class TestAppInfoApi: mock_account = Mock() mock_account.current_tenant = mock_tenant - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_account) + 
setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) # Act with app.test_request_context("/info", method="GET", headers={"Authorization": "Bearer test_token"}): diff --git a/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py new file mode 100644 index 0000000000..846d5368f3 --- /dev/null +++ b/api/tests/unit_tests/controllers/service_api/app/test_hitl_service_api.py @@ -0,0 +1,707 @@ +"""Dedicated tests for HITL behavior exposed through the Service API.""" + +from __future__ import annotations + +import json +import sys +from collections.abc import Sequence +from dataclasses import dataclass +from datetime import UTC, datetime +from types import SimpleNamespace +from unittest.mock import ANY, MagicMock, Mock + +import pytest + +import services.app_generate_service as ags_module +from controllers.service_api.app.workflow_events import WorkflowEventsApi +from core.app.app_config.entities import AppAdditionalFeatures, WorkflowUIBasedAppConfig +from core.app.apps.common import workflow_response_converter +from core.app.apps.common.workflow_response_converter import WorkflowResponseConverter +from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom, WorkflowAppGenerateEntity +from core.app.entities.queue_entities import QueueWorkflowPausedEvent +from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, + HumanInputRequiredResponse, + WorkflowAppPausedBlockingResponse, + WorkflowPauseStreamResponse, +) +from core.app.layers.pause_state_persist_layer import WorkflowResumptionContext, _WorkflowGenerateEntityWrapper +from core.workflow.human_input_policy import HumanInputSurface +from core.workflow.system_variables import build_system_variables +from graphon.entities import WorkflowStartReason +from graphon.entities.pause_reason import HumanInputRequired, PauseReasonType +from graphon.enums 
import WorkflowExecutionStatus, WorkflowNodeExecutionStatus +from graphon.nodes.human_input.entities import FormInput, UserAction +from graphon.nodes.human_input.enums import FormInputType +from graphon.runtime import GraphRuntimeState, VariablePool +from models.account import Account +from models.enums import CreatorUserRole +from models.model import AppMode +from models.workflow import WorkflowRun +from repositories.api_workflow_node_execution_repository import WorkflowNodeExecutionSnapshot +from repositories.entities.workflow_pause import WorkflowPauseEntity +from services.app_generate_service import AppGenerateService +from services.workflow_event_snapshot_service import _build_snapshot_events +from tests.unit_tests.controllers.service_api.conftest import _unwrap + + +class _DummyRateLimit: + @staticmethod + def gen_request_key() -> str: + return "dummy-request-id" + + def __init__(self, client_id: str, max_active_requests: int) -> None: + self.client_id = client_id + self.max_active_requests = max_active_requests + + def enter(self, request_id: str | None = None) -> str: + return request_id or "dummy-request-id" + + def exit(self, request_id: str) -> None: + return None + + def generate(self, generator, request_id: str): + return generator + + +def _mock_repo_for_run(monkeypatch: pytest.MonkeyPatch, workflow_run): + workflow_events_module = sys.modules["controllers.service_api.app.workflow_events"] + repo = SimpleNamespace(get_workflow_run_by_id_and_tenant_id=lambda **_kwargs: workflow_run) + monkeypatch.setattr( + workflow_events_module.DifyAPIRepositoryFactory, + "create_api_workflow_run_repository", + lambda *_args, **_kwargs: repo, + ) + monkeypatch.setattr(workflow_events_module, "db", SimpleNamespace(engine=object())) + return workflow_events_module + + +def _build_service_api_pause_converter() -> WorkflowResponseConverter: + application_generate_entity = SimpleNamespace( + inputs={}, + files=[], + invoke_from=InvokeFrom.SERVICE_API, + 
app_config=SimpleNamespace(app_id="app-id", tenant_id="tenant-id"), + ) + system_variables = build_system_variables( + user_id="user", + app_id="app-id", + workflow_id="workflow-id", + workflow_execution_id="run-id", + ) + user = MagicMock(spec=Account) + user.id = "account-id" + user.name = "Tester" + user.email = "tester@example.com" + return WorkflowResponseConverter( + application_generate_entity=application_generate_entity, + user=user, + system_variables=system_variables, + ) + + +def _build_advanced_chat_paused_blocking_response() -> AdvancedChatPausedBlockingResponse: + data = AdvancedChatPausedBlockingResponse.Data( + id="msg-1", + mode="chat", + conversation_id="c1", + message_id="m1", + workflow_run_id="run-1", + answer="partial", + metadata={"usage": {"total_tokens": 1}}, + created_at=1, + paused_nodes=["node-1"], + reasons=[ + { + "type": PauseReasonType.HUMAN_INPUT_REQUIRED, + "form_id": "form-1", + "expiration_time": 100, + } + ], + status=WorkflowExecutionStatus.PAUSED, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ) + return AdvancedChatPausedBlockingResponse(task_id="t1", data=data) + + +def _build_workflow_paused_blocking_response() -> WorkflowAppPausedBlockingResponse: + return WorkflowAppPausedBlockingResponse( + task_id="t1", + workflow_run_id="r1", + data=WorkflowAppPausedBlockingResponse.Data( + id="r1", + workflow_id="wf-1", + status=WorkflowExecutionStatus.PAUSED, + outputs={}, + error=None, + elapsed_time=0.5, + total_tokens=0, + total_steps=2, + created_at=1, + finished_at=None, + paused_nodes=["node-1"], + reasons=[{"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 100}], + ), + ) + + +@dataclass(frozen=True) +class _FakePauseEntity(WorkflowPauseEntity): + pause_id: str + workflow_run_id: str + paused_at_value: datetime + pause_reasons: Sequence[HumanInputRequired] + + @property + def id(self) -> str: + return self.pause_id + + @property + def workflow_execution_id(self) -> str: + return 
self.workflow_run_id + + def get_state(self) -> bytes: + raise AssertionError("state is not required for snapshot tests") + + @property + def resumed_at(self) -> datetime | None: + return None + + @property + def paused_at(self) -> datetime: + return self.paused_at_value + + def get_pause_reasons(self) -> Sequence[HumanInputRequired]: + return self.pause_reasons + + +def _build_workflow_run(status: WorkflowExecutionStatus) -> WorkflowRun: + return WorkflowRun( + id="run-1", + tenant_id="tenant-1", + app_id="app-1", + workflow_id="workflow-1", + type="workflow", + triggered_from="app-run", + version="v1", + graph=None, + inputs=json.dumps({"input": "value"}), + status=status, + outputs=json.dumps({}), + error=None, + elapsed_time=0.0, + total_tokens=0, + total_steps=0, + created_by_role=CreatorUserRole.END_USER, + created_by="user-1", + created_at=datetime(2024, 1, 1, tzinfo=UTC), + ) + + +def _build_snapshot(status: WorkflowNodeExecutionStatus) -> WorkflowNodeExecutionSnapshot: + created_at = datetime(2024, 1, 1, tzinfo=UTC) + finished_at = datetime(2024, 1, 1, 0, 0, 5, tzinfo=UTC) + return WorkflowNodeExecutionSnapshot( + execution_id="exec-1", + node_id="node-1", + node_type="human-input", + title="Human Input", + index=1, + status=status.value, + elapsed_time=0.5, + created_at=created_at, + finished_at=finished_at, + iteration_id=None, + loop_id=None, + ) + + +def _build_resumption_context(task_id: str) -> WorkflowResumptionContext: + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant-1", + app_id="app-1", + app_mode=AppMode.WORKFLOW, + workflow_id="workflow-1", + ) + generate_entity = WorkflowAppGenerateEntity( + task_id=task_id, + app_config=app_config, + inputs={}, + files=[], + user_id="user-1", + stream=True, + invoke_from=InvokeFrom.EXPLORE, + call_depth=0, + workflow_execution_id="run-1", + ) + runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0) + runtime_state.register_paused_node("node-1") + runtime_state.outputs = 
{"result": "value"} + wrapper = _WorkflowGenerateEntityWrapper(entity=generate_entity) + return WorkflowResumptionContext( + generate_entity=wrapper, + serialized_graph_runtime_state=runtime_state.dumps(), + ) + + +class TestHitlServiceApi: + # Service API event-stream continuation + def test_workflow_events_continue_on_pause_keeps_stream_open(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + msg_generator.retrieve_events.return_value = ["raw-event"] + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: streamed\n\n"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1&continue_on_pause=true", method="GET"): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: streamed\n\n" + msg_generator.retrieve_events.assert_called_once_with( + AppMode.WORKFLOW, + "run-1", + terminal_events=[], + ) + workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) + + def test_workflow_events_snapshot_continue_on_pause_keeps_pause_open( + self, app, monkeypatch: pytest.MonkeyPatch + ) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + 
workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: snapshot\n\n"]) + snapshot_builder = Mock(return_value=["snapshot-events"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + monkeypatch.setattr(workflow_events_module, "build_workflow_event_stream", snapshot_builder) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context( + "/workflow/run-1/events?user=u1&include_state_snapshot=true&continue_on_pause=true", + method="GET", + ): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: snapshot\n\n" + msg_generator.retrieve_events.assert_not_called() + snapshot_builder.assert_called_once_with( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=ANY, + human_input_surface=HumanInputSurface.SERVICE_API, + close_on_pause=False, + ) + workflow_generator.convert_to_event_stream.assert_called_once_with(["snapshot-events"]) + + def test_advanced_chat_blocking_injects_pause_state_config(self, monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setattr(ags_module.dify_config, "BILLING_ENABLED", False) + monkeypatch.setattr(ags_module, "RateLimit", _DummyRateLimit) + + workflow = MagicMock() + workflow.created_by = "owner-id" + monkeypatch.setattr(AppGenerateService, "_get_workflow", lambda *args, **kwargs: workflow) + monkeypatch.setattr(ags_module.session_factory, "get_session_maker", lambda: "session-maker") + + generator_instance = MagicMock() + 
generator_instance.generate.return_value = {"result": "advanced-blocking"} + generator_instance.convert_to_event_stream.side_effect = lambda payload: payload + monkeypatch.setattr(ags_module, "AdvancedChatAppGenerator", lambda: generator_instance) + + app_model = MagicMock() + app_model.mode = AppMode.ADVANCED_CHAT + app_model.id = "app-id" + app_model.tenant_id = "tenant-id" + app_model.max_active_requests = 0 + app_model.is_agent = False + + user = MagicMock() + user.id = "user-id" + + result = AppGenerateService.generate( + app_model=app_model, + user=user, + args={"workflow_id": None, "query": "hi", "inputs": {}}, + invoke_from=InvokeFrom.SERVICE_API, + streaming=False, + ) + + assert result == {"result": "advanced-blocking"} + call_kwargs = generator_instance.generate.call_args.kwargs + assert call_kwargs["streaming"] is False + assert call_kwargs["pause_state_config"] is not None + assert call_kwargs["pause_state_config"].session_factory == "session-maker" + assert call_kwargs["pause_state_config"].state_owner_user_id == "owner-id" + + # Blocking payload contract + def test_advanced_chat_blocking_pause_payload_contract(self) -> None: + from core.app.apps.advanced_chat.generate_response_converter import AdvancedChatAppGenerateResponseConverter + + response = AdvancedChatAppGenerateResponseConverter.convert_blocking_full_response( + _build_advanced_chat_paused_blocking_response() + ) + + assert response["event"] == "workflow_paused" + assert response["workflow_run_id"] == "run-1" + assert response["answer"] == "partial" + assert response["data"]["reasons"][0]["type"] == PauseReasonType.HUMAN_INPUT_REQUIRED + assert response["data"]["reasons"][0]["expiration_time"] == 100 + assert "human_input_forms" not in response["data"] + + def test_workflow_blocking_pause_payload_contract(self) -> None: + from core.app.apps.workflow.generate_response_converter import WorkflowAppGenerateResponseConverter + + response = 
WorkflowAppGenerateResponseConverter.convert_blocking_full_response( + _build_workflow_paused_blocking_response() + ) + + assert response["workflow_run_id"] == "r1" + assert response["data"]["status"] == WorkflowExecutionStatus.PAUSED + assert response["data"]["paused_nodes"] == ["node-1"] + assert response["data"]["reasons"] == [ + {"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 100} + ] + assert "human_input_forms" not in response["data"] + + def test_advanced_chat_blocking_pipeline_pause_payload_contract(self) -> None: + from core.app.app_config.entities import AppAdditionalFeatures + from core.app.apps.advanced_chat.generate_task_pipeline import AdvancedChatAppGenerateTaskPipeline + from models.enums import MessageStatus + from models.model import EndUser + + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant", + app_id="app", + app_mode=AppMode.ADVANCED_CHAT, + additional_features=AppAdditionalFeatures(), + variables=[], + workflow_id="workflow-id", + ) + application_generate_entity = AdvancedChatAppGenerateEntity.model_construct( + task_id="task", + app_config=app_config, + inputs={}, + query="hello", + files=[], + user_id="user", + stream=False, + invoke_from=InvokeFrom.WEB_APP, + extras={}, + trace_manager=None, + workflow_run_id="run-id", + ) + pipeline = AdvancedChatAppGenerateTaskPipeline( + application_generate_entity=application_generate_entity, + workflow=SimpleNamespace(id="workflow-id", tenant_id="tenant", features_dict={}), + queue_manager=SimpleNamespace(invoke_from=InvokeFrom.WEB_APP, graph_runtime_state=None), + conversation=SimpleNamespace(id="conv-id", mode=AppMode.ADVANCED_CHAT), + message=SimpleNamespace( + id="message-id", + query="hello", + created_at=datetime.utcnow(), + status=MessageStatus.NORMAL, + answer="", + ), + user=EndUser(tenant_id="tenant", type="session", name="tester", session_id="session"), + stream=False, + dialogue_count=1, + draft_var_saver_factory=lambda **kwargs: None, + ) + 
pipeline._task_state.answer = "partial answer" + pipeline._workflow_run_id = "run-id" + + def _gen(): + yield HumanInputRequiredResponse( + task_id="task", + workflow_run_id="run-id", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Approval", + form_content="Need approval", + inputs=[], + actions=[UserAction(id="approve", title="Approve")], + display_in_ui=True, + form_token="token-1", + resolved_default_values={}, + expiration_time=123, + ), + ) + yield WorkflowPauseStreamResponse( + task_id="task", + workflow_run_id="run-id", + data=WorkflowPauseStreamResponse.Data( + workflow_run_id="run-id", + paused_nodes=["node-1"], + outputs={}, + reasons=[ + { + "type": PauseReasonType.HUMAN_INPUT_REQUIRED, + "form_id": "form-1", + "node_id": "node-1", + "expiration_time": 123, + }, + ], + status="paused", + created_at=1, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ), + ) + + response = pipeline._to_blocking_response(_gen()) + + assert isinstance(response, AdvancedChatPausedBlockingResponse) + assert response.data.answer == "partial answer" + assert response.data.workflow_run_id == "run-id" + assert response.data.reasons[0]["form_id"] == "form-1" + assert response.data.reasons[0]["expiration_time"] == 123 + + def test_workflow_blocking_pipeline_pause_payload_contract(self, monkeypatch: pytest.MonkeyPatch) -> None: + from core.app.apps.workflow import generate_task_pipeline as workflow_pipeline_module + from core.app.apps.workflow.generate_task_pipeline import WorkflowAppGenerateTaskPipeline + + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant", + app_id="app", + app_mode=AppMode.WORKFLOW, + additional_features=AppAdditionalFeatures(), + variables=[], + workflow_id="workflow-id", + ) + application_generate_entity = WorkflowAppGenerateEntity.model_construct( + task_id="task", + app_config=app_config, + inputs={}, + files=[], + user_id="user", + stream=False, + invoke_from=InvokeFrom.WEB_APP, + 
trace_manager=None, + workflow_execution_id="run-id", + extras={}, + call_depth=0, + ) + pipeline = WorkflowAppGenerateTaskPipeline( + application_generate_entity=application_generate_entity, + workflow=SimpleNamespace(id="workflow-id", tenant_id="tenant", features_dict={}), + queue_manager=SimpleNamespace(invoke_from=InvokeFrom.WEB_APP, graph_runtime_state=None), + user=SimpleNamespace(id="user", session_id="session"), + stream=False, + draft_var_saver_factory=lambda **kwargs: None, + ) + monkeypatch.setattr(workflow_pipeline_module.time, "time", lambda: 1700000000) + + def _gen(): + yield HumanInputRequiredResponse( + task_id="task", + workflow_run_id="run", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Human Input", + form_content="content", + expiration_time=1, + ), + ) + yield WorkflowPauseStreamResponse( + task_id="task", + workflow_run_id="run", + data=WorkflowPauseStreamResponse.Data( + workflow_run_id="run", + status=WorkflowExecutionStatus.PAUSED, + outputs={}, + paused_nodes=["node-1"], + reasons=[{"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 1}], + created_at=1, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ), + ) + + response = pipeline._to_blocking_response(_gen()) + + assert isinstance(response, WorkflowAppPausedBlockingResponse) + assert response.data.status == WorkflowExecutionStatus.PAUSED + assert response.data.paused_nodes == ["node-1"] + assert response.data.reasons == [{"TYPE": "human_input_required", "form_id": "form-1", "expiration_time": 1}] + + def test_service_api_pause_event_serializes_hitl_reason(self, monkeypatch: pytest.MonkeyPatch) -> None: + converter = _build_service_api_pause_converter() + converter.workflow_start_to_stream_response( + task_id="task", + workflow_run_id="run-id", + workflow_id="workflow-id", + reason=WorkflowStartReason.INITIAL, + ) + + expiration_time = datetime(2024, 1, 1, tzinfo=UTC) + + class _FakeSession: + def execute(self, 
_stmt): + return [("form-1", expiration_time, '{"display_in_ui": true}')] + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc, tb): + return False + + monkeypatch.setattr(workflow_response_converter, "Session", lambda **_: _FakeSession()) + monkeypatch.setattr(workflow_response_converter, "db", SimpleNamespace(engine=object())) + monkeypatch.setattr( + workflow_response_converter, + "load_form_tokens_by_form_id", + lambda form_ids, session=None, surface=None: {"form-1": "token"}, + ) + + reason = HumanInputRequired( + form_id="form-1", + form_content="Rendered", + inputs=[ + FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="field", default=None), + ], + actions=[UserAction(id="approve", title="Approve")], + display_in_ui=True, + node_id="node-id", + node_title="Human Step", + form_token="token", + ) + queue_event = QueueWorkflowPausedEvent( + reasons=[reason], + outputs={"answer": "value"}, + paused_nodes=["node-id"], + ) + + runtime_state = SimpleNamespace(total_tokens=0, node_run_steps=0) + responses = converter.workflow_pause_to_stream_response( + event=queue_event, + task_id="task", + graph_runtime_state=runtime_state, + ) + + assert isinstance(responses[-1], WorkflowPauseStreamResponse) + pause_resp = responses[-1] + assert pause_resp.workflow_run_id == "run-id" + assert pause_resp.data.paused_nodes == ["node-id"] + assert pause_resp.data.outputs == {} + assert pause_resp.data.reasons[0]["TYPE"] == "human_input_required" + assert pause_resp.data.reasons[0]["form_id"] == "form-1" + assert pause_resp.data.reasons[0]["form_token"] == "token" + assert pause_resp.data.reasons[0]["expiration_time"] == int(expiration_time.timestamp()) + + assert isinstance(responses[0], HumanInputRequiredResponse) + hi_resp = responses[0] + assert hi_resp.data.form_id == "form-1" + assert hi_resp.data.node_id == "node-id" + assert hi_resp.data.node_title == "Human Step" + assert hi_resp.data.inputs[0].output_variable_name == "field" + assert 
hi_resp.data.actions[0].id == "approve" + assert hi_resp.data.display_in_ui is True + assert hi_resp.data.form_token == "token" + assert hi_resp.data.expiration_time == int(expiration_time.timestamp()) + + # Snapshot payload contract + def test_snapshot_events_include_pause_payload_contract(self, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = _build_workflow_run(WorkflowExecutionStatus.PAUSED) + snapshot = _build_snapshot(WorkflowNodeExecutionStatus.PAUSED) + resumption_context = _build_resumption_context("task-ctx") + monkeypatch.setattr( + "services.workflow_event_snapshot_service.load_form_tokens_by_form_id", + lambda form_ids, session=None, surface=None: {"form-1": "wtok"}, + ) + + class _SessionContext: + def __init__(self, session): + self._session = session + + def __enter__(self): + return self._session + + def __exit__(self, exc_type, exc, tb): + return False + + def session_maker() -> _SessionContext: + return _SessionContext( + SimpleNamespace( + execute=lambda _stmt: [("form-1", datetime(2024, 1, 1, tzinfo=UTC), '{"display_in_ui": true}')], + ) + ) + + pause_entity = _FakePauseEntity( + pause_id="pause-1", + workflow_run_id="run-1", + paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), + pause_reasons=[ + HumanInputRequired( + form_id="form-1", + form_content="content", + node_id="node-1", + node_title="Human Input", + form_token="wtok", + ) + ], + ) + + events = _build_snapshot_events( + workflow_run=workflow_run, + node_snapshots=[snapshot], + task_id="task-ctx", + message_context=None, + pause_entity=pause_entity, + resumption_context=resumption_context, + session_maker=session_maker, + ) + + assert [event["event"] for event in events] == [ + "workflow_started", + "node_started", + "node_finished", + "human_input_required", + "workflow_paused", + ] + assert events[2]["data"]["status"] == WorkflowNodeExecutionStatus.PAUSED.value + assert events[3]["data"]["form_token"] == "wtok" + assert events[3]["data"]["expiration_time"] == 
int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + pause_data = events[-1]["data"] + assert pause_data["paused_nodes"] == ["node-1"] + assert pause_data["outputs"] == {"result": "value"} + assert pause_data["reasons"][0]["TYPE"] == "human_input_required" + assert pause_data["reasons"][0]["form_token"] == "wtok" + assert pause_data["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + assert pause_data["status"] == WorkflowExecutionStatus.PAUSED.value + assert pause_data["created_at"] == int(workflow_run.created_at.timestamp()) + assert pause_data["elapsed_time"] == workflow_run.elapsed_time + assert pause_data["total_tokens"] == workflow_run.total_tokens + assert pause_data["total_steps"] == workflow_run.total_steps diff --git a/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py b/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py new file mode 100644 index 0000000000..531f722ceb --- /dev/null +++ b/api/tests/unit_tests/controllers/service_api/app/test_human_input_form.py @@ -0,0 +1,184 @@ +"""Unit tests for Service API human input form endpoints.""" + +from __future__ import annotations + +import json +import sys +from datetime import UTC, datetime +from types import SimpleNamespace +from unittest.mock import Mock + +import pytest +from werkzeug.exceptions import NotFound + +from controllers.service_api.app.human_input_form import WorkflowHumanInputFormApi +from models.human_input import RecipientType +from tests.unit_tests.controllers.service_api.conftest import _unwrap + + +class TestWorkflowHumanInputFormApi: + def test_get_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + definition = SimpleNamespace( + model_dump=lambda: { + "rendered_content": "Rendered form content", + "inputs": [{"output_variable_name": "name"}], + "default_values": {"name": "Alice", "age": 30, "meta": {"k": "v"}}, + "user_actions": [{"id": "approve", "title": "Approve"}], + } + ) + form = 
SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + expiration_time=datetime(2099, 1, 1, tzinfo=UTC), + get_definition=lambda: definition, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + + with app.test_request_context("/form/human_input/token-1", method="GET"): + response = handler(api, app_model=app_model, form_token="token-1") + + payload = json.loads(response.get_data(as_text=True)) + assert payload == { + "form_content": "Rendered form content", + "inputs": [{"output_variable_name": "name"}], + "resolved_default_values": {"name": "Alice", "age": "30", "meta": '{"k": "v"}'}, + "user_actions": [{"id": "approve", "title": "Approve"}], + "expiration_time": int(form.expiration_time.timestamp()), + } + service_mock.get_form_by_token.assert_called_once_with("token-1") + service_mock.ensure_form_active.assert_called_once_with(form) + + def test_get_form_not_in_app(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + form = SimpleNamespace( + app_id="another-app", + tenant_id="tenant-1", + expiration_time=datetime(2099, 1, 1, tzinfo=UTC), + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + + with 
app.test_request_context("/form/human_input/token-1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, form_token="token-1") + + @pytest.mark.parametrize( + "recipient_type", + [ + RecipientType.CONSOLE, + RecipientType.BACKSTAGE, + RecipientType.EMAIL_MEMBER, + RecipientType.EMAIL_EXTERNAL, + ], + ) + def test_get_rejects_non_service_api_recipient_types( + self, app, monkeypatch: pytest.MonkeyPatch, recipient_type: RecipientType + ) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=recipient_type, + expiration_time=datetime(2099, 1, 1, tzinfo=UTC), + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + + with app.test_request_context("/form/human_input/token-1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, form_token="token-1") + + service_mock.ensure_form_active.assert_not_called() + + def test_post_success(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.post) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + end_user = 
SimpleNamespace(id="end-user-1") + + with app.test_request_context( + "/form/human_input/token-1", + method="POST", + json={"inputs": {"name": "Alice"}, "action": "approve", "user": "external-1"}, + ): + response, status = handler(api, app_model=app_model, end_user=end_user, form_token="token-1") + + assert response == {} + assert status == 200 + service_mock.submit_form_by_token.assert_called_once_with( + recipient_type=RecipientType.STANDALONE_WEB_APP, + form_token="token-1", + selected_action_id="approve", + form_data={"name": "Alice"}, + submission_end_user_id="end-user-1", + ) + + @pytest.mark.parametrize( + "recipient_type", + [ + RecipientType.CONSOLE, + RecipientType.BACKSTAGE, + RecipientType.EMAIL_MEMBER, + RecipientType.EMAIL_EXTERNAL, + ], + ) + def test_post_rejects_non_service_api_recipient_types( + self, app, monkeypatch: pytest.MonkeyPatch, recipient_type: RecipientType + ) -> None: + form = SimpleNamespace( + app_id="app-1", + tenant_id="tenant-1", + recipient_type=recipient_type, + ) + service_mock = Mock() + service_mock.get_form_by_token.return_value = form + workflow_module = sys.modules["controllers.service_api.app.human_input_form"] + monkeypatch.setattr(workflow_module, "HumanInputService", lambda _engine: service_mock) + monkeypatch.setattr(workflow_module, "db", SimpleNamespace(engine=object())) + + api = WorkflowHumanInputFormApi() + handler = _unwrap(api.post) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1") + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context( + "/form/human_input/token-1", + method="POST", + json={"inputs": {"name": "Alice"}, "action": "approve", "user": "external-1"}, + ): + with pytest.raises(NotFound): + handler(api, app_model=app_model, end_user=end_user, form_token="token-1") + + service_mock.submit_form_by_token.assert_not_called() diff --git a/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py 
b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py new file mode 100644 index 0000000000..f45a7f9632 --- /dev/null +++ b/api/tests/unit_tests/controllers/service_api/app/test_workflow_events.py @@ -0,0 +1,166 @@ +"""Unit tests for Service API workflow event stream endpoints.""" + +from __future__ import annotations + +import json +import sys +from datetime import UTC, datetime +from types import SimpleNamespace +from unittest.mock import Mock + +import pytest +from werkzeug.exceptions import NotFound + +from controllers.service_api.app.error import NotWorkflowAppError +from controllers.service_api.app.workflow_events import WorkflowEventsApi +from models.enums import CreatorUserRole +from models.model import AppMode +from tests.unit_tests.controllers.service_api.conftest import _unwrap + + +def _mock_repo_for_run(monkeypatch: pytest.MonkeyPatch, workflow_run): + workflow_events_module = sys.modules["controllers.service_api.app.workflow_events"] + repo = SimpleNamespace(get_workflow_run_by_id_and_tenant_id=lambda **_kwargs: workflow_run) + monkeypatch.setattr( + workflow_events_module.DifyAPIRepositoryFactory, + "create_api_workflow_run_repository", + lambda *_args, **_kwargs: repo, + ) + monkeypatch.setattr(workflow_events_module, "db", SimpleNamespace(engine=object())) + return workflow_events_module + + +class TestWorkflowEventsApi: + def test_wrong_app_mode(self, app) -> None: + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(mode=AppMode.CHAT.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + with pytest.raises(NotWorkflowAppError): + handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + def test_workflow_run_not_found(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + _mock_repo_for_run(monkeypatch, workflow_run=None) + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = 
SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + def test_workflow_run_permission_denied(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.ACCOUNT, + created_by="another-user", + finished_at=None, + ) + _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + with pytest.raises(NotFound): + handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + def test_finished_run_returns_sse(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=datetime(2099, 1, 1, tzinfo=UTC), + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + monkeypatch.setattr( + workflow_events_module.WorkflowResponseConverter, + "workflow_run_result_to_finish_response", + lambda **_kwargs: SimpleNamespace( + model_dump=lambda mode="json": {"task_id": "run-1", "status": "succeeded"}, + event=SimpleNamespace(value="workflow_finished"), + ), + ) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + response = handler(api, 
app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.mimetype == "text/event-stream" + body = response.get_data(as_text=True).strip() + assert body.startswith("data: ") + payload = json.loads(body[len("data: ") :]) + assert payload["task_id"] == "run-1" + assert payload["event"] == "workflow_finished" + + def test_running_run_streams_events(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = _mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + msg_generator.retrieve_events.return_value = ["raw-event"] + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: streamed\n\n"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1", method="GET"): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: streamed\n\n" + msg_generator.retrieve_events.assert_called_once_with( + AppMode.WORKFLOW, + "run-1", + terminal_events=None, + ) + workflow_generator.convert_to_event_stream.assert_called_once_with(["raw-event"]) + + def test_running_run_with_snapshot(self, app, monkeypatch: pytest.MonkeyPatch) -> None: + workflow_run = SimpleNamespace( + id="run-1", + app_id="app-1", + created_by_role=CreatorUserRole.END_USER, + created_by="end-user-1", + finished_at=None, + ) + workflow_events_module = 
_mock_repo_for_run(monkeypatch, workflow_run=workflow_run) + msg_generator = Mock() + workflow_generator = Mock() + workflow_generator.convert_to_event_stream.return_value = iter(["data: snapshot\n\n"]) + snapshot_builder = Mock(return_value=["snapshot-events"]) + monkeypatch.setattr(workflow_events_module, "MessageGenerator", lambda: msg_generator) + monkeypatch.setattr(workflow_events_module, "WorkflowAppGenerator", lambda: workflow_generator) + monkeypatch.setattr(workflow_events_module, "build_workflow_event_stream", snapshot_builder) + + api = WorkflowEventsApi() + handler = _unwrap(api.get) + app_model = SimpleNamespace(id="app-1", tenant_id="tenant-1", mode=AppMode.WORKFLOW.value) + end_user = SimpleNamespace(id="end-user-1") + + with app.test_request_context("/workflow/run-1/events?user=u1&include_state_snapshot=true", method="GET"): + response = handler(api, app_model=app_model, end_user=end_user, task_id="run-1") + + assert response.get_data(as_text=True) == "data: snapshot\n\n" + msg_generator.retrieve_events.assert_not_called() + snapshot_builder.assert_called_once() + workflow_generator.convert_to_event_stream.assert_called_once_with(["snapshot-events"]) diff --git a/api/tests/unit_tests/controllers/service_api/conftest.py b/api/tests/unit_tests/controllers/service_api/conftest.py index eddba5a517..8c89812cb4 100644 --- a/api/tests/unit_tests/controllers/service_api/conftest.py +++ b/api/tests/unit_tests/controllers/service_api/conftest.py @@ -15,7 +15,10 @@ from flask import Flask from core.rag.index_processor.constant.index_type import IndexStructureType from models.account import TenantStatus from models.model import App, AppMode, EndUser -from tests.unit_tests.conftest import setup_mock_tenant_account_query +from tests.unit_tests.conftest import ( + setup_mock_dataset_owner_execute_result, + setup_mock_tenant_owner_execute_result, +) @pytest.fixture @@ -123,9 +126,7 @@ class AuthenticationMocker: mock_db.session.get.side_effect = [mock_app, 
mock_tenant] if mock_account: - mock_ta = Mock() - mock_ta.account_id = mock_account.id - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_ta) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) @staticmethod def setup_dataset_auth(mock_db, mock_tenant, mock_account): @@ -133,8 +134,7 @@ class AuthenticationMocker: mock_ta = Mock() mock_ta.account_id = mock_account.id - mock_db.session.execute.return_value.one_or_none.return_value = (mock_tenant, mock_ta) - + setup_mock_dataset_owner_execute_result(mock_db, mock_tenant, mock_ta) mock_db.session.get.return_value = mock_account diff --git a/api/tests/unit_tests/controllers/service_api/dataset/test_document.py b/api/tests/unit_tests/controllers/service_api/dataset/test_document.py index 12d5e7345d..1b391e67ec 100644 --- a/api/tests/unit_tests/controllers/service_api/dataset/test_document.py +++ b/api/tests/unit_tests/controllers/service_api/dataset/test_document.py @@ -22,6 +22,8 @@ import pytest from werkzeug.exceptions import Forbidden, NotFound from controllers.service_api.dataset.document import ( + DeprecatedDocumentAddByTextApi, + DeprecatedDocumentUpdateByTextApi, DocumentAddByFileApi, DocumentAddByTextApi, DocumentApi, @@ -699,8 +701,8 @@ class TestDocumentApiDelete: ``delete`` is wrapped by ``@cloud_edition_billing_rate_limit_check`` which internally calls ``validate_and_get_api_token``. To bypass the decorator we call the original function via ``__wrapped__`` (preserved by - ``functools.wraps``). ``delete`` queries the dataset via - ``db.session.query(Dataset)`` directly, so we patch ``db`` at the + ``functools.wraps``). ``delete`` loads the dataset via + ``db.session.scalar(select(Dataset)...)``, so we patch ``db`` at the controller module. 
""" @@ -1005,7 +1007,7 @@ class TestDocumentAddByTextApi: # Act with app.test_request_context( - f"/datasets/{mock_dataset.id}/document/create_by_text", + f"/datasets/{mock_dataset.id}/document/create-by-text", method="POST", json={ "name": "Test Document", @@ -1037,7 +1039,7 @@ class TestDocumentAddByTextApi: # Act & Assert with app.test_request_context( - f"/datasets/{mock_dataset.id}/document/create_by_text", + f"/datasets/{mock_dataset.id}/document/create-by-text", method="POST", json={"name": "Test Document", "text": "Content"}, headers={"Authorization": "Bearer test_token"}, @@ -1066,7 +1068,7 @@ class TestDocumentAddByTextApi: # Act & Assert with app.test_request_context( - f"/datasets/{mock_dataset.id}/document/create_by_text", + f"/datasets/{mock_dataset.id}/document/create-by-text", method="POST", json={"name": "Test Document", "text": "Content"}, headers={"Authorization": "Bearer test_token"}, @@ -1093,6 +1095,20 @@ class TestArchivedDocumentImmutableError: assert error.code == 403 +class TestDocumentTextRouteDeprecation: + """Test that legacy underscore text routes stay marked deprecated.""" + + def test_create_by_text_legacy_alias_is_deprecated(self): + """Ensure only the legacy create-by-text alias is marked deprecated.""" + assert DeprecatedDocumentAddByTextApi.post.__apidoc__["deprecated"] is True + assert DocumentAddByTextApi.post.__apidoc__.get("deprecated") is not True + + def test_update_by_text_legacy_alias_is_deprecated(self): + """Ensure only the legacy update-by-text alias is marked deprecated.""" + assert DeprecatedDocumentUpdateByTextApi.post.__apidoc__["deprecated"] is True + assert DocumentUpdateByTextApi.post.__apidoc__.get("deprecated") is not True + + # ============================================================================= # Endpoint tests for DocumentUpdateByTextApi, DocumentAddByFileApi, # DocumentUpdateByFileApi. 
@@ -1162,7 +1178,7 @@ class TestDocumentUpdateByTextApiPost: doc_id = str(uuid.uuid4()) with app.test_request_context( - f"/datasets/{mock_dataset.id}/documents/{doc_id}/update_by_text", + f"/datasets/{mock_dataset.id}/documents/{doc_id}/update-by-text", method="POST", json={"name": "Updated Doc", "text": "New content"}, headers={"Authorization": "Bearer test_token"}, @@ -1195,7 +1211,7 @@ class TestDocumentUpdateByTextApiPost: doc_id = str(uuid.uuid4()) with app.test_request_context( - f"/datasets/{mock_dataset.id}/documents/{doc_id}/update_by_text", + f"/datasets/{mock_dataset.id}/documents/{doc_id}/update-by-text", method="POST", json={"name": "Doc", "text": "Content"}, headers={"Authorization": "Bearer test_token"}, diff --git a/api/tests/unit_tests/controllers/service_api/test_wraps.py b/api/tests/unit_tests/controllers/service_api/test_wraps.py index a2008e024b..6dfbdcf98e 100644 --- a/api/tests/unit_tests/controllers/service_api/test_wraps.py +++ b/api/tests/unit_tests/controllers/service_api/test_wraps.py @@ -24,8 +24,8 @@ from enums.cloud_plan import CloudPlan from models.account import TenantStatus from models.model import ApiToken from tests.unit_tests.conftest import ( - setup_mock_dataset_tenant_query, - setup_mock_tenant_account_query, + setup_mock_dataset_owner_execute_result, + setup_mock_tenant_owner_execute_result, ) @@ -141,14 +141,11 @@ class TestValidateAppToken: mock_account = Mock() mock_account.id = str(uuid.uuid4()) - mock_ta = Mock() - mock_ta.account_id = mock_account.id - # Use side_effect to return app first, then tenant via session.get() mock_db.session.get.side_effect = [mock_app, mock_tenant] - # Mock the tenant owner query (execute(select(...)).one_or_none()) - setup_mock_tenant_account_query(mock_db, mock_tenant, mock_ta) + # Mock the tenant owner execute result (execute(select(...)).one_or_none()) + setup_mock_tenant_owner_execute_result(mock_db, mock_tenant, mock_account) @validate_app_token def protected_view(app_model): @@ 
-471,7 +468,7 @@ class TestValidateDatasetToken: mock_account.current_tenant = mock_tenant # Mock the tenant account join query (execute(select(...)).one_or_none()) - setup_mock_dataset_tenant_query(mock_db, mock_tenant, mock_ta) + setup_mock_dataset_owner_execute_result(mock_db, mock_tenant, mock_ta) # Mock the account lookup via session.get() mock_db.session.get.return_value = mock_account diff --git a/api/tests/unit_tests/controllers/web/conftest.py b/api/tests/unit_tests/controllers/web/conftest.py index 274d78c9cf..b7f3244c6c 100644 --- a/api/tests/unit_tests/controllers/web/conftest.py +++ b/api/tests/unit_tests/controllers/web/conftest.py @@ -22,18 +22,16 @@ class FakeSession: def __init__(self, mapping: dict[str, Any] | None = None): self._mapping: dict[str, Any] = mapping or {} - self._model_name: str | None = None - def query(self, model: type) -> FakeSession: - self._model_name = model.__name__ - return self + def get(self, model: type, _ident: object) -> Any: + return self._mapping.get(model.__name__) - def where(self, *_args: object, **_kwargs: object) -> FakeSession: - return self - - def first(self) -> Any: - assert self._model_name is not None - return self._mapping.get(self._model_name) + def scalar(self, stmt: Any) -> Any: + try: + model = stmt.column_descriptions[0]["entity"] + except (AttributeError, IndexError, KeyError, TypeError): + return None + return self._mapping.get(model.__name__) class FakeDB: diff --git a/api/tests/unit_tests/controllers/web/test_human_input_form.py b/api/tests/unit_tests/controllers/web/test_human_input_form.py index a1dbc80b20..5f2dc19aab 100644 --- a/api/tests/unit_tests/controllers/web/test_human_input_form.py +++ b/api/tests/unit_tests/controllers/web/test_human_input_form.py @@ -36,18 +36,6 @@ class _FakeSession: def __init__(self, mapping: dict[str, Any]): self._mapping = mapping - self._model_name: str | None = None - - def query(self, model): - self._model_name = model.__name__ - return self - - def 
where(self, *args, **kwargs): - return self - - def first(self): - assert self._model_name is not None - return self._mapping.get(self._model_name) def get(self, model, ident): return self._mapping.get(model.__name__) diff --git a/api/tests/unit_tests/controllers/web/test_web_login.py b/api/tests/unit_tests/controllers/web/test_web_login.py index a01587d64a..13b953c04d 100644 --- a/api/tests/unit_tests/controllers/web/test_web_login.py +++ b/api/tests/unit_tests/controllers/web/test_web_login.py @@ -34,7 +34,6 @@ def _patch_wraps(): patch("controllers.console.wraps.FeatureService.get_system_features", return_value=wraps_features), patch("controllers.web.login.dify_config", web_dify), ): - mock_db.session.query.return_value.first.return_value = MagicMock() yield diff --git a/api/tests/unit_tests/core/app/app_config/features/test_additional_feature_managers.py b/api/tests/unit_tests/core/app/app_config/features/test_additional_feature_managers.py index dd00c3defc..0a0ffe657c 100644 --- a/api/tests/unit_tests/core/app/app_config/features/test_additional_feature_managers.py +++ b/api/tests/unit_tests/core/app/app_config/features/test_additional_feature_managers.py @@ -77,6 +77,38 @@ class TestAdditionalFeatureManagers: SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults( {"suggested_questions_after_answer": {"enabled": "yes"}} ) + with pytest.raises(ValueError): + SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults( + {"suggested_questions_after_answer": {"enabled": True, "prompt": 123}} + ) + with pytest.raises(ValueError, match="must be less than or equal to 1000 characters"): + SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults( + {"suggested_questions_after_answer": {"enabled": True, "prompt": "a" * 1001}} + ) + with pytest.raises(ValueError): + SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults( + {"suggested_questions_after_answer": {"enabled": True, "model": "bad"}} + ) + with 
pytest.raises(ValueError): + SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults( + {"suggested_questions_after_answer": {"enabled": True, "model": {"provider": "openai"}}} + ) + + validated_config, _ = SuggestedQuestionsAfterAnswerConfigManager.validate_and_set_defaults( + { + "suggested_questions_after_answer": { + "enabled": True, + "prompt": "custom prompt", + "model": { + "provider": "openai", + "name": "gpt-4o-mini", + "completion_params": {"max_tokens": 1024}, + }, + } + } + ) + assert validated_config["suggested_questions_after_answer"]["prompt"] == "custom prompt" + assert validated_config["suggested_questions_after_answer"]["model"]["name"] == "gpt-4o-mini" assert ( SuggestedQuestionsAfterAnswerConfigManager.convert({"suggested_questions_after_answer": {"enabled": True}}) diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py index 45d4b0e321..370f7abb8b 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_app_runner_conversation_variables.py @@ -154,7 +154,6 @@ class TestAdvancedChatAppRunnerConversationVariables: mock_sessionmaker.return_value.begin.return_value.__enter__.return_value = mock_session mock_sessionmaker.return_value.begin.return_value.__exit__ = MagicMock(return_value=False) mock_session_class.return_value.__enter__.return_value = MagicMock() - mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists mock_db.engine = MagicMock() # Mock GraphRuntimeState to accept the variable pool @@ -301,7 +300,6 @@ class TestAdvancedChatAppRunnerConversationVariables: mock_sessionmaker.return_value.begin.return_value.__enter__.return_value = mock_session mock_sessionmaker.return_value.begin.return_value.__exit__ = MagicMock(return_value=False) 
mock_session_class.return_value.__enter__.return_value = MagicMock() - mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists mock_db.engine = MagicMock() # Mock ConversationVariable.from_variable to return mock objects @@ -453,7 +451,6 @@ class TestAdvancedChatAppRunnerConversationVariables: mock_sessionmaker.return_value.begin.return_value.__enter__.return_value = mock_session mock_sessionmaker.return_value.begin.return_value.__exit__ = MagicMock(return_value=False) mock_session_class.return_value.__enter__.return_value = MagicMock() - mock_db.session.query.return_value.where.return_value.first.return_value = MagicMock() # App exists mock_db.engine = MagicMock() # Mock GraphRuntimeState to accept the variable pool diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py index f2df35d7d0..6debeb4fdd 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_response_converter.py @@ -1,7 +1,10 @@ from collections.abc import Generator +import pytest + from core.app.apps.advanced_chat.generate_response_converter import AdvancedChatAppGenerateResponseConverter from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, ChatbotAppBlockingResponse, ChatbotAppStreamResponse, ErrorStreamResponse, @@ -10,7 +13,8 @@ from core.app.entities.task_entities import ( NodeStartStreamResponse, PingStreamResponse, ) -from graphon.enums import WorkflowNodeExecutionStatus +from graphon.entities.pause_reason import PauseReasonType +from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus class TestAdvancedChatGenerateResponseConverter: @@ -28,6 +32,37 @@ class TestAdvancedChatGenerateResponseConverter: response = 
AdvancedChatAppGenerateResponseConverter.convert_blocking_simple_response(blocking) assert "usage" not in response["metadata"] + def test_blocking_full_response_derives_pause_data_from_model_dump(self, monkeypatch: pytest.MonkeyPatch): + data = AdvancedChatPausedBlockingResponse.Data( + id="msg-1", + mode="chat", + conversation_id="c1", + message_id="m1", + workflow_run_id="run-1", + answer="partial", + metadata={"usage": {"total_tokens": 1}}, + created_at=1, + paused_nodes=["node-1"], + reasons=[{"type": PauseReasonType.HUMAN_INPUT_REQUIRED, "form_id": "form-1"}], + status=WorkflowExecutionStatus.PAUSED, + elapsed_time=0.1, + total_tokens=0, + total_steps=0, + ) + original_model_dump = type(data).model_dump + + def _model_dump_with_future_field(self, *args, **kwargs): + payload = original_model_dump(self, *args, **kwargs) + payload["future_field"] = "future-value" + return payload + + monkeypatch.setattr(type(data), "model_dump", _model_dump_with_future_field) + blocking = AdvancedChatPausedBlockingResponse(task_id="t1", data=data) + + response = AdvancedChatAppGenerateResponseConverter.convert_blocking_full_response(blocking) + + assert response["data"]["future_field"] == "future-value" + def test_stream_simple_response_includes_node_events(self): node_start = NodeStartStreamResponse( task_id="t1", diff --git a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py index 29fd63c063..64bcfa9a18 100644 --- a/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/advanced_chat/test_generate_task_pipeline_core.py @@ -39,15 +39,19 @@ from core.app.entities.queue_entities import ( QueueWorkflowSucceededEvent, ) from core.app.entities.task_entities import ( + AdvancedChatPausedBlockingResponse, AnnotationReply, AnnotationReplyAccount, + HumanInputRequiredResponse, 
MessageAudioStreamResponse, + MessageEndStreamResponse, + PingStreamResponse, ) from core.base.tts.app_generator_tts_publisher import AudioTrunk from core.workflow.system_variables import build_system_variables +from graphon.entities.pause_reason import PauseReasonType from graphon.enums import BuiltinNodeTypes +from graphon.nodes.human_input.entities import UserAction from graphon.runtime import GraphRuntimeState, VariablePool from libs.datetime_utils import naive_utc_now from models.enums import MessageStatus @@ -123,6 +127,57 @@ class TestAdvancedChatGenerateTaskPipeline: assert response.data.answer == "done" assert response.data.metadata == {"k": "v"} + def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): + pipeline = _make_pipeline() + pipeline._task_state.answer = "partial answer" + pipeline._workflow_run_id = "run-id" + pipeline._graph_runtime_state = GraphRuntimeState( + variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + start_at=0.0, + total_tokens=7, + node_run_steps=3, + ) + + def _gen(): + yield HumanInputRequiredResponse( + task_id="task", + workflow_run_id="run-id", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Approval", + form_content="Need approval", + inputs=[], + actions=[UserAction(id="approve", title="Approve")], + display_in_ui=True, + form_token="token-1", + resolved_default_values={}, + expiration_time=123, + ), + ) + + response = pipeline._to_blocking_response(_gen()) + + assert isinstance(response, AdvancedChatPausedBlockingResponse) + assert response.data.workflow_run_id == "run-id" + assert response.data.status == "paused" + assert response.data.paused_nodes == ["node-1"] + assert response.data.reasons == [ + { + "type": PauseReasonType.HUMAN_INPUT_REQUIRED, + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Approval", + "form_content": "Need approval", + "inputs": [], + "actions": 
[{"id": "approve", "title": "Approve", "button_style": "default"}], + "display_in_ui": True, + "form_token": "token-1", + "resolved_default_values": {}, + "expiration_time": 123, + } + ] + def test_handle_text_chunk_event_updates_state(self): pipeline = _make_pipeline() pipeline._message_cycle_manager = SimpleNamespace( diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py index 9a2dc38f74..c36edf48fc 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_generator.py @@ -375,7 +375,7 @@ def test_generate_success_returns_converted(generator, mocker): workflow = MagicMock(id="wf", tenant_id="tenant", app_id="pipe", graph_dict={}) session = MagicMock() - session.query.return_value.where.return_value.first.return_value = workflow + session.get.return_value = workflow mocker.patch.object(module.db, "session", session) queue_manager = MagicMock() diff --git a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py index 618c8fd76f..603062a51c 100644 --- a/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py +++ b/api/tests/unit_tests/core/app/apps/pipeline/test_pipeline_runner.py @@ -132,11 +132,8 @@ def test_run_pipeline_not_found(mocker): app_generate_entity.single_iteration_run = None app_generate_entity.single_loop_run = None - query = MagicMock() - query.where.return_value.first.return_value = None - session = MagicMock() - session.query.return_value = query + session.get.side_effect = [None, None] mocker.patch.object(module.db, "session", session) runner = PipelineRunner( @@ -157,11 +154,9 @@ def test_run_workflow_not_initialized(mocker): app_generate_entity = _build_app_generate_entity() pipeline = MagicMock(id="pipe") - query_pipeline = MagicMock() - 
query_pipeline.where.return_value.first.return_value = pipeline session = MagicMock() - session.query.return_value = query_pipeline + session.get.side_effect = [None, pipeline] mocker.patch.object(module.db, "session", session) runner = PipelineRunner( diff --git a/api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py b/api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py new file mode 100644 index 0000000000..560652f8cb --- /dev/null +++ b/api/tests/unit_tests/core/app/apps/test_base_app_generate_response_converter.py @@ -0,0 +1,102 @@ +from __future__ import annotations + +from collections.abc import Generator + +from core.app.apps.base_app_generate_response_converter import AppGenerateResponseConverter +from core.app.entities.app_invoke_entities import InvokeFrom +from core.app.entities.task_entities import ( + AppStreamResponse, + PingStreamResponse, + WorkflowAppBlockingResponse, + WorkflowAppStreamResponse, +) +from graphon.enums import WorkflowExecutionStatus + + +class _DummyConverter(AppGenerateResponseConverter[WorkflowAppBlockingResponse]): + blocking_full_calls: list[WorkflowAppBlockingResponse] = [] + blocking_simple_calls: list[WorkflowAppBlockingResponse] = [] + stream_full_calls: list[Generator[AppStreamResponse, None, None]] = [] + stream_simple_calls: list[Generator[AppStreamResponse, None, None]] = [] + + @classmethod + def reset(cls) -> None: + cls.blocking_full_calls = [] + cls.blocking_simple_calls = [] + cls.stream_full_calls = [] + cls.stream_simple_calls = [] + + @classmethod + def convert_blocking_full_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: + cls.blocking_full_calls.append(blocking_response) + return {"kind": "blocking-full", "task_id": blocking_response.task_id} + + @classmethod + def convert_blocking_simple_response(cls, blocking_response: WorkflowAppBlockingResponse) -> dict[str, object]: + 
cls.blocking_simple_calls.append(blocking_response) + return {"kind": "blocking-simple", "task_id": blocking_response.task_id} + + @classmethod + def convert_stream_full_response( + cls, stream_response: Generator[AppStreamResponse, None, None] + ) -> Generator[dict | str, None, None]: + cls.stream_full_calls.append(stream_response) + yield {"kind": "stream-full"} + + @classmethod + def convert_stream_simple_response( + cls, stream_response: Generator[AppStreamResponse, None, None] + ) -> Generator[dict | str, None, None]: + cls.stream_simple_calls.append(stream_response) + yield {"kind": "stream-simple"} + + +def _build_blocking_response() -> WorkflowAppBlockingResponse: + return WorkflowAppBlockingResponse( + task_id="task-1", + workflow_run_id="run-1", + data=WorkflowAppBlockingResponse.Data( + id="run-1", + workflow_id="workflow-1", + status=WorkflowExecutionStatus.SUCCEEDED, + outputs={"ok": True}, + error=None, + elapsed_time=0.1, + total_tokens=0, + total_steps=1, + created_at=1, + finished_at=2, + ), + ) + + +def _build_stream_response() -> Generator[AppStreamResponse, None, None]: + yield WorkflowAppStreamResponse( + workflow_run_id="run-1", + stream_response=PingStreamResponse(task_id="task-1"), + ) + + +def test_convert_routes_blocking_response_by_invoke_from() -> None: + _DummyConverter.reset() + blocking_response = _build_blocking_response() + + full_result = _DummyConverter.convert(blocking_response, InvokeFrom.SERVICE_API) + simple_result = _DummyConverter.convert(blocking_response, InvokeFrom.WEB_APP) + + assert full_result == {"kind": "blocking-full", "task_id": "task-1"} + assert simple_result == {"kind": "blocking-simple", "task_id": "task-1"} + assert _DummyConverter.blocking_full_calls == [blocking_response] + assert _DummyConverter.blocking_simple_calls == [blocking_response] + + +def test_convert_routes_stream_response_by_invoke_from() -> None: + _DummyConverter.reset() + + full_result = list(_DummyConverter.convert(_build_stream_response(), 
InvokeFrom.SERVICE_API)) + simple_result = list(_DummyConverter.convert(_build_stream_response(), InvokeFrom.WEB_APP)) + + assert full_result == [{"kind": "stream-full"}] + assert simple_result == [{"kind": "stream-simple"}] + assert len(_DummyConverter.stream_full_calls) == 1 + assert len(_DummyConverter.stream_simple_calls) == 1 diff --git a/api/tests/unit_tests/core/app/apps/test_message_generator.py b/api/tests/unit_tests/core/app/apps/test_message_generator.py index 25377e633e..90c9abf35c 100644 --- a/api/tests/unit_tests/core/app/apps/test_message_generator.py +++ b/api/tests/unit_tests/core/app/apps/test_message_generator.py @@ -1,6 +1,7 @@ from unittest.mock import Mock, patch from core.app.apps.message_generator import MessageGenerator +from core.app.entities.task_entities import StreamEvent from models.model import AppMode @@ -23,7 +24,21 @@ class TestMessageGenerator: "core.app.apps.message_generator.stream_topic_events", return_value=iter([{"event": "ping"}]) ) as mock_stream, ): - events = list(MessageGenerator.retrieve_events(AppMode.WORKFLOW, "run-1", idle_timeout=1, ping_interval=2)) + events = list( + MessageGenerator.retrieve_events( + AppMode.WORKFLOW, + "run-1", + idle_timeout=1, + ping_interval=2, + terminal_events=[StreamEvent.WORKFLOW_FINISHED.value], + ) + ) assert events == [{"event": "ping"}] - mock_stream.assert_called_once() + mock_stream.assert_called_once_with( + topic="topic", + idle_timeout=1, + ping_interval=2, + on_subscribe=None, + terminal_events=[StreamEvent.WORKFLOW_FINISHED.value], + ) diff --git a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py index a7714c56ce..58f0e47a4b 100644 --- a/api/tests/unit_tests/core/app/apps/test_streaming_utils.py +++ b/api/tests/unit_tests/core/app/apps/test_streaming_utils.py @@ -88,6 +88,10 @@ def test_normalize_terminal_events_defaults(): } +def test_normalize_terminal_events_empty_values(): + assert 
_normalize_terminal_events([]) == set({}) + + def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): topic = FakeTopic() times = [1000.0, 1000.0, 1001.0, 1001.0, 1002.0] @@ -106,3 +110,21 @@ def test_stream_topic_events_emits_ping_and_idle_timeout(monkeypatch): assert next(generator) == StreamEvent.PING.value # next receive yields None -> ping interval triggers assert next(generator) == StreamEvent.PING.value + + +def test_stream_topic_events_can_continue_past_pause(): + topic = FakeTopic() + topic.publish(json.dumps({"event": StreamEvent.WORKFLOW_PAUSED.value}).encode()) + topic.publish(json.dumps({"event": StreamEvent.WORKFLOW_FINISHED.value}).encode()) + + generator = stream_topic_events( + topic=topic, + idle_timeout=1.0, + terminal_events=[StreamEvent.WORKFLOW_FINISHED.value], + ) + + assert next(generator) == StreamEvent.PING.value + assert next(generator)["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert next(generator)["event"] == StreamEvent.WORKFLOW_FINISHED.value + with pytest.raises(StopIteration): + next(generator) diff --git a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py index 99433478d3..0bcc1029b0 100644 --- a/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py +++ b/api/tests/unit_tests/core/app/apps/workflow/test_generate_task_pipeline_core.py @@ -36,11 +36,12 @@ from core.app.entities.queue_entities import ( ) from core.app.entities.task_entities import ( ErrorStreamResponse, + HumanInputRequiredResponse, MessageAudioEndStreamResponse, MessageAudioStreamResponse, PingStreamResponse, + WorkflowAppPausedBlockingResponse, WorkflowFinishStreamResponse, - WorkflowPauseStreamResponse, WorkflowStartStreamResponse, ) from core.base.tts.app_generator_tts_publisher import AudioTrunk @@ -91,27 +92,50 @@ def _make_pipeline(): class TestWorkflowGenerateTaskPipeline: - def 
test_to_blocking_response_handles_pause(self): + def test_to_blocking_response_falls_back_to_human_input_required_when_pause_event_missing(self): pipeline = _make_pipeline() + pipeline._graph_runtime_state = GraphRuntimeState( + variable_pool=VariablePool(system_variables=build_system_variables(workflow_execution_id="run-id")), + start_at=0.0, + total_tokens=5, + node_run_steps=2, + ) def _gen(): - yield WorkflowPauseStreamResponse( + yield HumanInputRequiredResponse( task_id="task", - workflow_run_id="run", - data=WorkflowPauseStreamResponse.Data( - workflow_run_id="run", - status=WorkflowExecutionStatus.PAUSED, - outputs={}, - created_at=1, - elapsed_time=0.1, - total_tokens=0, - total_steps=0, + workflow_run_id="run-id", + data=HumanInputRequiredResponse.Data( + form_id="form-1", + node_id="node-1", + node_title="Human Input", + form_content="content", + expiration_time=1, ), ) response = pipeline._to_blocking_response(_gen()) + assert isinstance(response, WorkflowAppPausedBlockingResponse) + assert response.workflow_run_id == "run-id" assert response.data.status == WorkflowExecutionStatus.PAUSED + assert response.data.created_at == 0 + assert response.data.paused_nodes == ["node-1"] + assert response.data.reasons == [ + { + "type": "human_input_required", + "form_id": "form-1", + "node_id": "node-1", + "node_title": "Human Input", + "form_content": "content", + "inputs": [], + "actions": [], + "display_in_ui": False, + "form_token": None, + "resolved_default_values": {}, + "expiration_time": 1, + } + ] def test_to_blocking_response_handles_finish(self): pipeline = _make_pipeline() diff --git a/api/tests/unit_tests/core/datasource/test_notion_provider.py b/api/tests/unit_tests/core/datasource/test_notion_provider.py index e4bd7d3bdf..d21b9e471b 100644 --- a/api/tests/unit_tests/core/datasource/test_notion_provider.py +++ b/api/tests/unit_tests/core/datasource/test_notion_provider.py @@ -775,9 +775,6 @@ class TestNotionExtractorLastEditedTime: "last_edited_time": 
"2024-11-27T18:00:00.000Z", } mock_request.return_value = mock_response - mock_query = Mock() - mock_db.session.query.return_value = mock_query - mock_query.filter_by.return_value = mock_query # Act extractor_page.update_last_edited_time(mock_document_model) @@ -863,9 +860,6 @@ class TestNotionExtractorIntegration: } mock_request.side_effect = [last_edited_response, block_response] - mock_query = Mock() - mock_db.session.query.return_value = mock_query - mock_query.filter_by.return_value = mock_query # Act documents = extractor.extract() @@ -919,10 +913,6 @@ class TestNotionExtractorIntegration: } mock_post.return_value = database_response - mock_query = Mock() - mock_db.session.query.return_value = mock_query - mock_query.filter_by.return_value = mock_query - # Act documents = extractor.extract() diff --git a/api/tests/unit_tests/core/helper/test_creators.py b/api/tests/unit_tests/core/helper/test_creators.py new file mode 100644 index 0000000000..df67d3f513 --- /dev/null +++ b/api/tests/unit_tests/core/helper/test_creators.py @@ -0,0 +1,106 @@ +"""Tests for the Creators Platform helper module.""" + +from unittest.mock import MagicMock, patch + +import httpx +import pytest +from yarl import URL + + +@pytest.fixture(autouse=True) +def _patch_creators_url(monkeypatch): + """Patch the module-level creators_platform_api_url for all tests.""" + monkeypatch.setattr( + "core.helper.creators.creators_platform_api_url", + URL("https://creators.example.com"), + ) + + +class TestUploadDSL: + @patch("core.helper.creators.httpx.post") + def test_returns_claim_code(self, mock_post): + mock_response = MagicMock(spec=httpx.Response) + mock_response.json.return_value = {"data": {"claim_code": "abc123"}} + mock_response.raise_for_status = MagicMock() + mock_post.return_value = mock_response + + from core.helper.creators import upload_dsl + + result = upload_dsl(b"app: demo", "demo.yaml") + + assert result == "abc123" + mock_post.assert_called_once() + call_kwargs = 
mock_post.call_args + assert "anonymous-upload" in call_kwargs.args[0] + assert call_kwargs.kwargs["timeout"] == 30 + + @patch("core.helper.creators.httpx.post") + def test_raises_on_missing_claim_code(self, mock_post): + mock_response = MagicMock(spec=httpx.Response) + mock_response.json.return_value = {"data": {}} + mock_response.raise_for_status = MagicMock() + mock_post.return_value = mock_response + + from core.helper.creators import upload_dsl + + with pytest.raises(ValueError, match="claim_code"): + upload_dsl(b"app: demo") + + @patch("core.helper.creators.httpx.post") + def test_raises_on_http_error(self, mock_post): + mock_response = MagicMock(spec=httpx.Response) + mock_response.raise_for_status.side_effect = httpx.HTTPStatusError( + "Server Error", + request=MagicMock(), + response=MagicMock(), + ) + mock_post.return_value = mock_response + + from core.helper.creators import upload_dsl + + with pytest.raises(httpx.HTTPStatusError): + upload_dsl(b"app: demo") + + +class TestGetRedirectUrl: + @patch("core.helper.creators.dify_config") + def test_without_oauth_client_id(self, mock_config): + mock_config.CREATORS_PLATFORM_API_URL = "https://creators.example.com" + mock_config.CREATORS_PLATFORM_OAUTH_CLIENT_ID = "" + + from core.helper.creators import get_redirect_url + + url = get_redirect_url("user-1", "claim-abc") + + assert "dsl_claim_code=claim-abc" in url + assert "oauth_code" not in url + assert url.startswith("https://creators.example.com") + + @patch("core.helper.creators.dify_config") + def test_with_oauth_client_id(self, mock_config): + mock_config.CREATORS_PLATFORM_API_URL = "https://creators.example.com" + mock_config.CREATORS_PLATFORM_OAUTH_CLIENT_ID = "client-xyz" + + with patch( + "services.oauth_server.OAuthServerService.sign_oauth_authorization_code", + return_value="oauth-code-123", + ) as mock_sign: + from core.helper.creators import get_redirect_url + + url = get_redirect_url("user-1", "claim-abc") + + 
mock_sign.assert_called_once_with("client-xyz", "user-1") + assert "dsl_claim_code=claim-abc" in url + assert "oauth_code=oauth-code-123" in url + + @patch("core.helper.creators.dify_config") + def test_strips_trailing_slash(self, mock_config): + mock_config.CREATORS_PLATFORM_API_URL = "https://creators.example.com/" + mock_config.CREATORS_PLATFORM_OAUTH_CLIENT_ID = "" + + from core.helper.creators import get_redirect_url + + url = get_redirect_url("user-1", "claim-abc") + + assert url.startswith("https://creators.example.com?") + assert "creators.example.com/?" not in url diff --git a/api/tests/unit_tests/core/helper/test_encrypter.py b/api/tests/unit_tests/core/helper/test_encrypter.py index f3ef7fccd0..73e081a570 100644 --- a/api/tests/unit_tests/core/helper/test_encrypter.py +++ b/api/tests/unit_tests/core/helper/test_encrypter.py @@ -40,11 +40,11 @@ class TestObfuscatedToken: class TestEncryptToken: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_successful_encryption(self, mock_encrypt, mock_query): + def test_successful_encryption(self, mock_encrypt, mock_get): """Test successful token encryption""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_data" result = encrypt_token("tenant-123", "test_token") @@ -53,9 +53,9 @@ class TestEncryptToken: mock_encrypt.assert_called_with("test_token", "mock_public_key") @patch("extensions.ext_database.db.session.get") - def test_tenant_not_found(self, mock_query): + def test_tenant_not_found(self, mock_get): """Test error when tenant doesn't exist""" - mock_query.return_value = None + mock_get.return_value = None with pytest.raises(ValueError) as exc_info: encrypt_token("invalid-tenant", "test_token") @@ -122,12 +122,12 @@ class TestEncryptDecryptIntegration: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") 
@patch("libs.rsa.decrypt") - def test_should_encrypt_and_decrypt_consistently(self, mock_decrypt, mock_encrypt, mock_query): + def test_should_encrypt_and_decrypt_consistently(self, mock_decrypt, mock_encrypt, mock_get): """Test that encryption and decryption are consistent""" # Setup mock tenant mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant # Setup mock encryption/decryption original_token = "test_token_123" @@ -148,12 +148,12 @@ class TestSecurity: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_cross_tenant_isolation(self, mock_encrypt, mock_query): + def test_cross_tenant_isolation(self, mock_encrypt, mock_get): """Ensure tokens encrypted for one tenant cannot be used by another""" # Setup mock tenant mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "tenant1_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_for_tenant1" # Encrypt token for tenant1 @@ -183,10 +183,10 @@ class TestSecurity: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_encryption_randomness(self, mock_encrypt, mock_query): + def test_encryption_randomness(self, mock_encrypt, mock_get): """Ensure same plaintext produces different ciphertext""" mock_tenant = MagicMock(encrypt_public_key="key") - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant # Different outputs for same input mock_encrypt.side_effect = [b"enc1", b"enc2", b"enc3"] @@ -207,11 +207,11 @@ class TestEdgeCases: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_should_handle_empty_token_encryption(self, mock_encrypt, mock_query): + def test_should_handle_empty_token_encryption(self, mock_encrypt, mock_get): """Test encryption of empty token""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key 
= "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_empty" result = encrypt_token("tenant-123", "") @@ -221,11 +221,11 @@ class TestEdgeCases: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_should_handle_special_characters_in_token(self, mock_encrypt, mock_query): + def test_should_handle_special_characters_in_token(self, mock_encrypt, mock_get): """Test tokens containing special/unicode characters""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant mock_encrypt.return_value = b"encrypted_special" # Test various special characters @@ -244,11 +244,11 @@ class TestEdgeCases: @patch("extensions.ext_database.db.session.get") @patch("libs.rsa.encrypt") - def test_should_handle_rsa_size_limits(self, mock_encrypt, mock_query): + def test_should_handle_rsa_size_limits(self, mock_encrypt, mock_get): """Test behavior when token exceeds RSA encryption limits""" mock_tenant = MagicMock() mock_tenant.encrypt_public_key = "mock_public_key" - mock_query.return_value = mock_tenant + mock_get.return_value = mock_tenant # RSA 2048-bit can only encrypt ~245 bytes # The actual limit depends on padding scheme diff --git a/api/tests/unit_tests/core/llm_generator/test_llm_generator.py b/api/tests/unit_tests/core/llm_generator/test_llm_generator.py index 2716f4712c..c4e610d5b0 100644 --- a/api/tests/unit_tests/core/llm_generator/test_llm_generator.py +++ b/api/tests/unit_tests/core/llm_generator/test_llm_generator.py @@ -7,6 +7,7 @@ from core.app.app_config.entities import ModelConfig from core.llm_generator.entities import RuleCodeGeneratePayload, RuleGeneratePayload, RuleStructuredOutputPayload from core.llm_generator.llm_generator import LLMGenerator from graphon.model_runtime.entities.llm_entities import LLMMode, LLMResult +from 
graphon.model_runtime.entities.model_entities import ModelType from graphon.model_runtime.errors.invoke import InvokeAuthorizationError, InvokeError @@ -96,6 +97,10 @@ class TestLLMGenerator: questions = LLMGenerator.generate_suggested_questions_after_answer("tenant_id", "histories") assert len(questions) == 2 assert questions[0] == "Question 1?" + assert mock_model_instance.invoke_llm.call_args.kwargs["model_parameters"] == { + "max_tokens": 2560, + "temperature": 0.0, + } def test_generate_suggested_questions_after_answer_auth_error(self, mock_model_instance): with patch("core.llm_generator.llm_generator.ModelManager.for_tenant") as mock_manager: @@ -113,6 +118,97 @@ class TestLLMGenerator: questions = LLMGenerator.generate_suggested_questions_after_answer("tenant_id", "histories") assert questions == [] + @patch("core.llm_generator.llm_generator.ModelManager.for_tenant") + def test_generate_suggested_questions_after_answer_with_custom_model_and_prompt(self, mock_for_tenant): + custom_model_instance = MagicMock() + custom_response = MagicMock() + custom_response.message.get_text_content.return_value = '["Question 1?"]' + custom_model_instance.invoke_llm.return_value = custom_response + + mock_for_tenant.return_value.get_model_instance.return_value = custom_model_instance + + questions = LLMGenerator.generate_suggested_questions_after_answer( + "tenant_id", + "histories", + instruction_prompt="custom prompt", + model_config={ + "provider": "openai", + "name": "gpt-4o", + "completion_params": {"temperature": 0.2}, + }, + ) + + assert questions == ["Question 1?"] + mock_for_tenant.return_value.get_model_instance.assert_called_once_with( + tenant_id="tenant_id", + model_type=ModelType.LLM, + provider="openai", + model="gpt-4o", + ) + + invoke_kwargs = custom_model_instance.invoke_llm.call_args.kwargs + assert invoke_kwargs["model_parameters"] == {"temperature": 0.2} + assert invoke_kwargs["stop"] == [] + assert "custom prompt" in 
invoke_kwargs["prompt_messages"][0].content + + @patch("core.llm_generator.llm_generator.ModelManager.for_tenant") + def test_generate_suggested_questions_after_answer_fallback_to_default_model(self, mock_for_tenant): + default_model_instance = MagicMock() + default_response = MagicMock() + default_response.message.get_text_content.return_value = '["Question 1?"]' + default_model_instance.invoke_llm.return_value = default_response + + mock_for_tenant.return_value.get_model_instance.side_effect = ValueError("invalid configured model") + mock_for_tenant.return_value.get_default_model_instance.return_value = default_model_instance + + questions = LLMGenerator.generate_suggested_questions_after_answer( + "tenant_id", + "histories", + model_config={ + "provider": "openai", + "name": "not-found-model", + "completion_params": {"temperature": 0.2}, + }, + ) + + assert questions == ["Question 1?"] + mock_for_tenant.return_value.get_default_model_instance.assert_called_once_with( + tenant_id="tenant_id", + model_type=ModelType.LLM, + ) + assert default_model_instance.invoke_llm.call_args.kwargs["model_parameters"] == { + "max_tokens": 2560, + "temperature": 0.0, + } + assert default_model_instance.invoke_llm.call_args.kwargs["stop"] == [] + + @patch("core.llm_generator.llm_generator.ModelManager.for_tenant") + def test_generate_suggested_questions_after_answer_drops_non_positive_max_tokens(self, mock_for_tenant): + custom_model_instance = MagicMock() + custom_response = MagicMock() + custom_response.message.get_text_content.return_value = '["Question 1?"]' + custom_model_instance.invoke_llm.return_value = custom_response + mock_for_tenant.return_value.get_model_instance.return_value = custom_model_instance + + questions = LLMGenerator.generate_suggested_questions_after_answer( + "tenant_id", + "histories", + model_config={ + "provider": "openai", + "name": "gpt-4o", + "completion_params": { + "temperature": 0.2, + "max_tokens": 0, + "stop": ["END"], + }, + }, + ) + + assert 
questions == ["Question 1?"] + invoke_kwargs = custom_model_instance.invoke_llm.call_args.kwargs + assert invoke_kwargs["model_parameters"] == {"temperature": 0.2} + assert invoke_kwargs["stop"] == ["END"] + def test_generate_rule_config_no_variable_success(self, mock_model_instance, model_config_entity): payload = RuleGeneratePayload( instruction="test instruction", model_config=model_config_entity, no_variable=True @@ -395,7 +491,7 @@ class TestLLMGenerator: def test_instruction_modify_workflow_no_last_run_fallback(self, mock_model_instance, model_config_entity): with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() workflow.graph_dict = {"graph": {"nodes": [{"id": "node_id", "data": {"type": "code"}}]}} @@ -421,7 +517,7 @@ class TestLLMGenerator: def test_instruction_modify_workflow_node_type_fallback(self, mock_model_instance, model_config_entity): with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() # Cause exception in node_type logic workflow.graph_dict = {"graph": {"nodes": []}} @@ -448,7 +544,7 @@ class TestLLMGenerator: def test_instruction_modify_workflow_empty_agent_log(self, mock_model_instance, model_config_entity): with patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() workflow.graph_dict = {"graph": {"nodes": [{"id": "node_id", "data": {"type": "llm"}}]}} @@ -536,7 +632,7 @@ class TestLLMGenerator: instance.invoke_llm.return_value = mock_response with 
patch("extensions.ext_database.db.session") as mock_session: - mock_session.return_value.query.return_value.where.return_value.first.return_value = MagicMock() + mock_session.return_value.scalar.return_value = MagicMock() workflow = MagicMock() workflow.graph_dict = {"graph": {"nodes": [{"id": "node_id", "data": {"type": "other"}}]}} diff --git a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py index 136ac0c72a..1e91c2dd88 100644 --- a/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py +++ b/api/tests/unit_tests/core/rag/datasource/keyword/jieba/test_jieba.py @@ -29,15 +29,6 @@ class _Field: return ("in", self._name, tuple(values)) -class _FakeQuery: - def __init__(self): - self.where_calls: list[tuple] = [] - - def where(self, *conditions): - self.where_calls.append(conditions) - return self - - class _FakeExecuteResult: def __init__(self, segments: list[SimpleNamespace]): self._segments = segments diff --git a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py index 0baf85c314..b0ecad4d0c 100644 --- a/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py +++ b/api/tests/unit_tests/core/rag/datasource/test_datasource_retrieval.py @@ -109,17 +109,6 @@ class _FakeExecuteResult: return _FakeExecuteScalarResult(self._data) -class _FakeSummaryQuery: - def __init__(self, summaries: list) -> None: - self._summaries = summaries - - def filter(self, *args, **kwargs): - return self - - def all(self) -> list: - return self._summaries - - class _FakeScalarsResult: def __init__(self, data: list) -> None: self._data = data diff --git a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py index dc21d378a2..9de04c80ba 100644 --- 
a/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py +++ b/api/tests/unit_tests/core/rag/datasource/vdb/test_vector_factory.py @@ -372,19 +372,11 @@ def test_vector_delegation_methods(vector_factory_module): def test_search_by_file_handles_missing_and_existing_upload(vector_factory_module, monkeypatch): - class _Field: - def __eq__(self, value): - return value - - upload_query = MagicMock() - upload_query.where.return_value = upload_query - vector = vector_factory_module.Vector.__new__(vector_factory_module.Vector) vector._embeddings = MagicMock() vector._vector_processor = MagicMock() mock_session = SimpleNamespace(get=lambda _model, _id: None) - monkeypatch.setattr(vector_factory_module, "UploadFile", SimpleNamespace(id=_Field())) monkeypatch.setattr(vector_factory_module, "db", SimpleNamespace(session=mock_session)) assert vector.search_by_file("file-1") == [] diff --git a/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py b/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py index 7c4defc180..b4bb343533 100644 --- a/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py +++ b/api/tests/unit_tests/core/rag/indexing/test_indexing_runner.py @@ -1484,11 +1484,8 @@ class TestIndexingRunnerProcessChunk: mock_dependencies["redis"].get.return_value = None - # Mock database query for segment updates - mock_query = MagicMock() - mock_dependencies["db"].session.query.return_value = mock_query - mock_query.where.return_value = mock_query - mock_query.update.return_value = None + # Mock database update for segment status + mock_dependencies["db"].session.execute.return_value = None # Create a proper context manager mock mock_context = MagicMock() diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py index 89830f7517..fd607210f1 100644 --- a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py +++ 
b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval.py @@ -2417,12 +2417,11 @@ class TestDatasetRetrievalKnowledgeRetrieval: mock_document.data_source_type = "upload_file" mock_document.doc_metadata = {} - mock_session.query.return_value.filter.return_value.all.return_value = [ - mock_dataset_from_db - ] - mock_session.query.return_value.filter.return_value.all.__iter__ = lambda self: iter( - [mock_dataset_from_db, mock_document] - ) + mock_datasets = MagicMock() + mock_datasets.all.return_value = [mock_dataset_from_db] + mock_documents = MagicMock() + mock_documents.all.return_value = [mock_document] + mock_session.scalars.side_effect = [mock_datasets, mock_documents] # Act result = dataset_retrieval.knowledge_retrieval(request) diff --git a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py index 90feb4cf01..aace419d15 100644 --- a/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py +++ b/api/tests/unit_tests/core/rag/retrieval/test_dataset_retrieval_methods.py @@ -451,12 +451,11 @@ class TestDatasetRetrievalKnowledgeRetrieval: mock_document.data_source_type = "upload_file" mock_document.doc_metadata = {} - mock_session.query.return_value.filter.return_value.all.return_value = [ - mock_dataset_from_db - ] - mock_session.query.return_value.filter.return_value.all.__iter__ = lambda self: iter( - [mock_dataset_from_db, mock_document] - ) + mock_datasets = MagicMock() + mock_datasets.all.return_value = [mock_dataset_from_db] + mock_documents = MagicMock() + mock_documents.all.return_value = [mock_document] + mock_session.scalars.side_effect = [mock_datasets, mock_documents] # Act result = dataset_retrieval.knowledge_retrieval(request) diff --git a/api/tests/unit_tests/core/test_model_manager.py b/api/tests/unit_tests/core/test_model_manager.py index afea9144c0..5a7e7e30a5 100644 --- a/api/tests/unit_tests/core/test_model_manager.py +++ 
b/api/tests/unit_tests/core/test_model_manager.py @@ -5,7 +5,7 @@ import redis from pytest_mock import MockerFixture from core.entities.provider_entities import ModelLoadBalancingConfiguration -from core.model_manager import LBModelManager +from core.model_manager import LBModelManager, ModelManager from extensions.ext_redis import redis_client from graphon.model_runtime.entities.model_entities import ModelType @@ -40,6 +40,29 @@ def lb_model_manager(): return lb_model_manager +def test_model_manager_with_cache_enabled_reuses_stored_credentials(): + """With ``enable_credentials_cache=True``, later calls for the same key return cached creds.""" + provider_manager = MagicMock() + bundle = MagicMock() + bundle.configuration.provider.provider = "openai" + bundle.configuration.tenant_id = "tenant-1" + bundle.configuration.model_settings = [] + bundle.model_type_instance.model_type = ModelType.LLM + get_creds = MagicMock(return_value={"api_key": "first"}) + bundle.configuration.get_current_credentials = get_creds + provider_manager.get_provider_model_bundle.return_value = bundle + + manager = ModelManager(provider_manager, enable_credentials_cache=True) + first = manager.get_model_instance("tenant-1", "openai", ModelType.LLM, "gpt-4") + assert first.credentials == {"api_key": "first"} + get_creds.assert_called_once() + + get_creds.return_value = {"api_key": "second"} + second = manager.get_model_instance("tenant-1", "openai", ModelType.LLM, "gpt-4") + assert second.credentials == {"api_key": "first"} + get_creds.assert_called_once() + + def test_lb_model_manager_fetch_next(mocker: MockerFixture, lb_model_manager: LBModelManager): # initialize redis client redis_client.initialize(redis.Redis()) diff --git a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py index 5691f33e65..6bb86ebe78 100644 --- a/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py +++ 
b/api/tests/unit_tests/core/tools/utils/test_system_oauth_encryption.py @@ -2,50 +2,50 @@ from __future__ import annotations import pytest -from core.tools.utils import system_oauth_encryption as oauth_encryption -from core.tools.utils.system_oauth_encryption import OAuthEncryptionError, SystemOAuthEncrypter +from core.tools.utils import system_encryption as encryption +from core.tools.utils.system_encryption import EncryptionError, SystemEncrypter -def test_system_oauth_encrypter_roundtrip(): - encrypter = SystemOAuthEncrypter(secret_key="test-secret") +def test_system_encrypter_roundtrip(): + encrypter = SystemEncrypter(secret_key="test-secret") payload = {"client_id": "cid", "client_secret": "csecret", "grant_type": "authorization_code"} - encrypted = encrypter.encrypt_oauth_params(payload) - decrypted = encrypter.decrypt_oauth_params(encrypted) + encrypted = encrypter.encrypt_params(payload) + decrypted = encrypter.decrypt_params(encrypted) assert encrypted assert dict(decrypted) == payload -def test_system_oauth_encrypter_decrypt_validates_input(): - encrypter = SystemOAuthEncrypter(secret_key="test-secret") +def test_system_encrypter_decrypt_validates_input(): + encrypter = SystemEncrypter(secret_key="test-secret") with pytest.raises(ValueError, match="must be a string"): - encrypter.decrypt_oauth_params(123) # type: ignore[arg-type] + encrypter.decrypt_params(123) # type: ignore[arg-type] with pytest.raises(ValueError, match="cannot be empty"): - encrypter.decrypt_oauth_params("") + encrypter.decrypt_params("") -def test_system_oauth_encrypter_raises_oauth_error_for_invalid_ciphertext(): - encrypter = SystemOAuthEncrypter(secret_key="test-secret") +def test_system_encrypter_raises_error_for_invalid_ciphertext(): + encrypter = SystemEncrypter(secret_key="test-secret") - with pytest.raises(OAuthEncryptionError, match="Decryption failed"): - encrypter.decrypt_oauth_params("not-base64") + with pytest.raises(EncryptionError, match="Decryption failed"): + 
encrypter.decrypt_params("not-base64") -def test_system_oauth_helpers_use_global_cached_instance(monkeypatch): - monkeypatch.setattr(oauth_encryption, "_oauth_encrypter", None) - monkeypatch.setattr("core.tools.utils.system_oauth_encryption.dify_config.SECRET_KEY", "global-secret") +def test_system_helpers_use_global_cached_instance(monkeypatch): + monkeypatch.setattr(encryption, "_encrypter", None) + monkeypatch.setattr("core.tools.utils.system_encryption.dify_config.SECRET_KEY", "global-secret") - first = oauth_encryption.get_system_oauth_encrypter() - second = oauth_encryption.get_system_oauth_encrypter() + first = encryption.get_system_encrypter() + second = encryption.get_system_encrypter() assert first is second - encrypted = oauth_encryption.encrypt_system_oauth_params({"k": "v"}) - assert oauth_encryption.decrypt_system_oauth_params(encrypted) == {"k": "v"} + encrypted = encryption.encrypt_system_params({"k": "v"}) + assert encryption.decrypt_system_params(encrypted) == {"k": "v"} -def test_create_system_oauth_encrypter_factory(): - encrypter = oauth_encryption.create_system_oauth_encrypter(secret_key="factory-secret") - assert isinstance(encrypter, SystemOAuthEncrypter) +def test_create_system_encrypter_factory(): + encrypter = encryption.create_system_encrypter(secret_key="factory-secret") + assert isinstance(encrypter, SystemEncrypter) diff --git a/api/tests/unit_tests/core/workflow/test_human_input_forms.py b/api/tests/unit_tests/core/workflow/test_human_input_forms.py index 6071a95a57..e508815b35 100644 --- a/api/tests/unit_tests/core/workflow/test_human_input_forms.py +++ b/api/tests/unit_tests/core/workflow/test_human_input_forms.py @@ -1,6 +1,7 @@ from types import SimpleNamespace -from core.workflow.human_input_forms import load_form_tokens_by_form_id +from core.workflow.human_input_forms import _load_form_tokens_by_form_id, load_form_tokens_by_form_id +from core.workflow.human_input_policy import HumanInputSurface from models.human_input import 
RecipientType @@ -53,3 +54,50 @@ def test_load_form_tokens_by_form_id_ignores_unsupported_recipients() -> None: ) assert load_form_tokens_by_form_id(["form-1"], session=session) == {} + + +def test_load_form_tokens_by_form_id_uses_shared_priority() -> None: + session = _FakeSession( + recipients=[ + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + access_token="web-token", + ), + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.CONSOLE, + access_token="console-token", + ), + ] + ) + + assert _load_form_tokens_by_form_id(session, ["form-1"]) == {"form-1": "console-token"} + + +def test_load_form_tokens_by_form_id_uses_web_token_for_service_api_surface() -> None: + session = _FakeSession( + recipients=[ + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.STANDALONE_WEB_APP, + access_token="web-token", + ), + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.CONSOLE, + access_token="console-token", + ), + SimpleNamespace( + form_id="form-1", + recipient_type=RecipientType.BACKSTAGE, + access_token="backstage-token", + ), + ] + ) + + assert load_form_tokens_by_form_id( + ["form-1"], + session=session, + surface=HumanInputSurface.SERVICE_API, + ) == {"form-1": "web-token"} diff --git a/api/tests/unit_tests/core/workflow/test_human_input_policy.py b/api/tests/unit_tests/core/workflow/test_human_input_policy.py new file mode 100644 index 0000000000..e6d0366af5 --- /dev/null +++ b/api/tests/unit_tests/core/workflow/test_human_input_policy.py @@ -0,0 +1,50 @@ +from core.workflow.human_input_policy import ( + HumanInputSurface, + get_preferred_form_token, + is_recipient_type_allowed_for_surface, +) +from models.human_input import RecipientType + + +def test_service_api_only_allows_public_webapp_forms() -> None: + assert is_recipient_type_allowed_for_surface( + RecipientType.STANDALONE_WEB_APP, + HumanInputSurface.SERVICE_API, + ) + assert not is_recipient_type_allowed_for_surface( 
+ RecipientType.CONSOLE, + HumanInputSurface.SERVICE_API, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.BACKSTAGE, + HumanInputSurface.SERVICE_API, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.EMAIL_MEMBER, + HumanInputSurface.SERVICE_API, + ) + + +def test_console_only_allows_internal_console_surfaces() -> None: + assert is_recipient_type_allowed_for_surface( + RecipientType.CONSOLE, + HumanInputSurface.CONSOLE, + ) + assert is_recipient_type_allowed_for_surface( + RecipientType.BACKSTAGE, + HumanInputSurface.CONSOLE, + ) + assert not is_recipient_type_allowed_for_surface( + RecipientType.STANDALONE_WEB_APP, + HumanInputSurface.CONSOLE, + ) + + +def test_preferred_form_token_uses_shared_priority_order() -> None: + recipients = [ + (RecipientType.STANDALONE_WEB_APP, "web-token"), + (RecipientType.CONSOLE, "console-token"), + (RecipientType.BACKSTAGE, "backstage-token"), + ] + + assert get_preferred_form_token(recipients) == "backstage-token" diff --git a/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py b/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py new file mode 100644 index 0000000000..ac4b087b91 --- /dev/null +++ b/api/tests/unit_tests/repositories/test_sqlalchemy_api_workflow_run_repository.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +from datetime import UTC, datetime +from types import SimpleNamespace + +from graphon.nodes.human_input.entities import FormDefinition, FormInput, UserAction +from graphon.nodes.human_input.enums import FormInputType +from models.human_input import RecipientType +from repositories.sqlalchemy_api_workflow_run_repository import _build_human_input_required_reason + + +def _build_form_model() -> SimpleNamespace: + expiration_time = datetime(2024, 1, 1, tzinfo=UTC) + definition = FormDefinition( + form_content="content", + inputs=[FormInput(type=FormInputType.TEXT_INPUT, output_variable_name="name")], + 
user_actions=[UserAction(id="approve", title="Approve")], + rendered_content="rendered", + expiration_time=expiration_time, + default_values={"name": "Alice"}, + node_title="Ask Name", + display_in_ui=True, + ) + return SimpleNamespace( + id="form-1", + node_id="node-1", + form_definition=definition.model_dump_json(), + expiration_time=expiration_time, + ) + + +def _build_reason_model() -> SimpleNamespace: + return SimpleNamespace(form_id="form-1", node_id="node-1") + + +def test_build_human_input_required_reason_prefers_standalone_web_app_token() -> None: + reason = _build_human_input_required_reason( + _build_reason_model(), + _build_form_model(), + [ + SimpleNamespace(recipient_type=RecipientType.BACKSTAGE, access_token="btok"), + SimpleNamespace(recipient_type=RecipientType.CONSOLE, access_token="ctok"), + SimpleNamespace(recipient_type=RecipientType.STANDALONE_WEB_APP, access_token="wtok"), + ], + ) + + assert reason.node_title == "Ask Name" + assert reason.resolved_default_values == {"name": "Alice"} + assert not hasattr(reason, "form_token") + + +def test_build_human_input_required_reason_falls_back_to_console_token() -> None: + reason = _build_human_input_required_reason( + _build_reason_model(), + _build_form_model(), + [ + SimpleNamespace(recipient_type=RecipientType.BACKSTAGE, access_token="btok"), + SimpleNamespace(recipient_type=RecipientType.CONSOLE, access_token="ctok"), + ], + ) + + assert reason.node_id == "node-1" + assert reason.actions[0].id == "approve" + assert not hasattr(reason, "form_token") diff --git a/api/tests/unit_tests/services/document_indexing_task_proxy.py b/api/tests/unit_tests/services/document_indexing_task_proxy.py deleted file mode 100644 index ff243b8dc3..0000000000 --- a/api/tests/unit_tests/services/document_indexing_task_proxy.py +++ /dev/null @@ -1,1291 +0,0 @@ -""" -Comprehensive unit tests for DocumentIndexingTaskProxy service. 
- -This module contains extensive unit tests for the DocumentIndexingTaskProxy class, -which is responsible for routing document indexing tasks to appropriate Celery queues -based on tenant billing configuration and managing tenant-isolated task queues. - -The DocumentIndexingTaskProxy handles: -- Task scheduling and queuing (direct vs tenant-isolated queues) -- Priority vs normal task routing based on billing plans -- Tenant isolation using TenantIsolatedTaskQueue -- Batch indexing operations with multiple document IDs -- Error handling and retry logic through queue management - -This test suite ensures: -- Correct task routing based on billing configuration -- Proper tenant isolation queue management -- Accurate batch operation handling -- Comprehensive error condition coverage -- Edge cases are properly handled - -================================================================================ -ARCHITECTURE OVERVIEW -================================================================================ - -The DocumentIndexingTaskProxy is a critical component in the document indexing -workflow. It acts as a proxy/router that determines which Celery queue to use -for document indexing tasks based on tenant billing configuration. - -1. Task Queue Routing: - - Direct Queue: Bypasses tenant isolation, used for self-hosted/enterprise - - Tenant Queue: Uses tenant isolation, queues tasks when another task is running - - Default Queue: Normal priority with tenant isolation (SANDBOX plan) - - Priority Queue: High priority with tenant isolation (TEAM/PRO plans) - - Priority Direct Queue: High priority without tenant isolation (billing disabled) - -2. Tenant Isolation: - - Uses TenantIsolatedTaskQueue to ensure only one indexing task runs per tenant - - When a task is running, new tasks are queued in Redis - - When a task completes, it pulls the next task from the queue - - Prevents resource contention and ensures fair task distribution - -3. 
Billing Configuration: - - SANDBOX plan: Uses default tenant queue (normal priority, tenant isolated) - - TEAM/PRO plans: Uses priority tenant queue (high priority, tenant isolated) - - Billing disabled: Uses priority direct queue (high priority, no isolation) - -4. Batch Operations: - - Supports indexing multiple documents in a single task - - DocumentTask entity serializes task information - - Tasks are queued with all document IDs for batch processing - -================================================================================ -TESTING STRATEGY -================================================================================ - -This test suite follows a comprehensive testing strategy that covers: - -1. Initialization and Configuration: - - Proxy initialization with various parameters - - TenantIsolatedTaskQueue initialization - - Features property caching - - Edge cases (empty document_ids, single document, large batches) - -2. Task Queue Routing: - - Direct queue routing (bypasses tenant isolation) - - Tenant queue routing with existing task key (pushes to waiting queue) - - Tenant queue routing without task key (sets flag and executes immediately) - - DocumentTask serialization and deserialization - - Task function delay() call with correct parameters - -3. Queue Type Selection: - - Default tenant queue routing (normal_document_indexing_task) - - Priority tenant queue routing (priority_document_indexing_task with isolation) - - Priority direct queue routing (priority_document_indexing_task without isolation) - -4. Dispatch Logic: - - Billing enabled + SANDBOX plan → default tenant queue - - Billing enabled + non-SANDBOX plan (TEAM, PRO, etc.) → priority tenant queue - - Billing disabled (self-hosted/enterprise) → priority direct queue - - All CloudPlan enum values handling - - Edge cases: None plan, empty plan string - -5. 
Tenant Isolation and Queue Management: - - Task key existence checking (get_task_key) - - Task waiting time setting (set_task_waiting_time) - - Task pushing to queue (push_tasks) - - Queue state transitions (idle → active → idle) - - Multiple concurrent task handling - -6. Batch Operations: - - Single document indexing - - Multiple document batch indexing - - Large batch handling - - Empty batch handling (edge case) - -7. Error Handling and Retry Logic: - - Task function delay() failure handling - - Queue operation failures (Redis errors) - - Feature service failures - - Invalid task data handling - - Retry mechanism through queue pull operations - -8. Integration Points: - - FeatureService integration (billing features, subscription plans) - - TenantIsolatedTaskQueue integration (Redis operations) - - Celery task integration (normal_document_indexing_task, priority_document_indexing_task) - - DocumentTask entity serialization - -================================================================================ -""" - -from unittest.mock import Mock, patch - -import pytest - -from core.entities.document_task import DocumentTask -from core.rag.pipeline.queue import TenantIsolatedTaskQueue -from enums.cloud_plan import CloudPlan -from services.document_indexing_proxy.document_indexing_task_proxy import DocumentIndexingTaskProxy - -# ============================================================================ -# Test Data Factory -# ============================================================================ - - -class DocumentIndexingTaskProxyTestDataFactory: - """ - Factory class for creating test data and mock objects for DocumentIndexingTaskProxy tests. 
- - This factory provides static methods to create mock objects for: - - FeatureService features with billing configuration - - TenantIsolatedTaskQueue mocks with various states - - DocumentIndexingTaskProxy instances with different configurations - - DocumentTask entities for testing serialization - - The factory methods help maintain consistency across tests and reduce - code duplication when setting up test scenarios. - """ - - @staticmethod - def create_mock_features(billing_enabled: bool = False, plan: CloudPlan = CloudPlan.SANDBOX) -> Mock: - """ - Create mock features with billing configuration. - - This method creates a mock FeatureService features object with - billing configuration that can be used to test different billing - scenarios in the DocumentIndexingTaskProxy. - - Args: - billing_enabled: Whether billing is enabled for the tenant - plan: The CloudPlan enum value for the subscription plan - - Returns: - Mock object configured as FeatureService features with billing info - """ - features = Mock() - - features.billing = Mock() - - features.billing.enabled = billing_enabled - - features.billing.subscription = Mock() - - features.billing.subscription.plan = plan - - return features - - @staticmethod - def create_mock_tenant_queue(has_task_key: bool = False) -> Mock: - """ - Create mock TenantIsolatedTaskQueue. - - This method creates a mock TenantIsolatedTaskQueue that can simulate - different queue states for testing tenant isolation logic. 
- - Args: - has_task_key: Whether the queue has an active task key (task running) - - Returns: - Mock object configured as TenantIsolatedTaskQueue - """ - queue = Mock(spec=TenantIsolatedTaskQueue) - - queue.get_task_key.return_value = "task_key" if has_task_key else None - - queue.push_tasks = Mock() - - queue.set_task_waiting_time = Mock() - - queue.delete_task_key = Mock() - - return queue - - @staticmethod - def create_document_task_proxy( - tenant_id: str = "tenant-123", dataset_id: str = "dataset-456", document_ids: list[str] | None = None - ) -> DocumentIndexingTaskProxy: - """ - Create DocumentIndexingTaskProxy instance for testing. - - This method creates a DocumentIndexingTaskProxy instance with default - or specified parameters for use in test cases. - - Args: - tenant_id: Tenant identifier for the proxy - dataset_id: Dataset identifier for the proxy - document_ids: List of document IDs to index (defaults to 3 documents) - - Returns: - DocumentIndexingTaskProxy instance configured for testing - """ - if document_ids is None: - document_ids = ["doc-1", "doc-2", "doc-3"] - - return DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - @staticmethod - def create_document_task( - tenant_id: str = "tenant-123", dataset_id: str = "dataset-456", document_ids: list[str] | None = None - ) -> DocumentTask: - """ - Create DocumentTask entity for testing. - - This method creates a DocumentTask entity that can be used to test - task serialization and deserialization logic. 
- - Args: - tenant_id: Tenant identifier for the task - dataset_id: Dataset identifier for the task - document_ids: List of document IDs to index (defaults to 3 documents) - - Returns: - DocumentTask entity configured for testing - """ - if document_ids is None: - document_ids = ["doc-1", "doc-2", "doc-3"] - - return DocumentTask(tenant_id=tenant_id, dataset_id=dataset_id, document_ids=document_ids) - - -# ============================================================================ -# Test Classes -# ============================================================================ - - -class TestDocumentIndexingTaskProxy: - """ - Comprehensive unit tests for DocumentIndexingTaskProxy class. - - This test class covers all methods and scenarios of the DocumentIndexingTaskProxy, - including initialization, task routing, queue management, dispatch logic, and - error handling. - """ - - # ======================================================================== - # Initialization Tests - # ======================================================================== - - def test_initialization(self): - """ - Test DocumentIndexingTaskProxy initialization. - - This test verifies that the proxy is correctly initialized with - the provided tenant_id, dataset_id, and document_ids, and that - the TenantIsolatedTaskQueue is properly configured. 
- """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = ["doc-1", "doc-2", "doc-3"] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert isinstance(proxy._tenant_isolated_task_queue, TenantIsolatedTaskQueue) - - assert proxy._tenant_isolated_task_queue._tenant_id == tenant_id - - assert proxy._tenant_isolated_task_queue._unique_key == "document_indexing" - - def test_initialization_with_empty_document_ids(self): - """ - Test initialization with empty document_ids list. - - This test verifies that the proxy can be initialized with an empty - document_ids list, which may occur in edge cases or error scenarios. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = [] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert len(proxy._document_ids) == 0 - - def test_initialization_with_single_document_id(self): - """ - Test initialization with single document_id. - - This test verifies that the proxy can be initialized with a single - document ID, which is a common use case for single document indexing. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = ["doc-1"] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert len(proxy._document_ids) == 1 - - def test_initialization_with_large_batch(self): - """ - Test initialization with large batch of document IDs. 
- - This test verifies that the proxy can handle large batches of - document IDs, which may occur in bulk indexing scenarios. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = [f"doc-{i}" for i in range(100)] - - # Act - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - - # Assert - assert proxy._tenant_id == tenant_id - - assert proxy._dataset_id == dataset_id - - assert proxy._document_ids == document_ids - - assert len(proxy._document_ids) == 100 - - # ======================================================================== - # Features Property Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_features_property(self, mock_feature_service): - """ - Test cached_property features. - - This test verifies that the features property is correctly cached - and that FeatureService.get_features is called only once, even when - the property is accessed multiple times. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features() - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - # Act - features1 = proxy.features - - features2 = proxy.features # Second call should use cached property - - # Assert - assert features1 == mock_features - - assert features2 == mock_features - - assert features1 is features2 # Should be the same instance due to caching - - mock_feature_service.get_features.assert_called_once_with("tenant-123") - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_features_property_with_different_tenants(self, mock_feature_service): - """ - Test features property with different tenant IDs. 
- - This test verifies that the features property correctly calls - FeatureService.get_features with the correct tenant_id for each - proxy instance. - """ - # Arrange - mock_features1 = DocumentIndexingTaskProxyTestDataFactory.create_mock_features() - - mock_features2 = DocumentIndexingTaskProxyTestDataFactory.create_mock_features() - - mock_feature_service.get_features.side_effect = [mock_features1, mock_features2] - - proxy1 = DocumentIndexingTaskProxy("tenant-1", "dataset-1", ["doc-1"]) - - proxy2 = DocumentIndexingTaskProxy("tenant-2", "dataset-2", ["doc-2"]) - - # Act - features1 = proxy1.features - - features2 = proxy2.features - - # Assert - assert features1 == mock_features1 - - assert features2 == mock_features2 - - mock_feature_service.get_features.assert_any_call("tenant-1") - - mock_feature_service.get_features.assert_any_call("tenant-2") - - # ======================================================================== - # Direct Queue Routing Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue(self, mock_task): - """ - Test _send_to_direct_queue method. - - This test verifies that _send_to_direct_queue correctly calls - task_func.delay() with the correct parameters, bypassing tenant - isolation queue management. 
- """ - # Arrange - tenant_id = "tenant-direct-queue" - dataset_id = "dataset-direct-queue" - document_ids = ["doc-direct-1", "doc-direct-2"] - proxy = DocumentIndexingTaskProxy(tenant_id, dataset_id, document_ids) - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with(tenant_id=tenant_id, dataset_id=dataset_id, document_ids=document_ids) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_direct_queue_with_priority_task(self, mock_task): - """ - Test _send_to_direct_queue with priority task function. - - This test verifies that _send_to_direct_queue works correctly - with priority_document_indexing_task as the task function. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue_with_single_document(self, mock_task): - """ - Test _send_to_direct_queue with single document ID. - - This test verifies that _send_to_direct_queue correctly handles - a single document ID in the document_ids list. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", ["doc-1"]) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue_with_empty_documents(self, mock_task): - """ - Test _send_to_direct_queue with empty document_ids list. - - This test verifies that _send_to_direct_queue correctly handles - an empty document_ids list, which may occur in edge cases. - """ - # Arrange - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", []) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with(tenant_id="tenant-123", dataset_id="dataset-456", document_ids=[]) - - # ======================================================================== - # Tenant Queue Routing Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_with_existing_task_key(self, mock_task): - """ - Test _send_to_tenant_queue when task key exists. - - This test verifies that when a task key exists (indicating another - task is running), the new task is pushed to the waiting queue instead - of being executed immediately. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=True - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - proxy._tenant_isolated_task_queue.push_tasks.assert_called_once() - - pushed_tasks = proxy._tenant_isolated_task_queue.push_tasks.call_args[0][0] - - assert len(pushed_tasks) == 1 - - expected_task_data = { - "tenant_id": "tenant-123", - "dataset_id": "dataset-456", - "document_ids": ["doc-1", "doc-2", "doc-3"], - } - assert pushed_tasks[0] == expected_task_data - - assert pushed_tasks[0]["document_ids"] == ["doc-1", "doc-2", "doc-3"] - - mock_task.delay.assert_not_called() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_without_task_key(self, mock_task): - """ - Test _send_to_tenant_queue when no task key exists. - - This test verifies that when no task key exists (indicating no task - is currently running), the task is executed immediately and the - task waiting time flag is set. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - proxy._tenant_isolated_task_queue.push_tasks.assert_not_called() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_tenant_queue_with_priority_task(self, mock_task): - """ - Test _send_to_tenant_queue with priority task function. - - This test verifies that _send_to_tenant_queue works correctly - with priority_document_indexing_task as the task function. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_document_task_serialization(self, mock_task): - """ - Test DocumentTask serialization in _send_to_tenant_queue. - - This test verifies that DocumentTask entities are correctly - serialized to dictionaries when pushing to the waiting queue. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=True - ) - - mock_task.delay = Mock() - - # Act - proxy._send_to_tenant_queue(mock_task) - - # Assert - pushed_tasks = proxy._tenant_isolated_task_queue.push_tasks.call_args[0][0] - - task_dict = pushed_tasks[0] - - # Verify the task can be deserialized back to DocumentTask - document_task = DocumentTask(**task_dict) - - assert document_task.tenant_id == "tenant-123" - - assert document_task.dataset_id == "dataset-456" - - assert document_task.document_ids == ["doc-1", "doc-2", "doc-3"] - - # ======================================================================== - # Queue Type Selection Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_default_tenant_queue(self, mock_task): - """ - Test _send_to_default_tenant_queue method. - - This test verifies that _send_to_default_tenant_queue correctly - calls _send_to_tenant_queue with normal_document_indexing_task. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_tenant_queue = Mock() - - # Act - proxy._send_to_default_tenant_queue() - - # Assert - proxy._send_to_tenant_queue.assert_called_once_with(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_priority_tenant_queue(self, mock_task): - """ - Test _send_to_priority_tenant_queue method. - - This test verifies that _send_to_priority_tenant_queue correctly - calls _send_to_tenant_queue with priority_document_indexing_task. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_tenant_queue = Mock() - - # Act - proxy._send_to_priority_tenant_queue() - - # Assert - proxy._send_to_tenant_queue.assert_called_once_with(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_send_to_priority_direct_queue(self, mock_task): - """ - Test _send_to_priority_direct_queue method. - - This test verifies that _send_to_priority_direct_queue correctly - calls _send_to_direct_queue with priority_document_indexing_task. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_direct_queue = Mock() - - # Act - proxy._send_to_priority_direct_queue() - - # Assert - proxy._send_to_direct_queue.assert_called_once_with(mock_task) - - # ======================================================================== - # Dispatch Logic Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_enabled_sandbox_plan(self, mock_feature_service): - """ - Test _dispatch method when billing is enabled with SANDBOX plan. - - This test verifies that when billing is enabled and the subscription - plan is SANDBOX, the dispatch method routes to the default tenant queue. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_default_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_default_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_enabled_team_plan(self, mock_feature_service): - """ - Test _dispatch method when billing is enabled with TEAM plan. - - This test verifies that when billing is enabled and the subscription - plan is TEAM, the dispatch method routes to the priority tenant queue. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.TEAM - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_enabled_professional_plan(self, mock_feature_service): - """ - Test _dispatch method when billing is enabled with PROFESSIONAL plan. - - This test verifies that when billing is enabled and the subscription - plan is PROFESSIONAL, the dispatch method routes to the priority tenant queue. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.PROFESSIONAL - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_with_billing_disabled(self, mock_feature_service): - """ - Test _dispatch method when billing is disabled. - - This test verifies that when billing is disabled (e.g., self-hosted - or enterprise), the dispatch method routes to the priority direct queue. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=False) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_direct_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_direct_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_edge_case_empty_plan(self, mock_feature_service): - """ - Test _dispatch method with empty plan string. - - This test verifies that when billing is enabled but the plan is an - empty string, the dispatch method routes to the priority tenant queue - (treats it as a non-SANDBOX plan). 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=True, plan="") - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_edge_case_none_plan(self, mock_feature_service): - """ - Test _dispatch method with None plan. - - This test verifies that when billing is enabled but the plan is None, - the dispatch method routes to the priority tenant queue (treats it as - a non-SANDBOX plan). - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=True, plan=None) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy._dispatch() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - # ======================================================================== - # Delay Method Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_delay_method(self, mock_feature_service): - """ - Test delay method integration. - - This test verifies that the delay method correctly calls _dispatch, - which is the public interface for scheduling document indexing tasks. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_default_tenant_queue = Mock() - - # Act - proxy.delay() - - # Assert - proxy._send_to_default_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_delay_method_with_team_plan(self, mock_feature_service): - """ - Test delay method with TEAM plan. - - This test verifies that the delay method correctly routes to the - priority tenant queue when the subscription plan is TEAM. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.TEAM - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_tenant_queue = Mock() - - # Act - proxy.delay() - - # Assert - proxy._send_to_priority_tenant_queue.assert_called_once() - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_delay_method_with_billing_disabled(self, mock_feature_service): - """ - Test delay method with billing disabled. - - This test verifies that the delay method correctly routes to the - priority direct queue when billing is disabled. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=False) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._send_to_priority_direct_queue = Mock() - - # Act - proxy.delay() - - # Assert - proxy._send_to_priority_direct_queue.assert_called_once() - - # ======================================================================== - # DocumentTask Entity Tests - # ======================================================================== - - def test_document_task_dataclass(self): - """ - Test DocumentTask dataclass. - - This test verifies that DocumentTask entities can be created and - accessed correctly, which is important for task serialization. - """ - # Arrange - tenant_id = "tenant-123" - - dataset_id = "dataset-456" - - document_ids = ["doc-1", "doc-2"] - - # Act - task = DocumentTask(tenant_id=tenant_id, dataset_id=dataset_id, document_ids=document_ids) - - # Assert - assert task.tenant_id == tenant_id - - assert task.dataset_id == dataset_id - - assert task.document_ids == document_ids - - def test_document_task_serialization(self): - """ - Test DocumentTask serialization to dictionary. - - This test verifies that DocumentTask entities can be correctly - serialized to dictionaries using asdict() for queue storage. - """ - # Arrange - from dataclasses import asdict - - task = DocumentIndexingTaskProxyTestDataFactory.create_document_task() - - # Act - task_dict = asdict(task) - - # Assert - assert task_dict["tenant_id"] == "tenant-123" - - assert task_dict["dataset_id"] == "dataset-456" - - assert task_dict["document_ids"] == ["doc-1", "doc-2", "doc-3"] - - def test_document_task_deserialization(self): - """ - Test DocumentTask deserialization from dictionary. - - This test verifies that DocumentTask entities can be correctly - deserialized from dictionaries when pulled from the queue. 
- """ - # Arrange - task_dict = { - "tenant_id": "tenant-123", - "dataset_id": "dataset-456", - "document_ids": ["doc-1", "doc-2", "doc-3"], - } - - # Act - task = DocumentTask(**task_dict) - - # Assert - assert task.tenant_id == "tenant-123" - - assert task.dataset_id == "dataset-456" - - assert task.document_ids == ["doc-1", "doc-2", "doc-3"] - - # ======================================================================== - # Batch Operations Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_batch_operation_with_multiple_documents(self, mock_task): - """ - Test batch operation with multiple documents. - - This test verifies that the proxy correctly handles batch operations - with multiple document IDs in a single task. - """ - # Arrange - document_ids = [f"doc-{i}" for i in range(10)] - - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", document_ids) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=document_ids - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_batch_operation_with_large_batch(self, mock_task): - """ - Test batch operation with large batch of documents. - - This test verifies that the proxy correctly handles large batches - of document IDs, which may occur in bulk indexing scenarios. 
- """ - # Arrange - document_ids = [f"doc-{i}" for i in range(100)] - - proxy = DocumentIndexingTaskProxy("tenant-123", "dataset-456", document_ids) - - mock_task.delay = Mock() - - # Act - proxy._send_to_direct_queue(mock_task) - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=document_ids - ) - - assert len(mock_task.delay.call_args[1]["document_ids"]) == 100 - - # ======================================================================== - # Error Handling Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_direct_queue_task_delay_failure(self, mock_task): - """ - Test _send_to_direct_queue when task.delay() raises an exception. - - This test verifies that exceptions raised by task.delay() are - propagated correctly and not swallowed. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_task.delay.side_effect = Exception("Task delay failed") - - # Act & Assert - with pytest.raises(Exception, match="Task delay failed"): - proxy._send_to_direct_queue(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_push_tasks_failure(self, mock_task): - """ - Test _send_to_tenant_queue when push_tasks raises an exception. - - This test verifies that exceptions raised by push_tasks are - propagated correctly when a task key exists. 
- """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue(has_task_key=True) - - mock_queue.push_tasks.side_effect = Exception("Push tasks failed") - - proxy._tenant_isolated_task_queue = mock_queue - - # Act & Assert - with pytest.raises(Exception, match="Push tasks failed"): - proxy._send_to_tenant_queue(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_send_to_tenant_queue_set_waiting_time_failure(self, mock_task): - """ - Test _send_to_tenant_queue when set_task_waiting_time raises an exception. - - This test verifies that exceptions raised by set_task_waiting_time are - propagated correctly when no task key exists. - """ - # Arrange - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue(has_task_key=False) - - mock_queue.set_task_waiting_time.side_effect = Exception("Set waiting time failed") - - proxy._tenant_isolated_task_queue = mock_queue - - # Act & Assert - with pytest.raises(Exception, match="Set waiting time failed"): - proxy._send_to_tenant_queue(mock_task) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - def test_dispatch_feature_service_failure(self, mock_feature_service): - """ - Test _dispatch when FeatureService.get_features raises an exception. - - This test verifies that exceptions raised by FeatureService.get_features - are propagated correctly during dispatch. 
- """ - # Arrange - mock_feature_service.get_features.side_effect = Exception("Feature service failed") - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - # Act & Assert - with pytest.raises(Exception, match="Feature service failed"): - proxy._dispatch() - - # ======================================================================== - # Integration Tests - # ======================================================================== - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_proxy.document_indexing_task_proxy.normal_document_indexing_task") - def test_full_flow_sandbox_plan(self, mock_task, mock_feature_service): - """ - Test full flow for SANDBOX plan with tenant queue. - - This test verifies the complete flow from delay() call to task - scheduling for a SANDBOX plan tenant, including tenant isolation. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_full_flow_team_plan(self, mock_task, mock_feature_service): - """ - Test full flow for TEAM plan with priority tenant queue. 
- - This test verifies the complete flow from delay() call to task - scheduling for a TEAM plan tenant, including priority routing. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.TEAM - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=False - ) - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - proxy._tenant_isolated_task_queue.set_task_waiting_time.assert_called_once() - - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_proxy.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_proxy.document_indexing_task_proxy.priority_document_indexing_task") - def test_full_flow_billing_disabled(self, mock_task, mock_feature_service): - """ - Test full flow for billing disabled (self-hosted/enterprise). - - This test verifies the complete flow from delay() call to task - scheduling when billing is disabled, using priority direct queue. 
- """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features(billing_enabled=False) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - mock_task.delay.assert_called_once_with( - tenant_id="tenant-123", dataset_id="dataset-456", document_ids=["doc-1", "doc-2", "doc-3"] - ) - - @patch("services.document_indexing_task_proxy.FeatureService") - @patch("services.document_indexing_task_proxy.normal_document_indexing_task") - def test_full_flow_with_existing_task_key(self, mock_task, mock_feature_service): - """ - Test full flow when task key exists (task queuing). - - This test verifies the complete flow when another task is already - running, ensuring the new task is queued correctly. - """ - # Arrange - mock_features = DocumentIndexingTaskProxyTestDataFactory.create_mock_features( - billing_enabled=True, plan=CloudPlan.SANDBOX - ) - - mock_feature_service.get_features.return_value = mock_features - - proxy = DocumentIndexingTaskProxyTestDataFactory.create_document_task_proxy() - - proxy._tenant_isolated_task_queue = DocumentIndexingTaskProxyTestDataFactory.create_mock_tenant_queue( - has_task_key=True - ) - - mock_task.delay = Mock() - - # Act - proxy.delay() - - # Assert - proxy._tenant_isolated_task_queue.push_tasks.assert_called_once() - - pushed_tasks = proxy._tenant_isolated_task_queue.push_tasks.call_args[0][0] - - expected_task_data = { - "tenant_id": "tenant-123", - "dataset_id": "dataset-456", - "document_ids": ["doc-1", "doc-2", "doc-3"], - } - assert pushed_tasks[0] == expected_task_data - - assert pushed_tasks[0]["document_ids"] == ["doc-1", "doc-2", "doc-3"] - - mock_task.delay.assert_not_called() diff --git a/api/tests/unit_tests/services/external_dataset_service.py b/api/tests/unit_tests/services/external_dataset_service.py deleted file mode 100644 index 
83bae370eb..0000000000 --- a/api/tests/unit_tests/services/external_dataset_service.py +++ /dev/null @@ -1,925 +0,0 @@ -""" -Extensive unit tests for ``ExternalDatasetService``. - -This module focuses on the *external dataset service* surface area, which is responsible -for integrating with **external knowledge APIs** and wiring them into Dify datasets. - -The goal of this test suite is twofold: - -- Provide **high‑confidence regression coverage** for all public helpers on - ``ExternalDatasetService``. -- Serve as **executable documentation** for how external API integration is expected - to behave in different scenarios (happy paths, validation failures, and error codes). - -The file intentionally contains **rich comments and generous spacing** in order to make -each scenario easy to scan during reviews. -""" - -from __future__ import annotations - -from types import SimpleNamespace -from typing import Any, cast -from unittest.mock import MagicMock, Mock, patch - -import httpx -import pytest - -from constants import HIDDEN_VALUE -from models.dataset import Dataset, ExternalKnowledgeApis, ExternalKnowledgeBindings -from services.entities.external_knowledge_entities.external_knowledge_entities import ( - Authorization, - AuthorizationConfig, - ExternalKnowledgeApiSetting, -) -from services.errors.dataset import DatasetNameDuplicateError -from services.external_knowledge_service import ExternalDatasetService - - -class ExternalDatasetTestDataFactory: - """ - Factory helpers for building *lightweight* mocks for external knowledge tests. - - These helpers are intentionally small and explicit: - - - They avoid pulling in unnecessary fixtures. - - They reflect the minimal contract that the service under test cares about. 
- """ - - @staticmethod - def create_external_api( - api_id: str = "api-123", - tenant_id: str = "tenant-1", - name: str = "Test API", - description: str = "Description", - settings: dict[str, Any] | None = None, - ) -> ExternalKnowledgeApis: - """ - Create a concrete ``ExternalKnowledgeApis`` instance with minimal fields. - - Using the real SQLAlchemy model (instead of a pure Mock) makes it easier to - exercise ``settings_dict`` and other convenience properties if needed. - """ - - instance = ExternalKnowledgeApis( - tenant_id=tenant_id, - name=name, - description=description, - settings=None if settings is None else cast(str, pytest.approx), # type: ignore[assignment] - ) - - # Overwrite generated id for determinism in assertions. - instance.id = api_id - return instance - - @staticmethod - def create_dataset( - dataset_id: str = "ds-1", - tenant_id: str = "tenant-1", - name: str = "External Dataset", - provider: str = "external", - ) -> Dataset: - """ - Build a small ``Dataset`` instance representing an external dataset. - """ - - dataset = Dataset( - tenant_id=tenant_id, - name=name, - description="", - provider=provider, - created_by="user-1", - ) - dataset.id = dataset_id - return dataset - - @staticmethod - def create_external_binding( - tenant_id: str = "tenant-1", - dataset_id: str = "ds-1", - api_id: str = "api-1", - external_knowledge_id: str = "knowledge-1", - ) -> ExternalKnowledgeBindings: - """ - Small helper for a binding between dataset and external knowledge API. 
- """ - - binding = ExternalKnowledgeBindings( - tenant_id=tenant_id, - dataset_id=dataset_id, - external_knowledge_api_id=api_id, - external_knowledge_id=external_knowledge_id, - created_by="user-1", - ) - return binding - - -# --------------------------------------------------------------------------- -# get_external_knowledge_apis -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceGetExternalKnowledgeApis: - """ - Tests for ``ExternalDatasetService.get_external_knowledge_apis``. - - These tests focus on: - - - Basic pagination wiring via ``db.paginate``. - - Optional search keyword behaviour. - """ - - @pytest.fixture - def mock_db_paginate(self): - """ - Patch ``db.paginate`` so we do not touch the real database layer. - """ - - with ( - patch("services.external_knowledge_service.db.paginate", autospec=True) as mock_paginate, - patch("services.external_knowledge_service.select", autospec=True), - ): - yield mock_paginate - - def test_get_external_knowledge_apis_basic_pagination(self, mock_db_paginate: MagicMock): - """ - It should return ``items`` and ``total`` coming from the paginate object. 
- """ - - # Arrange - tenant_id = "tenant-1" - page = 1 - per_page = 20 - - mock_items = [Mock(spec=ExternalKnowledgeApis), Mock(spec=ExternalKnowledgeApis)] - mock_pagination = SimpleNamespace(items=mock_items, total=42) - mock_db_paginate.return_value = mock_pagination - - # Act - items, total = ExternalDatasetService.get_external_knowledge_apis(page, per_page, tenant_id) - - # Assert - assert items is mock_items - assert total == 42 - - mock_db_paginate.assert_called_once() - call_kwargs = mock_db_paginate.call_args.kwargs - assert call_kwargs["page"] == page - assert call_kwargs["per_page"] == per_page - assert call_kwargs["max_per_page"] == 100 - assert call_kwargs["error_out"] is False - - def test_get_external_knowledge_apis_with_search_keyword(self, mock_db_paginate: MagicMock): - """ - When a search keyword is provided, the query should be adjusted - (we simply assert that paginate is still called and does not explode). - """ - - # Arrange - tenant_id = "tenant-1" - page = 2 - per_page = 10 - search = "foo" - - mock_pagination = SimpleNamespace(items=[], total=0) - mock_db_paginate.return_value = mock_pagination - - # Act - items, total = ExternalDatasetService.get_external_knowledge_apis(page, per_page, tenant_id, search=search) - - # Assert - assert items == [] - assert total == 0 - mock_db_paginate.assert_called_once() - - -# --------------------------------------------------------------------------- -# validate_api_list -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceValidateApiList: - """ - Lightweight validation tests for ``validate_api_list``. - """ - - def test_validate_api_list_success(self): - """ - A minimal valid configuration (endpoint + api_key) should pass. 
- """ - - config = {"endpoint": "https://example.com", "api_key": "secret"} - - # Act & Assert – no exception expected - ExternalDatasetService.validate_api_list(config) - - @pytest.mark.parametrize( - ("config", "expected_message"), - [ - ({}, "api list is empty"), - ({"api_key": "k"}, "endpoint is required"), - ({"endpoint": "https://example.com"}, "api_key is required"), - ], - ) - def test_validate_api_list_failures(self, config: dict[str, Any], expected_message: str): - """ - Invalid configs should raise ``ValueError`` with a clear message. - """ - - with pytest.raises(ValueError, match=expected_message): - ExternalDatasetService.validate_api_list(config) - - -# --------------------------------------------------------------------------- -# create_external_knowledge_api & get/update/delete -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceCrudExternalKnowledgeApi: - """ - CRUD tests for external knowledge API templates. - """ - - @pytest.fixture - def mock_db_session(self): - """ - Patch ``db.session`` for all CRUD tests in this class. - """ - - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_create_external_knowledge_api_success(self, mock_db_session: MagicMock): - """ - ``create_external_knowledge_api`` should persist a new record - when settings are present and valid. - """ - - tenant_id = "tenant-1" - user_id = "user-1" - args = { - "name": "API", - "description": "desc", - "settings": {"endpoint": "https://api.example.com", "api_key": "secret"}, - } - - # We do not want to actually call the remote endpoint here, so we patch the validator. 
- with patch.object(ExternalDatasetService, "check_endpoint_and_api_key", autospec=True) as mock_check: - result = ExternalDatasetService.create_external_knowledge_api(tenant_id, user_id, args) - - assert isinstance(result, ExternalKnowledgeApis) - mock_check.assert_called_once_with(args["settings"]) - mock_db_session.add.assert_called_once() - mock_db_session.commit.assert_called_once() - - def test_create_external_knowledge_api_missing_settings_raises(self, mock_db_session: MagicMock): - """ - Missing ``settings`` should result in a ``ValueError``. - """ - - tenant_id = "tenant-1" - user_id = "user-1" - args = {"name": "API", "description": "desc"} - - with pytest.raises(ValueError, match="settings is required"): - ExternalDatasetService.create_external_knowledge_api(tenant_id, user_id, args) - - mock_db_session.add.assert_not_called() - mock_db_session.commit.assert_not_called() - - def test_get_external_knowledge_api_found(self, mock_db_session: MagicMock): - """ - ``get_external_knowledge_api`` should return the first matching record. - """ - - api = Mock(spec=ExternalKnowledgeApis) - mock_db_session.scalar.return_value = api - - result = ExternalDatasetService.get_external_knowledge_api("api-id", "tenant-id") - assert result is api - - def test_get_external_knowledge_api_not_found_raises(self, mock_db_session: MagicMock): - """ - When the record is absent, a ``ValueError`` is raised. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.get_external_knowledge_api("missing-id", "tenant-id") - - def test_update_external_knowledge_api_success_with_hidden_api_key(self, mock_db_session: MagicMock): - """ - Updating an API should keep the existing API key when the special hidden - value placeholder is sent from the UI. 
- """ - - tenant_id = "tenant-1" - user_id = "user-1" - api_id = "api-1" - - existing_api = Mock(spec=ExternalKnowledgeApis) - existing_api.settings_dict = {"api_key": "stored-key"} - existing_api.settings = '{"api_key":"stored-key"}' - mock_db_session.scalar.return_value = existing_api - - args = { - "name": "New Name", - "description": "New Desc", - "settings": {"endpoint": "https://api.example.com", "api_key": HIDDEN_VALUE}, - } - - result = ExternalDatasetService.update_external_knowledge_api(tenant_id, user_id, api_id, args) - - assert result is existing_api - # The placeholder should be replaced with stored key. - assert args["settings"]["api_key"] == "stored-key" - mock_db_session.commit.assert_called_once() - - def test_update_external_knowledge_api_not_found_raises(self, mock_db_session: MagicMock): - """ - Updating a non‑existent API template should raise ``ValueError``. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.update_external_knowledge_api( - tenant_id="tenant-1", - user_id="user-1", - external_knowledge_api_id="missing-id", - args={"name": "n", "description": "d", "settings": {}}, - ) - - def test_delete_external_knowledge_api_success(self, mock_db_session: MagicMock): - """ - ``delete_external_knowledge_api`` should delete and commit when found. - """ - - api = Mock(spec=ExternalKnowledgeApis) - mock_db_session.scalar.return_value = api - - ExternalDatasetService.delete_external_knowledge_api("tenant-1", "api-1") - - mock_db_session.delete.assert_called_once_with(api) - mock_db_session.commit.assert_called_once() - - def test_delete_external_knowledge_api_not_found_raises(self, mock_db_session: MagicMock): - """ - Deletion of a missing template should raise ``ValueError``. 
- """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.delete_external_knowledge_api("tenant-1", "missing") - - -# --------------------------------------------------------------------------- -# external_knowledge_api_use_check & binding lookups -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceUsageAndBindings: - """ - Tests for usage checks and dataset binding retrieval. - """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_external_knowledge_api_use_check_in_use(self, mock_db_session: MagicMock): - """ - When there are bindings, ``external_knowledge_api_use_check`` returns True and count. - """ - - mock_db_session.scalar.return_value = 3 - - in_use, count = ExternalDatasetService.external_knowledge_api_use_check("api-1", "tenant-1") - - assert in_use is True - assert count == 3 - assert "tenant_id" in str(mock_db_session.scalar.call_args.args[0]) - - def test_external_knowledge_api_use_check_not_in_use(self, mock_db_session: MagicMock): - """ - Zero bindings should return ``(False, 0)``. - """ - - mock_db_session.scalar.return_value = 0 - - in_use, count = ExternalDatasetService.external_knowledge_api_use_check("api-1", "tenant-1") - - assert in_use is False - assert count == 0 - - def test_get_external_knowledge_binding_with_dataset_id_found(self, mock_db_session: MagicMock): - """ - Binding lookup should return the first record when present. 
- """ - - binding = Mock(spec=ExternalKnowledgeBindings) - mock_db_session.scalar.return_value = binding - - result = ExternalDatasetService.get_external_knowledge_binding_with_dataset_id("tenant-1", "ds-1") - assert result is binding - - def test_get_external_knowledge_binding_with_dataset_id_not_found_raises(self, mock_db_session: MagicMock): - """ - Missing binding should result in a ``ValueError``. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="external knowledge binding not found"): - ExternalDatasetService.get_external_knowledge_binding_with_dataset_id("tenant-1", "ds-1") - - -# --------------------------------------------------------------------------- -# document_create_args_validate -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceDocumentCreateArgsValidate: - """ - Tests for ``document_create_args_validate``. - """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_document_create_args_validate_success(self, mock_db_session: MagicMock): - """ - All required custom parameters present – validation should pass. 
- """ - - external_api = Mock(spec=ExternalKnowledgeApis) - external_api.settings = json_settings = ( - '[{"document_process_setting":[{"name":"foo","required":true},{"name":"bar","required":false}]}]' - ) - # Raw string; the service itself calls json.loads on it - mock_db_session.scalar.return_value = external_api - - process_parameter = {"foo": "value", "bar": "optional"} - - # Act & Assert – no exception - ExternalDatasetService.document_create_args_validate("tenant-1", "api-1", process_parameter) - - assert json_settings in external_api.settings # simple sanity check on our test data - - def test_document_create_args_validate_missing_template_raises(self, mock_db_session: MagicMock): - """ - When the referenced API template is missing, a ``ValueError`` is raised. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.document_create_args_validate("tenant-1", "missing", {}) - - def test_document_create_args_validate_missing_required_parameter_raises(self, mock_db_session: MagicMock): - """ - Required document process parameters must be supplied. - """ - - external_api = Mock(spec=ExternalKnowledgeApis) - external_api.settings = ( - '[{"document_process_setting":[{"name":"foo","required":true},{"name":"bar","required":false}]}]' - ) - mock_db_session.scalar.return_value = external_api - - process_parameter = {"bar": "present"} # missing "foo" - - with pytest.raises(ValueError, match="foo is required"): - ExternalDatasetService.document_create_args_validate("tenant-1", "api-1", process_parameter) - - -# --------------------------------------------------------------------------- -# process_external_api -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceProcessExternalApi: - """ - Tests focused on the HTTP request assembly and method mapping behaviour. 
- """ - - def test_process_external_api_valid_method_post(self): - """ - For a supported HTTP verb we should delegate to the correct ``ssrf_proxy`` function. - """ - - settings = ExternalKnowledgeApiSetting( - url="https://example.com/path", - request_method="POST", - headers={"X-Test": "1"}, - params={"foo": "bar"}, - ) - - fake_response = httpx.Response(200) - - with patch("services.external_knowledge_service.ssrf_proxy.post", autospec=True) as mock_post: - mock_post.return_value = fake_response - - result = ExternalDatasetService.process_external_api(settings, files=None) - - assert result is fake_response - mock_post.assert_called_once() - kwargs = mock_post.call_args.kwargs - assert kwargs["url"] == settings.url - assert kwargs["headers"] == settings.headers - assert kwargs["follow_redirects"] is True - assert "data" in kwargs - - def test_process_external_api_invalid_method_raises(self): - """ - An unsupported HTTP verb should raise ``InvalidHttpMethodError``. - """ - - settings = ExternalKnowledgeApiSetting( - url="https://example.com", - request_method="INVALID", - headers=None, - params={}, - ) - - from graphon.nodes.http_request.exc import InvalidHttpMethodError - - with pytest.raises(InvalidHttpMethodError): - ExternalDatasetService.process_external_api(settings, files=None) - - -# --------------------------------------------------------------------------- -# assembling_headers -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceAssemblingHeaders: - """ - Tests for header assembly based on different authentication flavours. - """ - - def test_assembling_headers_bearer_token(self): - """ - For bearer auth we expect ``Authorization: Bearer `` by default. 
- """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="bearer", api_key="secret", header=None), - ) - - headers = ExternalDatasetService.assembling_headers(auth) - - assert headers["Authorization"] == "Bearer secret" - - def test_assembling_headers_basic_token_with_custom_header(self): - """ - For basic auth we honour the configured header name. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="basic", api_key="abc123", header="X-Auth"), - ) - - headers = ExternalDatasetService.assembling_headers(auth, headers={"Existing": "1"}) - - assert headers["Existing"] == "1" - assert headers["X-Auth"] == "Basic abc123" - - def test_assembling_headers_custom_type(self): - """ - Custom auth type should inject the raw API key. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="custom", api_key="raw-key", header="X-API-KEY"), - ) - - headers = ExternalDatasetService.assembling_headers(auth, headers=None) - - assert headers["X-API-KEY"] == "raw-key" - - def test_assembling_headers_missing_config_raises(self): - """ - Missing config object should be rejected. - """ - - auth = Authorization(type="api-key", config=None) - - with pytest.raises(ValueError, match="authorization config is required"): - ExternalDatasetService.assembling_headers(auth) - - def test_assembling_headers_missing_api_key_raises(self): - """ - ``api_key`` is required when type is ``api-key``. - """ - - auth = Authorization( - type="api-key", - config=AuthorizationConfig(type="bearer", api_key=None, header="Authorization"), - ) - - with pytest.raises(ValueError, match="api_key is required"): - ExternalDatasetService.assembling_headers(auth) - - def test_assembling_headers_no_auth_type_leaves_headers_unchanged(self): - """ - For ``no-auth`` we should not modify the headers mapping. 
- """ - - auth = Authorization(type="no-auth", config=None) - - base_headers = {"X": "1"} - result = ExternalDatasetService.assembling_headers(auth, headers=base_headers) - - # A copy is returned, original is not mutated. - assert result == base_headers - assert result is not base_headers - - -# --------------------------------------------------------------------------- -# get_external_knowledge_api_settings -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceGetExternalKnowledgeApiSettings: - """ - Simple shape test for ``get_external_knowledge_api_settings``. - """ - - def test_get_external_knowledge_api_settings(self): - settings_dict: dict[str, Any] = { - "url": "https://example.com/retrieval", - "request_method": "post", - "headers": {"Content-Type": "application/json"}, - "params": {"foo": "bar"}, - } - - result = ExternalDatasetService.get_external_knowledge_api_settings(settings_dict) - - assert isinstance(result, ExternalKnowledgeApiSetting) - assert result.url == settings_dict["url"] - assert result.request_method == settings_dict["request_method"] - assert result.headers == settings_dict["headers"] - assert result.params == settings_dict["params"] - - -# --------------------------------------------------------------------------- -# create_external_dataset -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceCreateExternalDataset: - """ - Tests around creating the external dataset and its binding row. - """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_create_external_dataset_success(self, mock_db_session: MagicMock): - """ - A brand new dataset name with valid external knowledge references - should create both the dataset and its binding. 
- """ - - tenant_id = "tenant-1" - user_id = "user-1" - - args = { - "name": "My Dataset", - "description": "desc", - "external_knowledge_api_id": "api-1", - "external_knowledge_id": "knowledge-1", - "external_retrieval_model": {"top_k": 3}, - } - - # No existing dataset with same name. - mock_db_session.scalar.side_effect = [ - None, # duplicate‑name check - Mock(spec=ExternalKnowledgeApis), # external knowledge api - ] - - dataset = ExternalDatasetService.create_external_dataset(tenant_id, user_id, args) - - assert isinstance(dataset, Dataset) - assert dataset.provider == "external" - assert dataset.retrieval_model == args["external_retrieval_model"] - - assert mock_db_session.add.call_count >= 2 # dataset + binding - mock_db_session.flush.assert_called_once() - mock_db_session.commit.assert_called_once() - - def test_create_external_dataset_duplicate_name_raises(self, mock_db_session: MagicMock): - """ - When a dataset with the same name already exists, - ``DatasetNameDuplicateError`` is raised. - """ - - existing_dataset = Mock(spec=Dataset) - mock_db_session.scalar.return_value = existing_dataset - - args = { - "name": "Existing", - "external_knowledge_api_id": "api-1", - "external_knowledge_id": "knowledge-1", - } - - with pytest.raises(DatasetNameDuplicateError): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args) - - mock_db_session.add.assert_not_called() - mock_db_session.commit.assert_not_called() - - def test_create_external_dataset_missing_api_template_raises(self, mock_db_session: MagicMock): - """ - If the referenced external knowledge API does not exist, a ``ValueError`` is raised. - """ - - # First call: duplicate name check – not found. 
- mock_db_session.scalar.side_effect = [ - None, - None, # external knowledge api lookup - ] - - args = { - "name": "Dataset", - "external_knowledge_api_id": "missing", - "external_knowledge_id": "knowledge-1", - } - - with pytest.raises(ValueError, match="api template not found"): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args) - - def test_create_external_dataset_missing_required_ids_raise(self, mock_db_session: MagicMock): - """ - ``external_knowledge_id`` and ``external_knowledge_api_id`` are mandatory. - """ - - # duplicate name check — two calls to create_external_dataset, each does 2 scalar calls - mock_db_session.scalar.side_effect = [ - None, - Mock(spec=ExternalKnowledgeApis), - None, - Mock(spec=ExternalKnowledgeApis), - ] - - args_missing_knowledge_id = { - "name": "Dataset", - "external_knowledge_api_id": "api-1", - "external_knowledge_id": None, - } - - with pytest.raises(ValueError, match="external_knowledge_id is required"): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args_missing_knowledge_id) - - args_missing_api_id = { - "name": "Dataset", - "external_knowledge_api_id": None, - "external_knowledge_id": "k-1", - } - - with pytest.raises(ValueError, match="external_knowledge_api_id is required"): - ExternalDatasetService.create_external_dataset("tenant-1", "user-1", args_missing_api_id) - - -# --------------------------------------------------------------------------- -# fetch_external_knowledge_retrieval -# --------------------------------------------------------------------------- - - -class TestExternalDatasetServiceFetchExternalKnowledgeRetrieval: - """ - Tests for ``fetch_external_knowledge_retrieval`` which orchestrates - external retrieval requests and normalises the response payload. 
- """ - - @pytest.fixture - def mock_db_session(self): - with patch("services.external_knowledge_service.db.session", autospec=True) as mock_session: - yield mock_session - - def test_fetch_external_knowledge_retrieval_success(self, mock_db_session: MagicMock): - """ - With a valid binding and API template, records from the external - service should be returned when the HTTP response is 200. - """ - - tenant_id = "tenant-1" - dataset_id = "ds-1" - query = "test query" - external_retrieval_parameters = {"top_k": 3, "score_threshold_enabled": True, "score_threshold": 0.5} - - binding = ExternalDatasetTestDataFactory.create_external_binding( - tenant_id=tenant_id, - dataset_id=dataset_id, - api_id="api-1", - external_knowledge_id="knowledge-1", - ) - - api = Mock(spec=ExternalKnowledgeApis) - api.settings = '{"endpoint":"https://example.com","api_key":"secret"}' - - # First query: binding; second query: api. - mock_db_session.scalar.side_effect = [ - binding, - api, - ] - - fake_records = [{"content": "doc", "score": 0.9}] - fake_response = Mock(spec=httpx.Response) - fake_response.status_code = 200 - fake_response.json.return_value = {"records": fake_records} - - metadata_condition = SimpleNamespace(model_dump=lambda: {"field": "value"}) - - with patch.object( - ExternalDatasetService, "process_external_api", return_value=fake_response, autospec=True - ) as mock_process: - result = ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id=tenant_id, - dataset_id=dataset_id, - query=query, - external_retrieval_parameters=external_retrieval_parameters, - metadata_condition=metadata_condition, - ) - - assert result == fake_records - - mock_process.assert_called_once() - setting_arg = mock_process.call_args.args[0] - assert isinstance(setting_arg, ExternalKnowledgeApiSetting) - assert setting_arg.url.endswith("/retrieval") - - def test_fetch_external_knowledge_retrieval_binding_not_found_raises(self, mock_db_session: MagicMock): - """ - Missing binding 
should raise ``ValueError``. - """ - - mock_db_session.scalar.return_value = None - - with pytest.raises(ValueError, match="external knowledge binding not found"): - ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id="tenant-1", - dataset_id="missing", - query="q", - external_retrieval_parameters={}, - metadata_condition=None, - ) - - def test_fetch_external_knowledge_retrieval_missing_api_template_raises(self, mock_db_session: MagicMock): - """ - When the API template is missing or has no settings, a ``ValueError`` is raised. - """ - - binding = ExternalDatasetTestDataFactory.create_external_binding() - mock_db_session.scalar.side_effect = [ - binding, - None, - ] - - with pytest.raises(ValueError, match="external api template not found"): - ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id="tenant-1", - dataset_id="ds-1", - query="q", - external_retrieval_parameters={}, - metadata_condition=None, - ) - - def test_fetch_external_knowledge_retrieval_non_200_status_returns_empty_list(self, mock_db_session: MagicMock): - """ - Non‑200 responses should be treated as an empty result set. 
- """ - - binding = ExternalDatasetTestDataFactory.create_external_binding() - api = Mock(spec=ExternalKnowledgeApis) - api.settings = '{"endpoint":"https://example.com","api_key":"secret"}' - - mock_db_session.scalar.side_effect = [ - binding, - api, - ] - - fake_response = Mock(spec=httpx.Response) - fake_response.status_code = 500 - fake_response.json.return_value = {} - - with patch.object(ExternalDatasetService, "process_external_api", return_value=fake_response, autospec=True): - result = ExternalDatasetService.fetch_external_knowledge_retrieval( - tenant_id="tenant-1", - dataset_id="ds-1", - query="q", - external_retrieval_parameters={}, - metadata_condition=None, - ) - - assert result == [] diff --git a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py index 327281d07f..efb79aadde 100644 --- a/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py +++ b/api/tests/unit_tests/services/rag_pipeline/test_rag_pipeline_service.py @@ -374,24 +374,14 @@ def test_publish_workflow_success(mocker, rag_pipeline_service) -> None: mock_db = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db", mock_db) mock_dataset_service_class = mocker.patch("services.dataset_service.DatasetService") - mock_dataset_service = mock_dataset_service_class.return_value - # 6. Mock session and its scalar/query methods + # 6. 
Mock session and dataset lookup mock_session = mocker.Mock() mock_session.scalar.return_value = draft_wf - # Mock dataset update query (needed even if service is mocked, as rag_pipeline fetches it first) dataset = mocker.Mock() dataset.retrieval_model_dict = {} - dataset_query = mocker.Mock() - dataset_query.where.return_value.first.return_value = dataset - - # Mock node execution copy - node_exec_query = mocker.Mock() - node_exec_query.where.return_value.all.return_value = [] - - # Mocked session query side effects - mock_session.query.side_effect = [node_exec_query, dataset_query] + pipeline.retrieve_dataset.return_value = dataset # 7. Run test result = rag_pipeline_service.publish_workflow(session=mock_session, pipeline=pipeline, account=account) @@ -1524,7 +1514,6 @@ def test_handle_node_run_result_marks_document_error_for_published_invoke(mocker ) document = SimpleNamespace(indexing_status="waiting", error=None) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", return_value=document) add_mock = mocker.patch("services.rag_pipeline.rag_pipeline.db.session.add") commit_mock = mocker.patch("services.rag_pipeline.rag_pipeline.db.session.commit") @@ -1595,7 +1584,6 @@ def test_publish_customized_pipeline_template_raises_for_missing_workflow_id(moc def test_get_pipeline_raises_when_dataset_missing(mocker, rag_pipeline_service) -> None: - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=None) with pytest.raises(ValueError, match="Dataset not found"): @@ -1604,7 +1592,6 @@ def test_get_pipeline_raises_when_dataset_missing(mocker, rag_pipeline_service) def test_get_pipeline_raises_when_pipeline_missing(mocker, rag_pipeline_service) -> None: dataset = SimpleNamespace(pipeline_id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, None]) with pytest.raises(ValueError, match="Pipeline not found"): @@ -1644,7 
+1631,6 @@ def test_get_pipeline_templates_builtin_en_us_no_fallback(mocker) -> None: def test_update_customized_pipeline_template_commits_when_name_empty(mocker) -> None: template = SimpleNamespace(name="old", description="old", icon={}, updated_by=None) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=template) commit = mocker.patch("services.rag_pipeline.rag_pipeline.db.session.commit") mocker.patch("services.rag_pipeline.rag_pipeline.current_user", SimpleNamespace(id="u1", current_tenant_id="t1")) @@ -1871,7 +1857,6 @@ def test_run_free_workflow_node_delegates_to_handle_result(mocker, rag_pipeline_ def test_publish_customized_pipeline_template_raises_when_workflow_missing(mocker, rag_pipeline_service) -> None: pipeline = SimpleNamespace(id="p1", tenant_id="t1", workflow_id="wf-1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", side_effect=[pipeline, None]) with pytest.raises(ValueError, match="Workflow not found"): @@ -1910,7 +1895,6 @@ def test_get_recommended_plugins_skips_manifest_when_missing(mocker, rag_pipelin def test_retry_error_document_raises_when_pipeline_missing(mocker, rag_pipeline_service) -> None: exec_log = SimpleNamespace(pipeline_id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=exec_log) mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", return_value=None) @@ -1923,7 +1907,6 @@ def test_retry_error_document_raises_when_pipeline_missing(mocker, rag_pipeline_ def test_retry_error_document_raises_when_workflow_missing(mocker, rag_pipeline_service) -> None: exec_log = SimpleNamespace(pipeline_id="p1") pipeline = SimpleNamespace(id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", return_value=exec_log) mocker.patch("services.rag_pipeline.rag_pipeline.db.session.get", return_value=pipeline) 
mocker.patch.object(rag_pipeline_service, "get_published_workflow", return_value=None) @@ -1940,7 +1923,6 @@ def test_get_datasource_plugins_returns_empty_for_non_datasource_nodes(mocker, r workflow = SimpleNamespace( graph_dict={"nodes": [{"id": "n1", "data": {"type": "start"}}]}, rag_pipeline_variables=[] ) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) mocker.patch.object(rag_pipeline_service, "get_published_workflow", return_value=workflow) @@ -2103,7 +2085,6 @@ def test_get_datasource_plugins_handles_empty_datasource_data_and_non_published( graph_dict={"nodes": [{"id": "n1", "data": {"type": "datasource", "datasource_parameters": {}}}]}, rag_pipeline_variables=[{"variable": "v1", "belong_to_node_id": "shared"}], ) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) mocker.patch.object(rag_pipeline_service, "get_draft_workflow", return_value=workflow) mocker.patch( @@ -2143,7 +2124,6 @@ def test_get_datasource_plugins_extracts_user_inputs_and_credentials(mocker, rag {"variable": "v3", "belong_to_node_id": "shared"}, ], ) - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) mocker.patch.object(rag_pipeline_service, "get_published_workflow", return_value=workflow) mocker.patch( @@ -2161,7 +2141,6 @@ def test_get_datasource_plugins_extracts_user_inputs_and_credentials(mocker, rag def test_get_pipeline_returns_pipeline_when_found(mocker, rag_pipeline_service) -> None: dataset = SimpleNamespace(pipeline_id="p1") pipeline = SimpleNamespace(id="p1") - query = mocker.Mock() mocker.patch("services.rag_pipeline.rag_pipeline.db.session.scalar", side_effect=[dataset, pipeline]) result = rag_pipeline_service.get_pipeline("t1", "d1") diff --git a/api/tests/unit_tests/services/segment_service.py b/api/tests/unit_tests/services/segment_service.py deleted 
file mode 100644 index f0a66a00d4..0000000000 --- a/api/tests/unit_tests/services/segment_service.py +++ /dev/null @@ -1,1115 +0,0 @@ -from unittest.mock import MagicMock, Mock, patch - -import pytest - -from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from models.account import Account -from models.dataset import ChildChunk, Dataset, Document, DocumentSegment -from models.enums import SegmentType -from services.dataset_service import SegmentService -from services.entities.knowledge_entities.knowledge_entities import SegmentUpdateArgs -from services.errors.chunk import ChildChunkDeleteIndexError, ChildChunkIndexingError - - -class SegmentTestDataFactory: - """Factory class for creating test data and mock objects for segment service tests.""" - - @staticmethod - def create_segment_mock( - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - content: str = "Test segment content", - position: int = 1, - enabled: bool = True, - status: str = "completed", - word_count: int = 3, - tokens: int = 5, - **kwargs, - ) -> Mock: - """Create a mock segment with specified attributes.""" - segment = Mock(spec=DocumentSegment) - segment.id = segment_id - segment.document_id = document_id - segment.dataset_id = dataset_id - segment.tenant_id = tenant_id - segment.content = content - segment.position = position - segment.enabled = enabled - segment.status = status - segment.word_count = word_count - segment.tokens = tokens - segment.index_node_id = f"node-{segment_id}" - segment.index_node_hash = "hash-123" - segment.keywords = [] - segment.answer = None - segment.disabled_at = None - segment.disabled_by = None - segment.updated_by = None - segment.updated_at = None - segment.indexing_at = None - segment.completed_at = None - segment.error = None - for key, value in kwargs.items(): - setattr(segment, key, value) - return segment - - @staticmethod - def 
create_child_chunk_mock( - chunk_id: str = "chunk-123", - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - content: str = "Test child chunk content", - position: int = 1, - word_count: int = 3, - **kwargs, - ) -> Mock: - """Create a mock child chunk with specified attributes.""" - chunk = Mock(spec=ChildChunk) - chunk.id = chunk_id - chunk.segment_id = segment_id - chunk.document_id = document_id - chunk.dataset_id = dataset_id - chunk.tenant_id = tenant_id - chunk.content = content - chunk.position = position - chunk.word_count = word_count - chunk.index_node_id = f"node-{chunk_id}" - chunk.index_node_hash = "hash-123" - chunk.type = SegmentType.AUTOMATIC - chunk.created_by = "user-123" - chunk.updated_by = None - chunk.updated_at = None - for key, value in kwargs.items(): - setattr(chunk, key, value) - return chunk - - @staticmethod - def create_document_mock( - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - doc_form: str = IndexStructureType.PARAGRAPH_INDEX, - word_count: int = 100, - **kwargs, - ) -> Mock: - """Create a mock document with specified attributes.""" - document = Mock(spec=Document) - document.id = document_id - document.dataset_id = dataset_id - document.tenant_id = tenant_id - document.doc_form = doc_form - document.word_count = word_count - for key, value in kwargs.items(): - setattr(document, key, value) - return document - - @staticmethod - def create_dataset_mock( - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - indexing_technique: str = IndexTechniqueType.HIGH_QUALITY, - embedding_model: str = "text-embedding-ada-002", - embedding_model_provider: str = "openai", - **kwargs, - ) -> Mock: - """Create a mock dataset with specified attributes.""" - dataset = Mock(spec=Dataset) - dataset.id = dataset_id - dataset.tenant_id = tenant_id - dataset.indexing_technique = indexing_technique - 
dataset.embedding_model = embedding_model - dataset.embedding_model_provider = embedding_model_provider - for key, value in kwargs.items(): - setattr(dataset, key, value) - return dataset - - @staticmethod - def create_user_mock( - user_id: str = "user-789", - tenant_id: str = "tenant-123", - **kwargs, - ) -> Mock: - """Create a mock user with specified attributes.""" - user = Mock(spec=Account) - user.id = user_id - user.current_tenant_id = tenant_id - user.name = "Test User" - for key, value in kwargs.items(): - setattr(user, key, value) - return user - - -class TestSegmentServiceCreateSegment: - """Tests for SegmentService.create_segment method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_create_segment_success(self, mock_db_session, mock_current_user): - """Test successful creation of a segment.""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = {"content": "New segment content", "keywords": ["test", "segment"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None # No existing segments - mock_db_session.query.return_value = mock_query - - mock_segment = SegmentTestDataFactory.create_segment_mock() - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - 
patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert mock_db_session.add.call_count == 2 - - created_segment = mock_db_session.add.call_args_list[0].args[0] - assert isinstance(created_segment, DocumentSegment) - assert created_segment.content == args["content"] - assert created_segment.word_count == len(args["content"]) - - mock_db_session.commit.assert_called_once() - - mock_vector_service.assert_called_once() - vector_call_args = mock_vector_service.call_args[0] - assert vector_call_args[0] == [args["keywords"]] - assert vector_call_args[1][0] == created_segment - assert vector_call_args[2] == dataset - assert vector_call_args[3] == document.doc_form - - assert result == mock_segment - - def test_create_segment_with_qa_model(self, mock_db_session, mock_current_user): - """Test creation of segment with QA model (requires answer).""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(doc_form=IndexStructureType.QA_INDEX, word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = {"content": "What is AI?", "answer": "AI is Artificial Intelligence", "keywords": ["ai"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - mock_segment = SegmentTestDataFactory.create_segment_mock() - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - 
"services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert result == mock_segment - mock_db_session.add.assert_called() - mock_db_session.commit.assert_called() - - def test_create_segment_with_high_quality_indexing(self, mock_db_session, mock_current_user): - """Test creation of segment with high quality indexing technique.""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - args = {"content": "New segment content", "keywords": ["test"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - mock_embedding_model = MagicMock() - mock_embedding_model.get_text_embedding_num_tokens.return_value = [10] - mock_model_manager = MagicMock() - mock_model_manager.get_model_instance.return_value = mock_embedding_model - - mock_segment = SegmentTestDataFactory.create_segment_mock() - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.ModelManager.for_tenant", autospec=True) as mock_model_manager_class, - patch("services.dataset_service.helper.generate_text_hash", 
autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_model_manager_class.return_value = mock_model_manager - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert result == mock_segment - mock_model_manager.get_model_instance.assert_called_once() - mock_embedding_model.get_text_embedding_num_tokens.assert_called_once() - - def test_create_segment_vector_index_failure(self, mock_db_session, mock_current_user): - """Test segment creation when vector indexing fails.""" - # Arrange - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = {"content": "New segment content", "keywords": ["test"]} - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - mock_segment = SegmentTestDataFactory.create_segment_mock(enabled=False, status="error") - mock_db_session.query.return_value.where.return_value.first.return_value = mock_segment - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_segments_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_vector_service.side_effect = Exception("Vector indexing failed") - mock_hash.return_value = "hash-123" - mock_now.return_value = "2024-01-01T00:00:00" - - # 
Act - result = SegmentService.create_segment(args, document, dataset) - - # Assert - assert result == mock_segment - assert mock_db_session.commit.call_count == 2 # Once for creation, once for error update - - -class TestSegmentServiceUpdateSegment: - """Tests for SegmentService.update_segment method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_update_segment_content_success(self, mock_db_session, mock_current_user): - """Test successful update of segment content.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True, word_count=10) - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = SegmentUpdateArgs(content="Updated content", keywords=["updated"]) - - mock_db_session.query.return_value.where.return_value.first.return_value = segment - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.VectorService.update_segment_vector", autospec=True) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_redis_get.return_value = None # Not indexing - mock_hash.return_value = "new-hash" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_segment(args, segment, document, dataset) - - # Assert - assert result == segment - assert segment.content == "Updated content" - assert segment.keywords == ["updated"] - assert 
segment.word_count == len("Updated content") - assert document.word_count == 100 + (len("Updated content") - 10) - mock_db_session.add.assert_called() - mock_db_session.commit.assert_called() - - def test_update_segment_disable(self, mock_db_session, mock_current_user): - """Test disabling a segment.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - args = SegmentUpdateArgs(enabled=False) - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.redis_client.setex", autospec=True) as mock_redis_setex, - patch("services.dataset_service.disable_segment_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_redis_get.return_value = None - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_segment(args, segment, document, dataset) - - # Assert - assert result == segment - assert segment.enabled is False - mock_db_session.add.assert_called() - mock_db_session.commit.assert_called() - mock_task.delay.assert_called_once() - - def test_update_segment_indexing_in_progress(self, mock_db_session, mock_current_user): - """Test update fails when segment is currently indexing.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - args = SegmentUpdateArgs(content="Updated content") - - with patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get: - mock_redis_get.return_value = "1" # Indexing in progress - - # Act & Assert - with pytest.raises(ValueError, match="Segment is indexing"): - SegmentService.update_segment(args, segment, document, dataset) - - def 
test_update_segment_disabled_segment(self, mock_db_session, mock_current_user): - """Test update fails when segment is disabled.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=False) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - args = SegmentUpdateArgs(content="Updated content") - - with patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get: - mock_redis_get.return_value = None - - # Act & Assert - with pytest.raises(ValueError, match="Can't update disabled segment"): - SegmentService.update_segment(args, segment, document, dataset) - - def test_update_segment_with_qa_model(self, mock_db_session, mock_current_user): - """Test update segment with QA model (includes answer).""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True, word_count=10) - document = SegmentTestDataFactory.create_document_mock(doc_form=IndexStructureType.QA_INDEX, word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - args = SegmentUpdateArgs(content="Updated question", answer="Updated answer", keywords=["qa"]) - - mock_db_session.query.return_value.where.return_value.first.return_value = segment - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.VectorService.update_segment_vector", autospec=True) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_redis_get.return_value = None - mock_hash.return_value = "new-hash" - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_segment(args, segment, document, dataset) - - # Assert - assert result == segment - assert segment.content == "Updated question" - assert 
segment.answer == "Updated answer" - assert segment.keywords == ["qa"] - new_word_count = len("Updated question") + len("Updated answer") - assert segment.word_count == new_word_count - assert document.word_count == 100 + (new_word_count - 10) - mock_db_session.commit.assert_called() - - -class TestSegmentServiceDeleteSegment: - """Tests for SegmentService.delete_segment method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_delete_segment_success(self, mock_db_session): - """Test successful deletion of a segment.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True, word_count=50) - document = SegmentTestDataFactory.create_document_mock(word_count=100) - dataset = SegmentTestDataFactory.create_dataset_mock() - - mock_scalars = MagicMock() - mock_scalars.all.return_value = [] - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.redis_client.setex", autospec=True) as mock_redis_setex, - patch("services.dataset_service.delete_segment_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.select", autospec=True) as mock_select, - ): - mock_redis_get.return_value = None - mock_select.return_value.where.return_value = mock_select - - # Act - SegmentService.delete_segment(segment, document, dataset) - - # Assert - mock_db_session.delete.assert_called_once_with(segment) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_delete_segment_disabled(self, mock_db_session): - """Test deletion of disabled segment (no index deletion).""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=False, word_count=50) - document = SegmentTestDataFactory.create_document_mock(word_count=100) - 
dataset = SegmentTestDataFactory.create_dataset_mock() - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.delete_segment_from_index_task", autospec=True) as mock_task, - ): - mock_redis_get.return_value = None - - # Act - SegmentService.delete_segment(segment, document, dataset) - - # Assert - mock_db_session.delete.assert_called_once_with(segment) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_not_called() - - def test_delete_segment_indexing_in_progress(self, mock_db_session): - """Test deletion fails when segment is currently being deleted.""" - # Arrange - segment = SegmentTestDataFactory.create_segment_mock(enabled=True) - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get: - mock_redis_get.return_value = "1" # Deletion in progress - - # Act & Assert - with pytest.raises(ValueError, match="Segment is deleting"): - SegmentService.delete_segment(segment, document, dataset) - - -class TestSegmentServiceDeleteSegments: - """Tests for SegmentService.delete_segments method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_delete_segments_success(self, mock_db_session, mock_current_user): - """Test successful deletion of multiple segments.""" - # Arrange - segment_ids = ["segment-1", "segment-2"] - document = SegmentTestDataFactory.create_document_mock(word_count=200) - dataset = SegmentTestDataFactory.create_dataset_mock() - - segments_info = [ - ("node-1", "segment-1", 
50), - ("node-2", "segment-2", 30), - ] - - mock_query = MagicMock() - mock_query.with_entities.return_value.where.return_value.all.return_value = segments_info - mock_db_session.query.return_value = mock_query - - mock_scalars = MagicMock() - mock_scalars.all.return_value = [] - mock_select = MagicMock() - mock_select.where.return_value = mock_select - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.delete_segment_from_index_task", autospec=True) as mock_task, - patch("services.dataset_service.select", autospec=True) as mock_select_func, - ): - mock_select_func.return_value = mock_select - - # Act - SegmentService.delete_segments(segment_ids, document, dataset) - - # Assert - mock_db_session.query.return_value.where.return_value.delete.assert_called_once() - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_delete_segments_empty_list(self, mock_db_session, mock_current_user): - """Test deletion with empty list (should return early).""" - # Arrange - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - # Act - SegmentService.delete_segments([], document, dataset) - - # Assert - mock_db_session.query.assert_not_called() - - -class TestSegmentServiceUpdateSegmentsStatus: - """Tests for SegmentService.update_segments_status method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_update_segments_status_enable(self, mock_db_session, mock_current_user): - """Test enabling multiple segments.""" - # Arrange - segment_ids = ["segment-1", "segment-2"] - document = 
SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - segments = [ - SegmentTestDataFactory.create_segment_mock(segment_id="segment-1", enabled=False), - SegmentTestDataFactory.create_segment_mock(segment_id="segment-2", enabled=False), - ] - - mock_scalars = MagicMock() - mock_scalars.all.return_value = segments - mock_select = MagicMock() - mock_select.where.return_value = mock_select - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.enable_segments_to_index_task", autospec=True) as mock_task, - patch("services.dataset_service.select", autospec=True) as mock_select_func, - ): - mock_redis_get.return_value = None - mock_select_func.return_value = mock_select - - # Act - SegmentService.update_segments_status(segment_ids, "enable", dataset, document) - - # Assert - assert all(seg.enabled is True for seg in segments) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_update_segments_status_disable(self, mock_db_session, mock_current_user): - """Test disabling multiple segments.""" - # Arrange - segment_ids = ["segment-1", "segment-2"] - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - segments = [ - SegmentTestDataFactory.create_segment_mock(segment_id="segment-1", enabled=True), - SegmentTestDataFactory.create_segment_mock(segment_id="segment-2", enabled=True), - ] - - mock_scalars = MagicMock() - mock_scalars.all.return_value = segments - mock_select = MagicMock() - mock_select.where.return_value = mock_select - mock_db_session.scalars.return_value = mock_scalars - - with ( - patch("services.dataset_service.redis_client.get", autospec=True) as mock_redis_get, - patch("services.dataset_service.disable_segments_from_index_task", autospec=True) as mock_task, - 
patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - patch("services.dataset_service.select", autospec=True) as mock_select_func, - ): - mock_redis_get.return_value = None - mock_now.return_value = "2024-01-01T00:00:00" - mock_select_func.return_value = mock_select - - # Act - SegmentService.update_segments_status(segment_ids, "disable", dataset, document) - - # Assert - assert all(seg.enabled is False for seg in segments) - mock_db_session.commit.assert_called_once() - mock_task.delay.assert_called_once() - - def test_update_segments_status_empty_list(self, mock_db_session, mock_current_user): - """Test update with empty list (should return early).""" - # Arrange - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - # Act - SegmentService.update_segments_status([], "enable", dataset, document) - - # Assert - mock_db_session.scalars.assert_not_called() - - -class TestSegmentServiceGetSegments: - """Tests for SegmentService.get_segments method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_get_segments_success(self, mock_db_session, mock_current_user): - """Test successful retrieval of segments.""" - # Arrange - document_id = "doc-123" - tenant_id = "tenant-123" - segments = [ - SegmentTestDataFactory.create_segment_mock(segment_id="segment-1"), - SegmentTestDataFactory.create_segment_mock(segment_id="segment-2"), - ] - - mock_paginate = MagicMock() - mock_paginate.items = segments - mock_paginate.total = 2 - mock_db_session.paginate.return_value = mock_paginate - - # Act - items, total = SegmentService.get_segments(document_id, 
tenant_id) - - # Assert - assert len(items) == 2 - assert total == 2 - mock_db_session.paginate.assert_called_once() - - def test_get_segments_with_status_filter(self, mock_db_session, mock_current_user): - """Test retrieval with status filter.""" - # Arrange - document_id = "doc-123" - tenant_id = "tenant-123" - status_list = ["completed", "error"] - - mock_paginate = MagicMock() - mock_paginate.items = [] - mock_paginate.total = 0 - mock_db_session.paginate.return_value = mock_paginate - - # Act - items, total = SegmentService.get_segments(document_id, tenant_id, status_list=status_list) - - # Assert - assert len(items) == 0 - assert total == 0 - - def test_get_segments_with_keyword(self, mock_db_session, mock_current_user): - """Test retrieval with keyword search.""" - # Arrange - document_id = "doc-123" - tenant_id = "tenant-123" - keyword = "test" - - mock_paginate = MagicMock() - mock_paginate.items = [SegmentTestDataFactory.create_segment_mock()] - mock_paginate.total = 1 - mock_db_session.paginate.return_value = mock_paginate - - # Act - items, total = SegmentService.get_segments(document_id, tenant_id, keyword=keyword) - - # Assert - assert len(items) == 1 - assert total == 1 - - -class TestSegmentServiceGetSegmentById: - """Tests for SegmentService.get_segment_by_id method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_get_segment_by_id_success(self, mock_db_session): - """Test successful retrieval of segment by ID.""" - # Arrange - segment_id = "segment-123" - tenant_id = "tenant-123" - segment = SegmentTestDataFactory.create_segment_mock(segment_id=segment_id) - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = segment - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_segment_by_id(segment_id, tenant_id) - - # Assert - assert result == 
segment - - def test_get_segment_by_id_not_found(self, mock_db_session): - """Test retrieval when segment is not found.""" - # Arrange - segment_id = "non-existent" - tenant_id = "tenant-123" - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = None - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_segment_by_id(segment_id, tenant_id) - - # Assert - assert result is None - - -class TestSegmentServiceGetChildChunks: - """Tests for SegmentService.get_child_chunks method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_get_child_chunks_success(self, mock_db_session, mock_current_user): - """Test successful retrieval of child chunks.""" - # Arrange - segment_id = "segment-123" - document_id = "doc-123" - dataset_id = "dataset-123" - page = 1 - limit = 20 - - mock_paginate = MagicMock() - mock_paginate.items = [ - SegmentTestDataFactory.create_child_chunk_mock(chunk_id="chunk-1"), - SegmentTestDataFactory.create_child_chunk_mock(chunk_id="chunk-2"), - ] - mock_paginate.total = 2 - mock_db_session.paginate.return_value = mock_paginate - - # Act - result = SegmentService.get_child_chunks(segment_id, document_id, dataset_id, page, limit) - - # Assert - assert result == mock_paginate - mock_db_session.paginate.assert_called_once() - - def test_get_child_chunks_with_keyword(self, mock_db_session, mock_current_user): - """Test retrieval with keyword search.""" - # Arrange - segment_id = "segment-123" - document_id = "doc-123" - dataset_id = "dataset-123" - page = 1 - limit = 20 - keyword = "test" - - mock_paginate = MagicMock() - mock_paginate.items = [] - 
mock_paginate.total = 0 - mock_db_session.paginate.return_value = mock_paginate - - # Act - result = SegmentService.get_child_chunks(segment_id, document_id, dataset_id, page, limit, keyword=keyword) - - # Assert - assert result == mock_paginate - - -class TestSegmentServiceGetChildChunkById: - """Tests for SegmentService.get_child_chunk_by_id method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_get_child_chunk_by_id_success(self, mock_db_session): - """Test successful retrieval of child chunk by ID.""" - # Arrange - chunk_id = "chunk-123" - tenant_id = "tenant-123" - chunk = SegmentTestDataFactory.create_child_chunk_mock(chunk_id=chunk_id) - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = chunk - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_child_chunk_by_id(chunk_id, tenant_id) - - # Assert - assert result == chunk - - def test_get_child_chunk_by_id_not_found(self, mock_db_session): - """Test retrieval when child chunk is not found.""" - # Arrange - chunk_id = "non-existent" - tenant_id = "tenant-123" - - mock_query = MagicMock() - mock_query.where.return_value.first.return_value = None - mock_db_session.query.return_value = mock_query - - # Act - result = SegmentService.get_child_chunk_by_id(chunk_id, tenant_id) - - # Assert - assert result is None - - -class TestSegmentServiceCreateChildChunk: - """Tests for SegmentService.create_child_chunk method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def 
test_create_child_chunk_success(self, mock_db_session, mock_current_user): - """Test successful creation of a child chunk.""" - # Arrange - content = "New child chunk content" - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_hash.return_value = "hash-123" - - # Act - result = SegmentService.create_child_chunk(content, segment, document, dataset) - - # Assert - assert result is not None - mock_db_session.add.assert_called_once() - mock_db_session.commit.assert_called_once() - mock_vector_service.assert_called_once() - - def test_create_child_chunk_vector_index_failure(self, mock_db_session, mock_current_user): - """Test child chunk creation when vector indexing fails.""" - # Arrange - content = "New child chunk content" - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - mock_query = MagicMock() - mock_query.where.return_value.scalar.return_value = None - mock_db_session.query.return_value = mock_query - - with ( - patch("services.dataset_service.redis_client.lock", autospec=True) as mock_lock, - patch( - "services.dataset_service.VectorService.create_child_chunk_vector", autospec=True - ) as mock_vector_service, - 
patch("services.dataset_service.helper.generate_text_hash", autospec=True) as mock_hash, - ): - mock_lock.return_value.__enter__ = Mock() - mock_lock.return_value.__exit__ = Mock(return_value=None) - mock_vector_service.side_effect = Exception("Vector indexing failed") - mock_hash.return_value = "hash-123" - - # Act & Assert - with pytest.raises(ChildChunkIndexingError): - SegmentService.create_child_chunk(content, segment, document, dataset) - - mock_db_session.rollback.assert_called_once() - - -class TestSegmentServiceUpdateChildChunk: - """Tests for SegmentService.update_child_chunk method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - @pytest.fixture - def mock_current_user(self): - """Mock current_user.""" - user = SegmentTestDataFactory.create_user_mock() - with patch("services.dataset_service.current_user", user): - yield user - - def test_update_child_chunk_success(self, mock_db_session, mock_current_user): - """Test successful update of a child chunk.""" - # Arrange - content = "Updated child chunk content" - chunk = SegmentTestDataFactory.create_child_chunk_mock() - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with ( - patch( - "services.dataset_service.VectorService.update_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_now.return_value = "2024-01-01T00:00:00" - - # Act - result = SegmentService.update_child_chunk(content, chunk, segment, document, dataset) - - # Assert - assert result == chunk - assert chunk.content == content - assert chunk.word_count == len(content) - mock_db_session.add.assert_called_once_with(chunk) - mock_db_session.commit.assert_called_once() - 
mock_vector_service.assert_called_once() - - def test_update_child_chunk_vector_index_failure(self, mock_db_session, mock_current_user): - """Test child chunk update when vector indexing fails.""" - # Arrange - content = "Updated content" - chunk = SegmentTestDataFactory.create_child_chunk_mock() - segment = SegmentTestDataFactory.create_segment_mock() - document = SegmentTestDataFactory.create_document_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with ( - patch( - "services.dataset_service.VectorService.update_child_chunk_vector", autospec=True - ) as mock_vector_service, - patch("services.dataset_service.naive_utc_now", autospec=True) as mock_now, - ): - mock_vector_service.side_effect = Exception("Vector indexing failed") - mock_now.return_value = "2024-01-01T00:00:00" - - # Act & Assert - with pytest.raises(ChildChunkIndexingError): - SegmentService.update_child_chunk(content, chunk, segment, document, dataset) - - mock_db_session.rollback.assert_called_once() - - -class TestSegmentServiceDeleteChildChunk: - """Tests for SegmentService.delete_child_chunk method.""" - - @pytest.fixture - def mock_db_session(self): - """Mock database session.""" - with patch("services.dataset_service.db.session", autospec=True) as mock_db: - yield mock_db - - def test_delete_child_chunk_success(self, mock_db_session): - """Test successful deletion of a child chunk.""" - # Arrange - chunk = SegmentTestDataFactory.create_child_chunk_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with patch( - "services.dataset_service.VectorService.delete_child_chunk_vector", autospec=True - ) as mock_vector_service: - # Act - SegmentService.delete_child_chunk(chunk, dataset) - - # Assert - mock_db_session.delete.assert_called_once_with(chunk) - mock_db_session.commit.assert_called_once() - mock_vector_service.assert_called_once_with(chunk, dataset) - - def test_delete_child_chunk_vector_index_failure(self, mock_db_session): - """Test child chunk deletion 
when vector indexing fails.""" - # Arrange - chunk = SegmentTestDataFactory.create_child_chunk_mock() - dataset = SegmentTestDataFactory.create_dataset_mock() - - with patch( - "services.dataset_service.VectorService.delete_child_chunk_vector", autospec=True - ) as mock_vector_service: - mock_vector_service.side_effect = Exception("Vector deletion failed") - - # Act & Assert - with pytest.raises(ChildChunkDeleteIndexError): - SegmentService.delete_child_chunk(chunk, dataset) - - mock_db_session.rollback.assert_called_once() diff --git a/api/tests/unit_tests/services/services_test_help.py b/api/tests/unit_tests/services/services_test_help.py deleted file mode 100644 index c6b962f7fc..0000000000 --- a/api/tests/unit_tests/services/services_test_help.py +++ /dev/null @@ -1,59 +0,0 @@ -from unittest.mock import MagicMock - - -class ServiceDbTestHelper: - """ - Helper class for service database query tests. - """ - - @staticmethod - def setup_db_query_filter_by_mock(mock_db, query_results): - """ - Smart database query mock that responds based on model type and query parameters. 
- - Args: - mock_db: Mock database session - query_results: Dict mapping (model_name, filter_key, filter_value) to return value - Example: {('Account', 'email', 'test@example.com'): mock_account} - """ - - def query_side_effect(model): - mock_query = MagicMock() - - def filter_by_side_effect(**kwargs): - mock_filter_result = MagicMock() - - def first_side_effect(): - # Find matching result based on model and filter parameters - for (model_name, filter_key, filter_value), result in query_results.items(): - if model.__name__ == model_name and filter_key in kwargs and kwargs[filter_key] == filter_value: - return result - return None - - mock_filter_result.first.side_effect = first_side_effect - - # Handle order_by calls for complex queries - def order_by_side_effect(*args, **kwargs): - mock_order_result = MagicMock() - - def order_first_side_effect(): - # Look for order_by results in the same query_results dict - for (model_name, filter_key, filter_value), result in query_results.items(): - if ( - model.__name__ == model_name - and filter_key == "order_by" - and filter_value == "first_available" - ): - return result - return None - - mock_order_result.first.side_effect = order_first_side_effect - return mock_order_result - - mock_filter_result.order_by.side_effect = order_by_side_effect - return mock_filter_result - - mock_query.filter_by.side_effect = filter_by_side_effect - return mock_query - - mock_db.session.query.side_effect = query_side_effect diff --git a/api/tests/unit_tests/services/test_account_service.py b/api/tests/unit_tests/services/test_account_service.py index c4f5f57153..e9d2f1481e 100644 --- a/api/tests/unit_tests/services/test_account_service.py +++ b/api/tests/unit_tests/services/test_account_service.py @@ -14,7 +14,6 @@ from services.errors.account import ( AccountRegisterError, CurrentPasswordIncorrectError, ) -from tests.unit_tests.services.services_test_help import ServiceDbTestHelper class TestAccountAssociatedDataFactory: @@ -149,7 +148,6 @@ 
class TestAccountService: # Setup basic session methods mock_session.add = MagicMock() mock_session.commit = MagicMock() - mock_session.query = MagicMock() yield mock_db @@ -1572,15 +1570,9 @@ class TestRegisterService: account_id="existing-user-456", email="existing@example.com", status="active" ) - # Mock database queries - query_results = { - ( - "TenantAccountJoin", - "tenant_id", - "tenant-456", - ): TestAccountAssociatedDataFactory.create_tenant_join_mock(), - } - ServiceDbTestHelper.setup_db_query_filter_by_mock(mock_db_dependencies["db"], query_results) + mock_db_dependencies[ + "db" + ].session.scalar.return_value = TestAccountAssociatedDataFactory.create_tenant_join_mock() # Mock TenantService methods with ( diff --git a/api/tests/unit_tests/services/test_app_generate_service.py b/api/tests/unit_tests/services/test_app_generate_service.py index c2b430c551..119a7adc45 100644 --- a/api/tests/unit_tests/services/test_app_generate_service.py +++ b/api/tests/unit_tests/services/test_app_generate_service.py @@ -327,7 +327,8 @@ class TestGenerate: streaming=False, ) assert result == {"result": "advanced-blocking"} - assert gen_spy.call_args.kwargs.get("streaming") is False + call_kwargs = gen_spy.call_args.kwargs + assert call_kwargs.get("streaming") is False retrieve_spy.assert_not_called() # -- ADVANCED_CHAT streaming -------------------------------------------- diff --git a/api/tests/unit_tests/services/test_dataset_service_segment.py b/api/tests/unit_tests/services/test_dataset_service_segment.py index d6c104708c..5cfef76719 100644 --- a/api/tests/unit_tests/services/test_dataset_service_segment.py +++ b/api/tests/unit_tests/services/test_dataset_service_segment.py @@ -714,7 +714,6 @@ class TestSegmentServiceMutations: patch("services.dataset_service.db") as mock_db, patch("services.dataset_service.delete_segment_from_index_task") as delete_task, ): - segments_query = MagicMock() # execute().all() for segments_info (multi-column) execute_result = MagicMock() 
execute_result.all.return_value = [ diff --git a/api/tests/unit_tests/services/test_datasource_provider_service.py b/api/tests/unit_tests/services/test_datasource_provider_service.py index d304e0ec44..c389c4a635 100644 --- a/api/tests/unit_tests/services/test_datasource_provider_service.py +++ b/api/tests/unit_tests/services/test_datasource_provider_service.py @@ -36,9 +36,7 @@ class TestDatasourceProviderService: @pytest.fixture def mock_db_session(self): """ - Robust, chainable query mock. - q returns itself for .filter_by(), .order_by(), .where() so any - SQLAlchemy chaining pattern works without multiple brittle sub-mocks. + Mock session with scalar/scalars defaults for current SQLAlchemy access paths. """ with ( patch("services.datasource_provider_service.Session") as mock_cls, @@ -46,20 +44,6 @@ class TestDatasourceProviderService: ): sess = MagicMock(spec=Session) - q = MagicMock() - sess.query.return_value = q - - # Self-returning chain — any method called on q returns q - q.filter_by.return_value = q - q.order_by.return_value = q - q.where.return_value = q - - # Default terminal values (tests override per-case) - q.first.return_value = None - q.all.return_value = [] - q.count.return_value = 0 - q.delete.return_value = 1 - # Default values for select()-style calls (tests override per-case) sess.scalar.return_value = None sess.scalars.return_value.all.return_value = [] diff --git a/api/tests/unit_tests/services/test_message_service.py b/api/tests/unit_tests/services/test_message_service.py index 969132cfd8..7adc15d63e 100644 --- a/api/tests/unit_tests/services/test_message_service.py +++ b/api/tests/unit_tests/services/test_message_service.py @@ -3,6 +3,7 @@ from unittest.mock import MagicMock, patch import pytest +from graphon.model_runtime.entities.model_entities import ModelType from libs.infinite_scroll_pagination import InfiniteScrollPagination from models.enums import FeedbackFromSource, FeedbackRating from models.model import App, AppMode, EndUser, 
Message @@ -931,6 +932,130 @@ class TestMessageServiceSuggestedQuestions: assert result == ["Q1?"] mock_llm_gen.generate_suggested_questions_after_answer.assert_called_once() + @patch("services.message_service.db") + @patch("services.message_service.ModelManager.for_tenant") + @patch("services.message_service.TokenBufferMemory") + @patch("services.message_service.LLMGenerator") + @patch("services.message_service.TraceQueueManager") + @patch.object(MessageService, "get_message") + @patch("services.message_service.ConversationService") + def test_get_suggested_questions_chat_app_uses_frontend_model_and_prompt( + self, + mock_conversation_service, + mock_get_message, + mock_trace_manager, + mock_llm_gen, + mock_memory, + mock_model_manager, + mock_db, + factory, + ): + """Test suggested question generation uses frontend configured model and prompt.""" + from core.app.entities.app_invoke_entities import InvokeFrom + + app = factory.create_app_mock(mode=AppMode.CHAT.value) + app.tenant_id = "tenant-123" + user = factory.create_end_user_mock() + message = factory.create_message_mock() + mock_get_message.return_value = message + + conversation = MagicMock() + conversation.override_model_configs = None + mock_conversation_service.get_conversation.return_value = conversation + + app_model_config = MagicMock() + app_model_config.suggested_questions_after_answer_dict = { + "enabled": True, + "prompt": "custom prompt", + "model": { + "provider": "openai", + "name": "gpt-4o-mini", + "completion_params": {"max_tokens": 2048, "temperature": 0.1}, + }, + } + mock_db.session.scalar.return_value = app_model_config + + mock_memory.return_value.get_history_prompt_text.return_value = "histories" + mock_llm_gen.generate_suggested_questions_after_answer.return_value = ["Q1?"] + + result = MessageService.get_suggested_questions_after_answer( + app_model=app, + user=user, + message_id="msg-123", + invoke_from=InvokeFrom.WEB_APP, + ) + + assert result == ["Q1?"] + 
mock_model_manager.return_value.get_default_model_instance.assert_called_once_with( + tenant_id="tenant-123", + model_type=ModelType.LLM, + ) + mock_memory.assert_called_once_with( + conversation=conversation, + model_instance=mock_model_manager.return_value.get_default_model_instance.return_value, + ) + mock_llm_gen.generate_suggested_questions_after_answer.assert_called_once_with( + tenant_id="tenant-123", + histories="histories", + instruction_prompt="custom prompt", + model_config={ + "provider": "openai", + "name": "gpt-4o-mini", + "completion_params": {"max_tokens": 2048, "temperature": 0.1}, + }, + ) + + @patch("services.message_service.db") + @patch("services.message_service.ModelManager.for_tenant") + @patch("services.message_service.TokenBufferMemory") + @patch("services.message_service.LLMGenerator") + @patch("services.message_service.TraceQueueManager") + @patch.object(MessageService, "get_message") + @patch("services.message_service.ConversationService") + def test_get_suggested_questions_chat_app_invalid_frontend_model_fallback_to_default( + self, + mock_conversation_service, + mock_get_message, + mock_trace_manager, + mock_llm_gen, + mock_memory, + mock_model_manager, + mock_db, + factory, + ): + """Test invalid frontend configured model falls back to tenant default model.""" + app = factory.create_app_mock(mode=AppMode.CHAT.value) + app.tenant_id = "tenant-123" + user = factory.create_end_user_mock() + message = factory.create_message_mock() + mock_get_message.return_value = message + + conversation = MagicMock() + conversation.override_model_configs = None + mock_conversation_service.get_conversation.return_value = conversation + + app_model_config = MagicMock() + app_model_config.suggested_questions_after_answer_dict = { + "enabled": True, + "model": {"provider": "openai", "name": "invalid-model"}, + } + mock_db.session.scalar.return_value = app_model_config + + mock_model_manager.return_value.get_model_instance.side_effect = ValueError("invalid 
model") + mock_memory.return_value.get_history_prompt_text.return_value = "histories" + mock_llm_gen.generate_suggested_questions_after_answer.return_value = ["Q1?"] + + result = MessageService.get_suggested_questions_after_answer( + app_model=app, user=user, message_id="msg-123", invoke_from=MagicMock() + ) + + assert result == ["Q1?"] + mock_model_manager.return_value.get_default_model_instance.assert_called_once_with( + tenant_id="tenant-123", + model_type=ModelType.LLM, + ) + mock_model_manager.return_value.get_model_instance.assert_not_called() + # Test 30: get_suggested_questions_after_answer - Disabled Error @patch("services.message_service.WorkflowService") @patch("services.message_service.AdvancedChatAppConfigManager") diff --git a/api/tests/unit_tests/services/test_trigger_provider_service.py b/api/tests/unit_tests/services/test_trigger_provider_service.py index ebf1b36610..6eba60e5f1 100644 --- a/api/tests/unit_tests/services/test_trigger_provider_service.py +++ b/api/tests/unit_tests/services/test_trigger_provider_service.py @@ -694,7 +694,7 @@ def test_get_oauth_client_should_return_decrypted_system_client_when_verified( _mock_get_trigger_provider(mocker, provider_controller) mocker.patch("services.trigger.trigger_provider_service.PluginService.is_plugin_verified", return_value=True) mocker.patch( - "services.trigger.trigger_provider_service.decrypt_system_oauth_params", + "services.trigger.trigger_provider_service.decrypt_system_params", return_value={"client_id": "system"}, ) @@ -716,7 +716,7 @@ def test_get_oauth_client_should_raise_error_when_system_decryption_fails( _mock_get_trigger_provider(mocker, provider_controller) mocker.patch("services.trigger.trigger_provider_service.PluginService.is_plugin_verified", return_value=True) mocker.patch( - "services.trigger.trigger_provider_service.decrypt_system_oauth_params", + "services.trigger.trigger_provider_service.decrypt_system_params", side_effect=RuntimeError("bad data"), ) diff --git 
a/api/tests/unit_tests/services/test_webhook_service_additional.py b/api/tests/unit_tests/services/test_webhook_service_additional.py index 776cb5dc3f..491dd94842 100644 --- a/api/tests/unit_tests/services/test_webhook_service_additional.py +++ b/api/tests/unit_tests/services/test_webhook_service_additional.py @@ -17,23 +17,6 @@ from services.trigger import webhook_service as service_module from services.trigger.webhook_service import WebhookService -class _FakeQuery: - def __init__(self, result: Any) -> None: - self._result = result - - def where(self, *args: Any, **kwargs: Any) -> "_FakeQuery": - return self - - def filter(self, *args: Any, **kwargs: Any) -> "_FakeQuery": - return self - - def order_by(self, *args: Any, **kwargs: Any) -> "_FakeQuery": - return self - - def first(self) -> Any: - return self._result - - @pytest.fixture def flask_app() -> Flask: return Flask(__name__) diff --git a/api/tests/unit_tests/services/test_workflow_service.py b/api/tests/unit_tests/services/test_workflow_service.py index 0015e8b908..feafada59a 100644 --- a/api/tests/unit_tests/services/test_workflow_service.py +++ b/api/tests/unit_tests/services/test_workflow_service.py @@ -1649,8 +1649,6 @@ class TestWorkflowServiceCredentialValidation: """Missing BuiltinToolProvider → plugin requires no credentials → no error.""" # Arrange with patch("services.workflow_service.db") as mock_db: - mock_db.session.query.return_value.where.return_value.order_by.return_value.first.return_value = None - # Act + Assert (should NOT raise) service._check_default_tool_credential("tenant-1", "some-provider") @@ -1662,10 +1660,6 @@ class TestWorkflowServiceCredentialValidation: patch("services.workflow_service.db") as mock_db, patch("core.helper.credential_utils.check_credential_policy_compliance", side_effect=Exception("denied")), ): - mock_db.session.query.return_value.where.return_value.order_by.return_value.first.return_value = ( - mock_provider - ) - # Act + Assert with pytest.raises(ValueError, 
match="Failed to validate default credential"): service._check_default_tool_credential("tenant-1", "some-provider") diff --git a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py index 79a2d30f57..ce0d94398d 100644 --- a/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py +++ b/api/tests/unit_tests/services/tools/test_builtin_tools_manage_service.py @@ -280,7 +280,7 @@ class TestGetOauthClient: assert result == {"client_id": "id", "client_secret": "secret"} - @patch(f"{MODULE}.decrypt_system_oauth_params", return_value={"sys_key": "sys_val"}) + @patch(f"{MODULE}.decrypt_system_params", return_value={"sys_key": "sys_val"}) @patch(f"{MODULE}.PluginService") @patch(f"{MODULE}.create_provider_encrypter") @patch(f"{MODULE}.ToolManager") diff --git a/api/tests/unit_tests/services/vector_service.py b/api/tests/unit_tests/services/vector_service.py deleted file mode 100644 index ad80beb4e3..0000000000 --- a/api/tests/unit_tests/services/vector_service.py +++ /dev/null @@ -1,1793 +0,0 @@ -""" -Comprehensive unit tests for VectorService and Vector classes. - -This module contains extensive unit tests for the VectorService and Vector -classes, which are critical components in the RAG (Retrieval-Augmented Generation) -pipeline that handle vector database operations, collection management, embedding -storage and retrieval, and metadata filtering. - -The VectorService provides methods for: -- Creating vector embeddings for document segments -- Updating segment vector embeddings -- Generating child chunks for hierarchical indexing -- Managing child chunk vectors (create, update, delete) - -The Vector class provides methods for: -- Vector database operations (create, add, delete, search) -- Collection creation and management with Redis locking -- Embedding storage and retrieval -- Vector index operations (HNSW, L2 distance, etc.) 
-- Metadata filtering in vector space -- Support for multiple vector database backends - -This test suite ensures: -- Correct vector database operations -- Proper collection creation and management -- Accurate embedding storage and retrieval -- Comprehensive vector search functionality -- Metadata filtering and querying -- Error conditions are handled correctly -- Edge cases are properly validated - -================================================================================ -ARCHITECTURE OVERVIEW -================================================================================ - -The Vector service system is a critical component that bridges document -segments and vector databases, enabling semantic search and retrieval. - -1. VectorService: - - High-level service for managing vector operations on document segments - - Handles both regular segments and hierarchical (parent-child) indexing - - Integrates with IndexProcessor for document transformation - - Manages embedding model instances via ModelManager - -2. Vector Class: - - Wrapper around BaseVector implementations - - Handles embedding generation via ModelManager - - Supports multiple vector database backends (Chroma, Milvus, Qdrant, etc.) - - Manages collection creation with Redis locking for concurrency control - - Provides batch processing for large document sets - -3. BaseVector Abstract Class: - - Defines interface for vector database operations - - Implemented by various vector database backends - - Provides methods for CRUD operations on vectors - - Supports both vector similarity search and full-text search - -4. Collection Management: - - Uses Redis locks to prevent concurrent collection creation - - Caches collection existence status in Redis - - Supports collection deletion with cache invalidation - -5. 
Embedding Generation: - - Uses ModelManager to get embedding model instances - - Supports cached embeddings for performance - - Handles batch processing for large document sets - - Generates embeddings for both documents and queries - -================================================================================ -TESTING STRATEGY -================================================================================ - -This test suite follows a comprehensive testing strategy that covers: - -1. VectorService Methods: - - create_segments_vector: Regular and hierarchical indexing - - update_segment_vector: Vector and keyword index updates - - generate_child_chunks: Child chunk generation with full doc mode - - create_child_chunk_vector: Child chunk vector creation - - update_child_chunk_vector: Batch child chunk updates - - delete_child_chunk_vector: Child chunk deletion - -2. Vector Class Methods: - - Initialization with dataset and attributes - - Collection creation with Redis locking - - Embedding generation and batch processing - - Vector operations (create, add_texts, delete_by_ids, etc.) - - Search operations (by vector, by full text) - - Metadata filtering and querying - - Duplicate checking logic - - Vector factory selection - -3. Integration Points: - - ModelManager integration for embedding models - - IndexProcessor integration for document transformation - - Redis integration for locking and caching - - Database session management - - Vector database backend abstraction - -4. Error Handling: - - Invalid vector store configuration - - Missing embedding models - - Collection creation failures - - Search operation errors - - Metadata filtering errors - -5. 
Edge Cases: - - Empty document lists - - Missing metadata fields - - Duplicate document IDs - - Large batch processing - - Concurrent collection creation - -================================================================================ -""" - -from typing import Any -from unittest.mock import Mock, patch - -import pytest - -from core.rag.datasource.vdb.vector_base import BaseVector -from core.rag.datasource.vdb.vector_factory import Vector -from core.rag.datasource.vdb.vector_type import VectorType -from core.rag.index_processor.constant.index_type import IndexStructureType, IndexTechniqueType -from core.rag.models.document import Document -from models.dataset import ChildChunk, Dataset, DatasetDocument, DatasetProcessRule, DocumentSegment -from services.vector_service import VectorService - -# ============================================================================ -# Test Data Factory -# ============================================================================ - - -class VectorServiceTestDataFactory: - """ - Factory class for creating test data and mock objects for Vector service tests. - - This factory provides static methods to create mock objects for: - - Dataset instances with various configurations - - DocumentSegment instances - - ChildChunk instances - - Document instances (RAG documents) - - Embedding model instances - - Vector processor mocks - - Index processor mocks - - The factory methods help maintain consistency across tests and reduce - code duplication when setting up test scenarios. - """ - - @staticmethod - def create_dataset_mock( - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - doc_form: str = IndexStructureType.PARAGRAPH_INDEX, - indexing_technique: str = IndexTechniqueType.HIGH_QUALITY, - embedding_model_provider: str = "openai", - embedding_model: str = "text-embedding-ada-002", - index_struct_dict: dict[str, Any] | None = None, - **kwargs, - ) -> Mock: - """ - Create a mock Dataset with specified attributes. 
- - Args: - dataset_id: Unique identifier for the dataset - tenant_id: Tenant identifier - doc_form: Document form type - indexing_technique: Indexing technique (high_quality or economy) - embedding_model_provider: Embedding model provider - embedding_model: Embedding model name - index_struct_dict: Index structure dictionary - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a Dataset instance - """ - dataset = Mock(spec=Dataset) - - dataset.id = dataset_id - - dataset.tenant_id = tenant_id - - dataset.doc_form = doc_form - - dataset.indexing_technique = indexing_technique - - dataset.embedding_model_provider = embedding_model_provider - - dataset.embedding_model = embedding_model - - dataset.index_struct_dict = index_struct_dict - - for key, value in kwargs.items(): - setattr(dataset, key, value) - - return dataset - - @staticmethod - def create_document_segment_mock( - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - content: str = "Test segment content", - index_node_id: str = "node-123", - index_node_hash: str = "hash-123", - **kwargs, - ) -> Mock: - """ - Create a mock DocumentSegment with specified attributes. 
- - Args: - segment_id: Unique identifier for the segment - document_id: Parent document identifier - dataset_id: Dataset identifier - content: Segment content text - index_node_id: Index node identifier - index_node_hash: Index node hash - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a DocumentSegment instance - """ - segment = Mock(spec=DocumentSegment) - - segment.id = segment_id - - segment.document_id = document_id - - segment.dataset_id = dataset_id - - segment.content = content - - segment.index_node_id = index_node_id - - segment.index_node_hash = index_node_hash - - for key, value in kwargs.items(): - setattr(segment, key, value) - - return segment - - @staticmethod - def create_child_chunk_mock( - chunk_id: str = "chunk-123", - segment_id: str = "segment-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - content: str = "Test child chunk content", - index_node_id: str = "node-chunk-123", - index_node_hash: str = "hash-chunk-123", - position: int = 1, - **kwargs, - ) -> Mock: - """ - Create a mock ChildChunk with specified attributes. 
- - Args: - chunk_id: Unique identifier for the child chunk - segment_id: Parent segment identifier - document_id: Parent document identifier - dataset_id: Dataset identifier - tenant_id: Tenant identifier - content: Child chunk content text - index_node_id: Index node identifier - index_node_hash: Index node hash - position: Position in parent segment - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a ChildChunk instance - """ - chunk = Mock(spec=ChildChunk) - - chunk.id = chunk_id - - chunk.segment_id = segment_id - - chunk.document_id = document_id - - chunk.dataset_id = dataset_id - - chunk.tenant_id = tenant_id - - chunk.content = content - - chunk.index_node_id = index_node_id - - chunk.index_node_hash = index_node_hash - - chunk.position = position - - for key, value in kwargs.items(): - setattr(chunk, key, value) - - return chunk - - @staticmethod - def create_dataset_document_mock( - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - tenant_id: str = "tenant-123", - dataset_process_rule_id: str = "rule-123", - doc_language: str = "en", - created_by: str = "user-123", - **kwargs, - ) -> Mock: - """ - Create a mock DatasetDocument with specified attributes. 
- - Args: - document_id: Unique identifier for the document - dataset_id: Dataset identifier - tenant_id: Tenant identifier - dataset_process_rule_id: Process rule identifier - doc_language: Document language - created_by: Creator user ID - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a DatasetDocument instance - """ - document = Mock(spec=DatasetDocument) - - document.id = document_id - - document.dataset_id = dataset_id - - document.tenant_id = tenant_id - - document.dataset_process_rule_id = dataset_process_rule_id - - document.doc_language = doc_language - - document.created_by = created_by - - for key, value in kwargs.items(): - setattr(document, key, value) - - return document - - @staticmethod - def create_dataset_process_rule_mock( - rule_id: str = "rule-123", - **kwargs, - ) -> Mock: - """ - Create a mock DatasetProcessRule with specified attributes. - - Args: - rule_id: Unique identifier for the process rule - **kwargs: Additional attributes to set on the mock - - Returns: - Mock object configured as a DatasetProcessRule instance - """ - rule = Mock(spec=DatasetProcessRule) - - rule.id = rule_id - - rule.to_dict = Mock(return_value={"rules": {"parent_mode": "chunk"}}) - - for key, value in kwargs.items(): - setattr(rule, key, value) - - return rule - - @staticmethod - def create_rag_document_mock( - page_content: str = "Test document content", - doc_id: str = "doc-123", - doc_hash: str = "hash-123", - document_id: str = "doc-123", - dataset_id: str = "dataset-123", - **kwargs, - ) -> Document: - """ - Create a RAG Document with specified attributes. 
- - Args: - page_content: Document content text - doc_id: Document identifier in metadata - doc_hash: Document hash in metadata - document_id: Parent document ID in metadata - dataset_id: Dataset ID in metadata - **kwargs: Additional metadata fields - - Returns: - Document instance configured for testing - """ - metadata = { - "doc_id": doc_id, - "doc_hash": doc_hash, - "document_id": document_id, - "dataset_id": dataset_id, - } - - metadata.update(kwargs) - - return Document(page_content=page_content, metadata=metadata) - - @staticmethod - def create_embedding_model_instance_mock() -> Mock: - """ - Create a mock embedding model instance. - - Returns: - Mock object configured as an embedding model instance - """ - model_instance = Mock() - - model_instance.embed_documents = Mock(return_value=[[0.1] * 1536]) - - model_instance.embed_query = Mock(return_value=[0.1] * 1536) - - return model_instance - - @staticmethod - def create_vector_processor_mock() -> Mock: - """ - Create a mock vector processor (BaseVector implementation). - - Returns: - Mock object configured as a BaseVector instance - """ - processor = Mock(spec=BaseVector) - - processor.collection_name = "test_collection" - - processor.create = Mock() - - processor.add_texts = Mock() - - processor.text_exists = Mock(return_value=False) - - processor.delete_by_ids = Mock() - - processor.delete_by_metadata_field = Mock() - - processor.search_by_vector = Mock(return_value=[]) - - processor.search_by_full_text = Mock(return_value=[]) - - processor.delete = Mock() - - return processor - - @staticmethod - def create_index_processor_mock() -> Mock: - """ - Create a mock index processor. 
- - Returns: - Mock object configured as an index processor instance - """ - processor = Mock() - - processor.load = Mock() - - processor.clean = Mock() - - processor.transform = Mock(return_value=[]) - - return processor - - -# ============================================================================ -# Tests for VectorService -# ============================================================================ - - -class TestVectorService: - """ - Comprehensive unit tests for VectorService class. - - This test class covers all methods of the VectorService class, including - segment vector operations, child chunk operations, and integration with - various components like IndexProcessor and ModelManager. - """ - - # ======================================================================== - # Tests for create_segments_vector - # ======================================================================== - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_create_segments_vector_regular_indexing(self, mock_db, mock_index_processor_factory): - """ - Test create_segments_vector with regular indexing (non-hierarchical). - - This test verifies that segments are correctly converted to RAG documents - and loaded into the index processor for regular indexing scenarios. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form=IndexStructureType.PARAGRAPH_INDEX, indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - keywords_list = [["keyword1", "keyword2"]] - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.create_segments_vector(keywords_list, [segment], dataset, IndexStructureType.PARAGRAPH_INDEX) - - # Assert - mock_index_processor.load.assert_called_once() - - call_args = mock_index_processor.load.call_args - - assert call_args[0][0] == dataset - - assert len(call_args[0][1]) == 1 - - assert call_args[1]["with_keywords"] is True - - assert call_args[1]["keywords_list"] == keywords_list - - @patch("services.vector_service.VectorService.generate_child_chunks") - @patch("services.vector_service.ModelManager.for_tenant") - @patch("services.vector_service.db") - def test_create_segments_vector_parent_child_indexing( - self, mock_db, mock_model_manager, mock_generate_child_chunks - ): - """ - Test create_segments_vector with parent-child indexing. - - This test verifies that for hierarchical indexing, child chunks are - generated instead of regular segment indexing. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = dataset_document - - mock_db.session.query.return_value.where.return_value.first.return_value = processing_rule - - mock_embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_model_manager.return_value.get_model_instance.return_value = mock_embedding_model - - # Act - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - # Assert - mock_generate_child_chunks.assert_called_once() - - @patch("services.vector_service.db") - def test_create_segments_vector_missing_document(self, mock_db): - """ - Test create_segments_vector when document is missing. - - This test verifies that when a document is not found, the segment - is skipped with a warning log. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = None - - # Act - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - # Assert - # Should not raise an error, just skip the segment - - @patch("services.vector_service.db") - def test_create_segments_vector_missing_processing_rule(self, mock_db): - """ - Test create_segments_vector when processing rule is missing. - - This test verifies that when a processing rule is not found, a - ValueError is raised. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.HIGH_QUALITY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = dataset_document - - mock_db.session.query.return_value.where.return_value.first.return_value = None - - # Act & Assert - with pytest.raises(ValueError, match="No processing rule found"): - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - @patch("services.vector_service.db") - def test_create_segments_vector_economy_indexing_technique(self, mock_db): - """ - Test create_segments_vector with economy indexing technique. - - This test verifies that when indexing_technique is not high_quality, - a ValueError is raised for parent-child indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - doc_form="parent_child_model", indexing_technique=IndexTechniqueType.ECONOMY - ) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - mock_db.session.query.return_value.filter_by.return_value.first.return_value = dataset_document - - mock_db.session.query.return_value.where.return_value.first.return_value = processing_rule - - # Act & Assert - with pytest.raises(ValueError, match="The knowledge base index technique is not high quality"): - VectorService.create_segments_vector(None, [segment], dataset, "parent_child_model") - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_create_segments_vector_empty_documents(self, mock_db, mock_index_processor_factory): - 
""" - Test create_segments_vector with empty documents list. - - This test verifies that when no documents are created, the index - processor is not called. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.create_segments_vector(None, [], dataset, IndexStructureType.PARAGRAPH_INDEX) - - # Assert - mock_index_processor.load.assert_not_called() - - # ======================================================================== - # Tests for update_segment_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_segment_vector_high_quality(self, mock_db, mock_vector_class): - """ - Test update_segment_vector with high_quality indexing technique. - - This test verifies that segments are correctly updated in the vector - store when using high_quality indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_segment_vector(None, segment, dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once_with([segment.index_node_id]) - - mock_vector.add_texts.assert_called_once() - - @patch("services.vector_service.Keyword") - @patch("services.vector_service.db") - def test_update_segment_vector_economy_with_keywords(self, mock_db, mock_keyword_class): - """ - Test update_segment_vector with economy indexing and keywords. 
- - This test verifies that segments are correctly updated in the keyword - index when using economy indexing with keywords. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - keywords = ["keyword1", "keyword2"] - - mock_keyword = Mock() - - mock_keyword.delete_by_ids = Mock() - - mock_keyword.add_texts = Mock() - - mock_keyword_class.return_value = mock_keyword - - # Act - VectorService.update_segment_vector(keywords, segment, dataset) - - # Assert - mock_keyword.delete_by_ids.assert_called_once_with([segment.index_node_id]) - - mock_keyword.add_texts.assert_called_once() - - call_args = mock_keyword.add_texts.call_args - - assert call_args[1]["keywords_list"] == [keywords] - - @patch("services.vector_service.Keyword") - @patch("services.vector_service.db") - def test_update_segment_vector_economy_without_keywords(self, mock_db, mock_keyword_class): - """ - Test update_segment_vector with economy indexing without keywords. - - This test verifies that segments are correctly updated in the keyword - index when using economy indexing without keywords. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - mock_keyword = Mock() - - mock_keyword.delete_by_ids = Mock() - - mock_keyword.add_texts = Mock() - - mock_keyword_class.return_value = mock_keyword - - # Act - VectorService.update_segment_vector(None, segment, dataset) - - # Assert - mock_keyword.delete_by_ids.assert_called_once_with([segment.index_node_id]) - - mock_keyword.add_texts.assert_called_once() - - call_args = mock_keyword.add_texts.call_args - - assert "keywords_list" not in call_args[1] or call_args[1].get("keywords_list") is None - - # ======================================================================== - # Tests for generate_child_chunks - # ======================================================================== - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_generate_child_chunks_with_children(self, mock_db, mock_index_processor_factory): - """ - Test generate_child_chunks when children are generated. - - This test verifies that child chunks are correctly generated and - saved to the database when the index processor returns children. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - child_document = VectorServiceTestDataFactory.create_rag_document_mock( - page_content="Child content", doc_id="child-node-123" - ) - - child_document.children = [child_document] - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor.transform.return_value = [child_document] - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.generate_child_chunks(segment, dataset_document, dataset, embedding_model, processing_rule, False) - - # Assert - mock_index_processor.transform.assert_called_once() - - mock_index_processor.load.assert_called_once() - - mock_db.session.add.assert_called() - - mock_db.session.commit.assert_called_once() - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_generate_child_chunks_regenerate(self, mock_db, mock_index_processor_factory): - """ - Test generate_child_chunks with regenerate=True. - - This test verifies that when regenerate is True, existing child chunks - are cleaned before generating new ones. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor.transform.return_value = [] - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.generate_child_chunks(segment, dataset_document, dataset, embedding_model, processing_rule, True) - - # Assert - mock_index_processor.clean.assert_called_once() - - call_args = mock_index_processor.clean.call_args - - assert call_args[0][0] == dataset - - assert call_args[0][1] == [segment.index_node_id] - - assert call_args[1]["with_keywords"] is True - - assert call_args[1]["delete_child_chunks"] is True - - @patch("services.vector_service.IndexProcessorFactory") - @patch("services.vector_service.db") - def test_generate_child_chunks_no_children(self, mock_db, mock_index_processor_factory): - """ - Test generate_child_chunks when no children are generated. - - This test verifies that when the index processor returns no children, - no child chunks are saved to the database. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - segment = VectorServiceTestDataFactory.create_document_segment_mock() - - dataset_document = VectorServiceTestDataFactory.create_dataset_document_mock() - - processing_rule = VectorServiceTestDataFactory.create_dataset_process_rule_mock() - - embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_index_processor = VectorServiceTestDataFactory.create_index_processor_mock() - - mock_index_processor.transform.return_value = [] - - mock_index_processor_factory.return_value.init_index_processor.return_value = mock_index_processor - - # Act - VectorService.generate_child_chunks(segment, dataset_document, dataset, embedding_model, processing_rule, False) - - # Assert - mock_index_processor.transform.assert_called_once() - - mock_index_processor.load.assert_not_called() - - mock_db.session.add.assert_not_called() - - # ======================================================================== - # Tests for create_child_chunk_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_create_child_chunk_vector_high_quality(self, mock_db, mock_vector_class): - """ - Test create_child_chunk_vector with high_quality indexing. - - This test verifies that child chunk vectors are correctly created - when using high_quality indexing. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.create_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.add_texts.assert_called_once() - - call_args = mock_vector.add_texts.call_args - - assert call_args[1]["duplicate_check"] is True - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_create_child_chunk_vector_economy(self, mock_db, mock_vector_class): - """ - Test create_child_chunk_vector with economy indexing. - - This test verifies that child chunk vectors are not created when - using economy indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.create_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.add_texts.assert_not_called() - - # ======================================================================== - # Tests for update_child_chunk_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_with_all_operations(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with new, update, and delete operations. - - This test verifies that child chunk vectors are correctly updated - when there are new chunks, updated chunks, and deleted chunks. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - new_chunk = VectorServiceTestDataFactory.create_child_chunk_mock(chunk_id="new-chunk-1") - - update_chunk = VectorServiceTestDataFactory.create_child_chunk_mock(chunk_id="update-chunk-1") - - delete_chunk = VectorServiceTestDataFactory.create_child_chunk_mock(chunk_id="delete-chunk-1") - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([new_chunk], [update_chunk], [delete_chunk], dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once() - - delete_ids = mock_vector.delete_by_ids.call_args[0][0] - - assert update_chunk.index_node_id in delete_ids - - assert delete_chunk.index_node_id in delete_ids - - mock_vector.add_texts.assert_called_once() - - call_args = mock_vector.add_texts.call_args - - assert len(call_args[0][0]) == 2 # new_chunk + update_chunk - - assert call_args[1]["duplicate_check"] is True - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_only_new(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with only new chunks. - - This test verifies that when only new chunks are provided, only - add_texts is called, not delete_by_ids. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - new_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([new_chunk], [], [], dataset) - - # Assert - mock_vector.delete_by_ids.assert_not_called() - - mock_vector.add_texts.assert_called_once() - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_only_delete(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with only deleted chunks. - - This test verifies that when only deleted chunks are provided, only - delete_by_ids is called, not add_texts. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - delete_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([], [], [delete_chunk], dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once_with([delete_chunk.index_node_id]) - - mock_vector.add_texts.assert_not_called() - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_update_child_chunk_vector_economy(self, mock_db, mock_vector_class): - """ - Test update_child_chunk_vector with economy indexing. - - This test verifies that child chunk vectors are not updated when - using economy indexing. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - new_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.update_child_chunk_vector([new_chunk], [], [], dataset) - - # Assert - mock_vector.delete_by_ids.assert_not_called() - - mock_vector.add_texts.assert_not_called() - - # ======================================================================== - # Tests for delete_child_chunk_vector - # ======================================================================== - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_delete_child_chunk_vector_high_quality(self, mock_db, mock_vector_class): - """ - Test delete_child_chunk_vector with high_quality indexing. - - This test verifies that child chunk vectors are correctly deleted - when using high_quality indexing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.HIGH_QUALITY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.delete_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.delete_by_ids.assert_called_once_with([child_chunk.index_node_id]) - - @patch("services.vector_service.Vector") - @patch("services.vector_service.db") - def test_delete_child_chunk_vector_economy(self, mock_db, mock_vector_class): - """ - Test delete_child_chunk_vector with economy indexing. - - This test verifies that child chunk vectors are not deleted when - using economy indexing. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock(indexing_technique=IndexTechniqueType.ECONOMY) - - child_chunk = VectorServiceTestDataFactory.create_child_chunk_mock() - - mock_vector = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_class.return_value = mock_vector - - # Act - VectorService.delete_child_chunk_vector(child_chunk, dataset) - - # Assert - mock_vector.delete_by_ids.assert_not_called() - - -# ============================================================================ -# Tests for Vector Class -# ============================================================================ - - -class TestVector: - """ - Comprehensive unit tests for Vector class. - - This test class covers all methods of the Vector class, including - initialization, collection management, embedding operations, vector - database operations, and search functionality. - """ - - # ======================================================================== - # Tests for Vector Initialization - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_initialization_default_attributes(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector initialization with default attributes. - - This test verifies that Vector is correctly initialized with default - attributes when none are provided. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - # Act - vector = Vector(dataset=dataset) - - # Assert - assert vector._dataset == dataset - - assert vector._attributes == ["doc_id", "dataset_id", "document_id", "doc_hash"] - - mock_get_embeddings.assert_called_once() - - mock_init_vector.assert_called_once() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_initialization_custom_attributes(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector initialization with custom attributes. - - This test verifies that Vector is correctly initialized with custom - attributes when provided. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - custom_attributes = ["custom_attr1", "custom_attr2"] - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - # Act - vector = Vector(dataset=dataset, attributes=custom_attributes) - - # Assert - assert vector._dataset == dataset - - assert vector._attributes == custom_attributes - - # ======================================================================== - # Tests for Vector.create - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_create_with_texts(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.create with texts list. 
- - This test verifies that documents are correctly embedded and created - in the vector store with batch processing. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [ - VectorServiceTestDataFactory.create_rag_document_mock(page_content=f"Content {i}") for i in range(5) - ] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536] * 5) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.create(texts=documents) - - # Assert - mock_embeddings.embed_documents.assert_called() - - mock_vector_processor.create.assert_called() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_create_empty_texts(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.create with empty texts list. - - This test verifies that when texts is None or empty, no operations - are performed. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.create(texts=None) - - # Assert - mock_embeddings.embed_documents.assert_not_called() - - mock_vector_processor.create.assert_not_called() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_create_large_batch(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.create with large batch of documents. 
- - This test verifies that large batches are correctly processed in - chunks of 1000 documents. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [ - VectorServiceTestDataFactory.create_rag_document_mock(page_content=f"Content {i}") for i in range(2500) - ] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536] * 1000) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.create(texts=documents) - - # Assert - # Should be called 3 times (1000, 1000, 500) - assert mock_embeddings.embed_documents.call_count == 3 - - assert mock_vector_processor.create.call_count == 3 - - # ======================================================================== - # Tests for Vector.add_texts - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_add_texts_without_duplicate_check(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.add_texts without duplicate check. - - This test verifies that documents are added without checking for - duplicates when duplicate_check is False. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [VectorServiceTestDataFactory.create_rag_document_mock()] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536]) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.add_texts(documents, duplicate_check=False) - - # Assert - mock_embeddings.embed_documents.assert_called_once() - - mock_vector_processor.create.assert_called_once() - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_add_texts_with_duplicate_check(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.add_texts with duplicate check. - - This test verifies that duplicate documents are filtered out when - duplicate_check is True. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - documents = [VectorServiceTestDataFactory.create_rag_document_mock(doc_id="doc-123")] - - mock_embeddings = Mock() - - mock_embeddings.embed_documents = Mock(return_value=[[0.1] * 1536]) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(return_value=True) # Document exists - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.add_texts(documents, duplicate_check=True) - - # Assert - mock_vector_processor.text_exists.assert_called_once_with("doc-123") - - mock_embeddings.embed_documents.assert_not_called() - - mock_vector_processor.create.assert_not_called() - - # ======================================================================== - # Tests for Vector.text_exists - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_text_exists_true(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.text_exists when text exists. - - This test verifies that text_exists correctly returns True when - a document exists in the vector store. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(return_value=True) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.text_exists("doc-123") - - # Assert - assert result is True - - mock_vector_processor.text_exists.assert_called_once_with("doc-123") - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_text_exists_false(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.text_exists when text does not exist. - - This test verifies that text_exists correctly returns False when - a document does not exist in the vector store. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(return_value=False) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.text_exists("doc-123") - - # Assert - assert result is False - - mock_vector_processor.text_exists.assert_called_once_with("doc-123") - - # ======================================================================== - # Tests for Vector.delete_by_ids - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_delete_by_ids(self, mock_get_embeddings, mock_init_vector): - """ - Test 
Vector.delete_by_ids. - - This test verifies that documents are correctly deleted by their IDs. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - ids = ["doc-1", "doc-2", "doc-3"] - - # Act - vector.delete_by_ids(ids) - - # Assert - mock_vector_processor.delete_by_ids.assert_called_once_with(ids) - - # ======================================================================== - # Tests for Vector.delete_by_metadata_field - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_delete_by_metadata_field(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.delete_by_metadata_field. - - This test verifies that documents are correctly deleted by metadata - field value. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.delete_by_metadata_field("dataset_id", "dataset-123") - - # Assert - mock_vector_processor.delete_by_metadata_field.assert_called_once_with("dataset_id", "dataset-123") - - # ======================================================================== - # Tests for Vector.search_by_vector - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_search_by_vector(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.search_by_vector. - - This test verifies that vector search correctly embeds the query - and searches the vector store. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - query = "test query" - - query_vector = [0.1] * 1536 - - mock_embeddings = Mock() - - mock_embeddings.embed_query = Mock(return_value=query_vector) - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.search_by_vector = Mock(return_value=[]) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.search_by_vector(query) - - # Assert - mock_embeddings.embed_query.assert_called_once_with(query) - - mock_vector_processor.search_by_vector.assert_called_once_with(query_vector) - - assert result == [] - - # ======================================================================== - # Tests for Vector.search_by_full_text - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_search_by_full_text(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector.search_by_full_text. - - This test verifies that full-text search correctly searches the - vector store without embedding the query. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - query = "test query" - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.search_by_full_text = Mock(return_value=[]) - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - result = vector.search_by_full_text(query) - - # Assert - mock_vector_processor.search_by_full_text.assert_called_once_with(query) - - assert result == [] - - # ======================================================================== - # Tests for Vector.delete - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.redis_client") - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_delete(self, mock_get_embeddings, mock_init_vector, mock_redis_client): - """ - Test Vector.delete. - - This test verifies that the collection is deleted and Redis cache - is cleared. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.collection_name = "test_collection" - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - # Act - vector.delete() - - # Assert - mock_vector_processor.delete.assert_called_once() - - mock_redis_client.delete.assert_called_once_with("vector_indexing_test_collection") - - # ======================================================================== - # Tests for Vector.get_vector_factory - # ======================================================================== - - def test_vector_get_vector_factory_chroma(self): - """ - Test Vector.get_vector_factory for Chroma. - - This test verifies that the correct factory class is returned for - Chroma vector type. - """ - # Act - factory_class = Vector.get_vector_factory(VectorType.CHROMA) - - # Assert - assert factory_class is not None - - # Verify it's the correct factory by checking the module name - assert "chroma" in factory_class.__module__.lower() - - def test_vector_get_vector_factory_milvus(self): - """ - Test Vector.get_vector_factory for Milvus. - - This test verifies that the correct factory class is returned for - Milvus vector type. - """ - # Act - factory_class = Vector.get_vector_factory(VectorType.MILVUS) - - # Assert - assert factory_class is not None - - assert "milvus" in factory_class.__module__.lower() - - def test_vector_get_vector_factory_invalid_type(self): - """ - Test Vector.get_vector_factory with invalid vector type. - - This test verifies that a ValueError is raised when an invalid - vector type is provided. 
- """ - # Act & Assert - with pytest.raises(ValueError, match="Vector store .* is not supported"): - Vector.get_vector_factory("invalid_type") - - # ======================================================================== - # Tests for Vector._filter_duplicate_texts - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_filter_duplicate_texts(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector._filter_duplicate_texts. - - This test verifies that duplicate documents are correctly filtered - based on doc_id in metadata. - """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_vector_processor.text_exists = Mock(side_effect=[True, False]) # First exists, second doesn't - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - doc1 = VectorServiceTestDataFactory.create_rag_document_mock(doc_id="doc-1") - - doc2 = VectorServiceTestDataFactory.create_rag_document_mock(doc_id="doc-2") - - documents = [doc1, doc2] - - # Act - filtered = vector._filter_duplicate_texts(documents) - - # Assert - assert len(filtered) == 1 - - assert filtered[0].metadata["doc_id"] == "doc-2" - - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - @patch("core.rag.datasource.vdb.vector_factory.Vector._get_embeddings") - def test_vector_filter_duplicate_texts_no_metadata(self, mock_get_embeddings, mock_init_vector): - """ - Test Vector._filter_duplicate_texts with documents without metadata. - - This test verifies that documents without metadata are not filtered. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock() - - mock_embeddings = Mock() - - mock_get_embeddings.return_value = mock_embeddings - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - vector = Vector(dataset=dataset) - - doc1 = Document(page_content="Content 1", metadata=None) - - doc2 = Document(page_content="Content 2", metadata={}) - - documents = [doc1, doc2] - - # Act - filtered = vector._filter_duplicate_texts(documents) - - # Assert - assert len(filtered) == 2 - - # ======================================================================== - # Tests for Vector._get_embeddings - # ======================================================================== - - @patch("core.rag.datasource.vdb.vector_factory.CacheEmbedding") - @patch("core.rag.datasource.vdb.vector_factory.ModelManager.for_tenant") - @patch("core.rag.datasource.vdb.vector_factory.Vector._init_vector") - def test_vector_get_embeddings(self, mock_init_vector, mock_model_manager, mock_cache_embedding): - """ - Test Vector._get_embeddings. - - This test verifies that embeddings are correctly retrieved from - ModelManager and wrapped in CacheEmbedding. 
- """ - # Arrange - dataset = VectorServiceTestDataFactory.create_dataset_mock( - embedding_model_provider="openai", embedding_model="text-embedding-ada-002" - ) - - mock_embedding_model = VectorServiceTestDataFactory.create_embedding_model_instance_mock() - - mock_model_manager.return_value.get_model_instance.return_value = mock_embedding_model - - mock_cache_embedding_instance = Mock() - - mock_cache_embedding.return_value = mock_cache_embedding_instance - - mock_vector_processor = VectorServiceTestDataFactory.create_vector_processor_mock() - - mock_init_vector.return_value = mock_vector_processor - - # Act - vector = Vector(dataset=dataset) - - # Assert - mock_model_manager.return_value.get_model_instance.assert_called_once() - - mock_cache_embedding.assert_called_once_with(mock_embedding_model) - - assert vector._embeddings == mock_cache_embedding_instance diff --git a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py index d570dce107..dfdbd9acd6 100644 --- a/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py +++ b/api/tests/unit_tests/services/workflow/test_workflow_event_snapshot_service.py @@ -1,14 +1,20 @@ import json import queue -from collections.abc import Sequence +from collections.abc import Mapping, Sequence from dataclasses import dataclass from datetime import UTC, datetime +from itertools import cycle from threading import Event +from types import SimpleNamespace +from typing import Any, cast +from unittest.mock import MagicMock import pytest +from sqlalchemy.orm import Session, sessionmaker from core.app.app_config.entities import WorkflowUIBasedAppConfig from core.app.entities.app_invoke_entities import InvokeFrom, WorkflowAppGenerateEntity +from core.app.entities.task_entities import StreamEvent from core.app.layers.pause_state_persist_layer import WorkflowResumptionContext, _WorkflowGenerateEntityWrapper from 
graphon.entities.pause_reason import HumanInputRequired from graphon.enums import WorkflowExecutionStatus, WorkflowNodeExecutionStatus @@ -18,11 +24,14 @@ from models.model import AppMode from models.workflow import WorkflowRun from repositories.api_workflow_node_execution_repository import WorkflowNodeExecutionSnapshot from repositories.entities.workflow_pause import WorkflowPauseEntity +from services import workflow_event_snapshot_service as service_module from services.workflow_event_snapshot_service import ( BufferState, MessageContext, _build_snapshot_events, + _is_terminal_event, _resolve_task_id, + build_workflow_event_stream, ) @@ -125,50 +134,6 @@ def _build_resumption_context(task_id: str) -> WorkflowResumptionContext: ) -def test_build_snapshot_events_includes_pause_event() -> None: - workflow_run = _build_workflow_run(WorkflowExecutionStatus.PAUSED) - snapshot = _build_snapshot(WorkflowNodeExecutionStatus.PAUSED) - resumption_context = _build_resumption_context("task-ctx") - pause_entity = _FakePauseEntity( - pause_id="pause-1", - workflow_run_id="run-1", - paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), - pause_reasons=[ - HumanInputRequired( - form_id="form-1", - form_content="content", - node_id="node-1", - node_title="Human Input", - ) - ], - ) - - events = _build_snapshot_events( - workflow_run=workflow_run, - node_snapshots=[snapshot], - task_id="task-ctx", - message_context=None, - pause_entity=pause_entity, - resumption_context=resumption_context, - ) - - assert [event["event"] for event in events] == [ - "workflow_started", - "node_started", - "node_finished", - "workflow_paused", - ] - assert events[2]["data"]["status"] == WorkflowNodeExecutionStatus.PAUSED.value - pause_data = events[-1]["data"] - assert pause_data["paused_nodes"] == ["node-1"] - assert pause_data["outputs"] == {"result": "value"} - assert pause_data["status"] == WorkflowExecutionStatus.PAUSED.value - assert pause_data["created_at"] == 
int(workflow_run.created_at.timestamp()) - assert pause_data["elapsed_time"] == workflow_run.elapsed_time - assert pause_data["total_tokens"] == workflow_run.total_tokens - assert pause_data["total_steps"] == workflow_run.total_steps - - def test_build_snapshot_events_applies_message_context() -> None: workflow_run = _build_workflow_run(WorkflowExecutionStatus.RUNNING) snapshot = _build_snapshot(WorkflowNodeExecutionStatus.SUCCEEDED) @@ -222,3 +187,656 @@ def test_resolve_task_id_priority(context_task_id, buffered_task_id, expected) - buffer_state.task_id_ready.set() task_id = _resolve_task_id(resumption_context, buffer_state, "run-1", wait_timeout=0.0) assert task_id == expected + + +def _build_workflow_run_additional(status: WorkflowExecutionStatus = WorkflowExecutionStatus.RUNNING) -> WorkflowRun: + return WorkflowRun( + id="run-1", + tenant_id="tenant-1", + app_id="app-1", + workflow_id="workflow-1", + type="workflow", + triggered_from="app-run", + version="v1", + graph=None, + inputs=json.dumps({"query": "hello"}), + status=status, + outputs=json.dumps({}), + error=None, + elapsed_time=1.2, + total_tokens=5, + total_steps=2, + created_by_role=CreatorUserRole.END_USER, + created_by="user-1", + created_at=datetime(2024, 1, 1, tzinfo=UTC), + ) + + +def _build_resumption_context_additional(task_id: str) -> WorkflowResumptionContext: + app_config = WorkflowUIBasedAppConfig( + tenant_id="tenant-1", + app_id="app-1", + app_mode=AppMode.WORKFLOW, + workflow_id="workflow-1", + ) + generate_entity = WorkflowAppGenerateEntity( + task_id=task_id, + app_config=app_config, + inputs={}, + files=[], + user_id="user-1", + stream=True, + invoke_from=InvokeFrom.EXPLORE, + call_depth=0, + workflow_execution_id="run-1", + ) + runtime_state = GraphRuntimeState(variable_pool=VariablePool(), start_at=0.0) + runtime_state.outputs = {"answer": "ok"} + wrapper = _WorkflowGenerateEntityWrapper(entity=generate_entity) + return WorkflowResumptionContext( + generate_entity=wrapper, + 
serialized_graph_runtime_state=runtime_state.dumps(), + ) + + +class _SessionContext: + def __init__(self, session: Any) -> None: + self._session = session + + def __enter__(self) -> Any: + return self._session + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> bool: + return False + + +class _SessionMaker: + def __init__(self, session: Any) -> None: + self._session = session + + def __call__(self) -> _SessionContext: + return _SessionContext(self._session) + + +class _SubscriptionContext: + def __init__(self, subscription: Any) -> None: + self._subscription = subscription + + def __enter__(self) -> Any: + return self._subscription + + def __exit__(self, exc_type: Any, exc: Any, tb: Any) -> bool: + return False + + +class _Topic: + def __init__(self, subscription: Any) -> None: + self._subscription = subscription + + def subscribe(self) -> _SubscriptionContext: + return _SubscriptionContext(self._subscription) + + +class _StaticSubscription: + def receive(self, timeout: int = 1) -> None: + return None + + +@dataclass(frozen=True) +class _PauseEntity(WorkflowPauseEntity): + state: bytes + + @property + def id(self) -> str: + return "pause-1" + + @property + def workflow_execution_id(self) -> str: + return "run-1" + + @property + def resumed_at(self) -> datetime | None: + return None + + @property + def paused_at(self) -> datetime: + return datetime(2024, 1, 1, tzinfo=UTC) + + def get_state(self) -> bytes: + return self.state + + def get_pause_reasons(self) -> list[Any]: + return [] + + +def test_get_message_context_should_return_none_when_no_message() -> None: + # Arrange + session = SimpleNamespace(scalar=MagicMock(return_value=None)) + session_maker = _SessionMaker(session) + + # Act + result = service_module._get_message_context(cast(sessionmaker[Session], session_maker), "run-1") + + # Assert + assert result is None + + +def test_get_message_context_should_default_created_at_to_zero_when_message_has_no_timestamp() -> None: + # Arrange + message = 
SimpleNamespace( + id="msg-1", + conversation_id="conv-1", + created_at=None, + answer="answer", + ) + session = SimpleNamespace(scalar=MagicMock(return_value=message)) + session_maker = _SessionMaker(session) + + # Act + result = service_module._get_message_context(cast(sessionmaker[Session], session_maker), "run-1") + + # Assert + assert result is not None + assert result.created_at == 0 + assert result.message_id == "msg-1" + assert result.conversation_id == "conv-1" + assert result.answer == "answer" + + +def test_load_resumption_context_should_return_none_when_pause_entity_missing() -> None: + # Arrange + + # Act + result = service_module._load_resumption_context(None) + + # Assert + assert result is None + + +def test_load_resumption_context_should_return_none_when_pause_entity_state_is_invalid() -> None: + # Arrange + pause_entity = _PauseEntity(state=b"not-a-valid-state") + + # Act + result = service_module._load_resumption_context(pause_entity) + + # Assert + assert result is None + + +def test_load_resumption_context_should_parse_valid_state_into_context() -> None: + # Arrange + context = _build_resumption_context_additional(task_id="task-ctx") + pause_entity = _PauseEntity(state=context.dumps().encode()) + + # Act + result = service_module._load_resumption_context(pause_entity) + + # Assert + assert result is not None + assert result.get_generate_entity().task_id == "task-ctx" + + +def test_resolve_task_id_should_return_workflow_run_id_when_buffer_state_is_missing() -> None: + # Arrange + + # Act + result = service_module._resolve_task_id( + resumption_context=None, + buffer_state=None, + workflow_run_id="run-1", + ) + + # Assert + assert result == "run-1" + + +@pytest.mark.parametrize( + ("payload", "expected"), + [ + (b'{"event":"node_started"}', {"event": "node_started"}), + (b"invalid-json", None), + (b"[]", None), + ], +) +def test_parse_event_message_should_parse_only_json_object( + payload: bytes, + expected: dict[str, Any] | None, +) -> None: + # 
Arrange + + # Act + result = service_module._parse_event_message(payload) + + # Assert + assert result == expected + + +def test_is_terminal_event_should_recognize_finished_and_optional_paused_events() -> None: + # Arrange + finished_event = {"event": StreamEvent.WORKFLOW_FINISHED.value} + paused_event = {"event": StreamEvent.WORKFLOW_PAUSED.value} + + # Act + is_finished = service_module._is_terminal_event(finished_event, close_on_pause=False) + paused_without_flag = service_module._is_terminal_event(paused_event, close_on_pause=False) + paused_with_flag = service_module._is_terminal_event(paused_event, close_on_pause=True) + + # Assert + assert is_finished is True + assert paused_without_flag is False + assert paused_with_flag is True + assert service_module._is_terminal_event(StreamEvent.PING.value, close_on_pause=True) is False + + +def test_apply_message_context_should_update_payload_when_context_exists() -> None: + # Arrange + payload: dict[str, Any] = {"event": "workflow_started"} + context = MessageContext(conversation_id="conv-1", message_id="msg-1", created_at=1700000000) + + # Act + service_module._apply_message_context(payload, context) + + # Assert + assert payload["conversation_id"] == "conv-1" + assert payload["message_id"] == "msg-1" + assert payload["created_at"] == 1700000000 + + +def test_start_buffering_should_capture_task_id_and_enqueue_event() -> None: + # Arrange + class Subscription: + def __init__(self) -> None: + self._calls = 0 + + def receive(self, timeout: int = 1) -> bytes | None: + self._calls += 1 + if self._calls == 1: + return b'{"event":"node_started","task_id":"task-1"}' + return None + + subscription = Subscription() + + # Act + buffer_state = service_module._start_buffering(subscription) + ready = buffer_state.task_id_ready.wait(timeout=1) + event = buffer_state.queue.get(timeout=1) + buffer_state.stop_event.set() + finished = buffer_state.done_event.wait(timeout=1) + + # Assert + assert ready is True + assert finished is True 
+ assert buffer_state.task_id_hint == "task-1" + assert event["event"] == "node_started" + + +def test_start_buffering_should_drop_old_event_when_queue_is_full( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + class QueueWithSingleFull: + def __init__(self) -> None: + self._first_put = True + self.items: list[dict[str, Any]] = [{"event": "old"}] + + def put_nowait(self, item: dict[str, Any]) -> None: + if self._first_put: + self._first_put = False + raise queue.Full + self.items.append(item) + + def get_nowait(self) -> dict[str, Any]: + if not self.items: + raise queue.Empty + return self.items.pop(0) + + def empty(self) -> bool: + return len(self.items) == 0 + + fake_queue = QueueWithSingleFull() + monkeypatch.setattr(service_module.queue, "Queue", lambda maxsize=2048: fake_queue) + + class Subscription: + def __init__(self) -> None: + self._calls = 0 + + def receive(self, timeout: int = 1) -> bytes | None: + self._calls += 1 + if self._calls == 1: + return b'{"event":"node_started","task_id":"task-2"}' + return None + + subscription = Subscription() + + # Act + buffer_state = service_module._start_buffering(subscription) + ready = buffer_state.task_id_ready.wait(timeout=1) + buffer_state.stop_event.set() + finished = buffer_state.done_event.wait(timeout=1) + + # Assert + assert ready is True + assert finished is True + assert fake_queue.items[-1]["task_id"] == "task-2" + + +def test_start_buffering_should_set_done_event_when_subscription_raises() -> None: + # Arrange + class Subscription: + def receive(self, timeout: int = 1) -> bytes | None: + raise RuntimeError("subscription failure") + + subscription = Subscription() + + # Act + buffer_state = service_module._start_buffering(subscription) + finished = buffer_state.done_event.wait(timeout=1) + + # Assert + assert finished is True + + +def test_build_workflow_event_stream_should_emit_ping_and_terminal_snapshot_event( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = 
_build_workflow_run_additional(status=WorkflowExecutionStatus.RUNNING) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock()) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr( + service_module, + "_get_message_context", + MagicMock(return_value=MessageContext("conv-1", "msg-1", 1700000000)), + ) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + buffer_state = BufferState( + queue=queue.Queue(), + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + monkeypatch.setattr( + service_module, + "_build_snapshot_events", + MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value, "task_id": "task-1"}]), + ) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.ADVANCED_CHAT, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + ) + ) + + # Assert + assert events[0] == StreamEvent.PING.value + finished_event = cast(Mapping[str, Any], events[1]) + assert finished_event["event"] == StreamEvent.WORKFLOW_FINISHED.value + assert buffer_state.stop_event.is_set() is True + node_repo.get_execution_snapshots_by_workflow_run.assert_called_once() + called_kwargs = node_repo.get_execution_snapshots_by_workflow_run.call_args.kwargs 
+ assert called_kwargs["workflow_run_id"] == "run-1" + + +def test_build_workflow_event_stream_should_emit_periodic_ping_and_stop_after_idle_timeout( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.RUNNING) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock()) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + monkeypatch.setattr(service_module, "_build_snapshot_events", MagicMock(return_value=[])) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + + class AlwaysEmptyQueue: + def empty(self) -> bool: + return False + + def get(self, timeout: int = 1) -> None: + raise queue.Empty + + buffer_state = BufferState( + queue=AlwaysEmptyQueue(), # type: ignore[arg-type] + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + time_values = cycle([0.0, 6.0, 21.0, 26.0]) + monkeypatch.setattr(service_module.time, "time", lambda: next(time_values)) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + idle_timeout=20.0, + ping_interval=5.0, + ) + ) + + # Assert + assert events == [StreamEvent.PING.value, 
StreamEvent.PING.value] + assert buffer_state.stop_event.is_set() is True + + +def test_build_workflow_event_stream_should_exit_when_buffer_done_and_empty( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.RUNNING) + topic = _Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock()) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + monkeypatch.setattr(service_module, "_build_snapshot_events", MagicMock(return_value=[])) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + buffer_state = BufferState( + queue=queue.Queue(), + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + buffer_state.done_event.set() + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + ) + ) + + # Assert + assert events == [StreamEvent.PING.value] + assert buffer_state.stop_event.is_set() is True + + +def test_build_workflow_event_stream_should_continue_when_pause_loading_fails( + monkeypatch: pytest.MonkeyPatch, +) -> None: + # Arrange + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.PAUSED) + topic = 
_Topic(_StaticSubscription()) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock(side_effect=RuntimeError("boom"))) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr(service_module, "_load_resumption_context", MagicMock(return_value=None)) + monkeypatch.setattr(service_module, "_resolve_task_id", MagicMock(return_value="task-1")) + snapshot_builder = MagicMock(return_value=[{"event": StreamEvent.WORKFLOW_FINISHED.value}]) + monkeypatch.setattr(service_module, "_build_snapshot_events", snapshot_builder) + buffer_state = BufferState( + queue=queue.Queue(), + stop_event=Event(), + done_event=Event(), + task_id_ready=Event(), + task_id_hint="task-1", + ) + monkeypatch.setattr(service_module, "_start_buffering", MagicMock(return_value=buffer_state)) + + # Act + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=MagicMock(), + ) + ) + + # Assert + assert events[0] == StreamEvent.PING.value + assert snapshot_builder.call_args.kwargs["pause_entity"] is None + + +def test_is_terminal_event_respects_close_on_pause_flag() -> None: + pause_event = {"event": "workflow_paused"} + finish_event = {"event": "workflow_finished"} + + assert _is_terminal_event(pause_event, close_on_pause=True) is True + assert _is_terminal_event(pause_event, close_on_pause=False) is False + assert _is_terminal_event(finish_event, close_on_pause=False) is True + + +def test_build_snapshot_events_preserves_public_form_token(monkeypatch: 
pytest.MonkeyPatch) -> None: + workflow_run = _build_workflow_run(WorkflowExecutionStatus.PAUSED) + snapshot = _build_snapshot(WorkflowNodeExecutionStatus.PAUSED) + resumption_context = _build_resumption_context("task-ctx") + monkeypatch.setattr( + service_module, "load_form_tokens_by_form_id", lambda form_ids, session=None, surface=None: {"form-1": "wtok"} + ) + session_maker = _SessionMaker( + SimpleNamespace( + execute=lambda _stmt: [("form-1", datetime(2024, 1, 1, tzinfo=UTC), '{"display_in_ui": true}')], + ) + ) + pause_entity = _FakePauseEntity( + pause_id="pause-1", + workflow_run_id="run-1", + paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), + pause_reasons=[ + HumanInputRequired( + form_id="form-1", + form_content="content", + node_id="node-1", + node_title="Human Input", + form_token="wtok", + ) + ], + ) + + events = _build_snapshot_events( + workflow_run=workflow_run, + node_snapshots=[snapshot], + task_id="task-ctx", + message_context=None, + pause_entity=pause_entity, + resumption_context=resumption_context, + session_maker=cast(sessionmaker[Session], session_maker), + ) + + assert events[-2]["event"] == StreamEvent.HUMAN_INPUT_REQUIRED.value + assert events[-2]["data"]["form_token"] == "wtok" + assert events[-2]["data"]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + pause_data = events[-1]["data"] + assert pause_data["reasons"][0]["form_token"] == "wtok" + assert pause_data["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) + + +def test_build_workflow_event_stream_loads_pause_tokens_without_flask_app_context( + monkeypatch: pytest.MonkeyPatch, +) -> None: + workflow_run = _build_workflow_run_additional(status=WorkflowExecutionStatus.PAUSED) + topic = _Topic(_StaticSubscription()) + pause_entity = _FakePauseEntity( + pause_id="pause-1", + workflow_run_id="run-1", + paused_at_value=datetime(2024, 1, 1, tzinfo=UTC), + pause_reasons=[ + HumanInputRequired( + form_id="form-1", + 
form_content="content", + node_id="node-1", + node_title="Human Input", + ) + ], + ) + workflow_run_repo = SimpleNamespace(get_workflow_pause=MagicMock(return_value=pause_entity)) + node_repo = SimpleNamespace(get_execution_snapshots_by_workflow_run=MagicMock(return_value=[])) + factory = SimpleNamespace( + create_api_workflow_run_repository=MagicMock(return_value=workflow_run_repo), + create_api_workflow_node_execution_repository=MagicMock(return_value=node_repo), + ) + monkeypatch.setattr(service_module, "DifyAPIRepositoryFactory", factory) + monkeypatch.setattr(service_module.MessageGenerator, "get_response_topic", MagicMock(return_value=topic)) + monkeypatch.setattr( + service_module, "_load_resumption_context", MagicMock(return_value=_build_resumption_context("task-1")) + ) + monkeypatch.setattr( + service_module, "load_form_tokens_by_form_id", lambda form_ids, session=None, surface=None: {"form-1": "wtok"} + ) + + session = SimpleNamespace( + scalar=MagicMock(return_value=None), + execute=lambda _stmt: [("form-1", datetime(2024, 1, 1, tzinfo=UTC), '{"display_in_ui": true}')], + ) + session_maker = _SessionMaker(session) + + events = list( + build_workflow_event_stream( + app_mode=AppMode.WORKFLOW, + workflow_run=workflow_run, + tenant_id="tenant-1", + app_id="app-1", + session_maker=cast(sessionmaker[Session], session_maker), + ) + ) + + pause_event = cast(Mapping[str, Any], events[-1]) + assert pause_event["event"] == StreamEvent.WORKFLOW_PAUSED.value + assert pause_event["data"]["reasons"][0]["form_token"] == "wtok" + assert pause_event["data"]["reasons"][0]["expiration_time"] == int(datetime(2024, 1, 1, tzinfo=UTC).timestamp()) diff --git a/api/tests/unit_tests/tasks/test_dataset_indexing_task.py b/api/tests/unit_tests/tasks/test_dataset_indexing_task.py index 5dad58b8f1..b74079bd69 100644 --- a/api/tests/unit_tests/tasks/test_dataset_indexing_task.py +++ b/api/tests/unit_tests/tasks/test_dataset_indexing_task.py @@ -89,9 +89,6 @@ def mock_db_session(): 
session = MagicMock() session._shared_data = {"dataset": None, "documents": []} - # Keep a pointer so repeated Document.first() calls iterate across provided docs - session._doc_first_idx = 0 - def _get_entity(stmt) -> type | None: """Extract the mapped entity class from a SQLAlchemy select statement.""" try: @@ -1591,18 +1588,7 @@ class TestDocumentIndexingTaskSummaryFlow: need_summary=True, ) - dataset_query = MagicMock() - dataset_query.where.return_value = dataset_query - dataset_query.first.return_value = dataset - phase1_docs = [SimpleNamespace(id="doc-1"), SimpleNamespace(id="doc-2"), SimpleNamespace(id="doc-3")] - phase1_document_query = MagicMock() - phase1_document_query.where.return_value = phase1_document_query - phase1_document_query.all.return_value = phase1_docs - - summary_document_query = MagicMock() - summary_document_query.where.return_value = summary_document_query - summary_document_query.all.return_value = [doc_eligible, doc_skip_form, doc_skip_status] session1 = MagicMock() session2 = MagicMock() @@ -1657,18 +1643,6 @@ class TestDocumentIndexingTaskSummaryFlow: need_summary=True, ) - dataset_query = MagicMock() - dataset_query.where.return_value = dataset_query - dataset_query.first.return_value = dataset - - phase1_query = MagicMock() - phase1_query.where.return_value = phase1_query - phase1_query.all.return_value = [SimpleNamespace(id="doc-1")] - - summary_query = MagicMock() - summary_query.where.return_value = summary_query - summary_query.all.return_value = [doc_eligible] - session1 = MagicMock() session2 = MagicMock() session2.begin.return_value = nullcontext() diff --git a/api/tests/unit_tests/tasks/test_workflow_execute_task.py b/api/tests/unit_tests/tasks/test_workflow_execute_task.py index d3cf632b47..72508bef52 100644 --- a/api/tests/unit_tests/tasks/test_workflow_execute_task.py +++ b/api/tests/unit_tests/tasks/test_workflow_execute_task.py @@ -7,11 +7,17 @@ from unittest.mock import MagicMock import pytest -from 
core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom +from core.app.entities.app_invoke_entities import AdvancedChatAppGenerateEntity, InvokeFrom, WorkflowAppGenerateEntity from models.enums import CreatorUserRole from models.model import App, AppMode, Conversation from models.workflow import Workflow, WorkflowRun -from tasks.app_generate.workflow_execute_task import _publish_streaming_response, _resume_app_execution +from repositories.sqlalchemy_api_workflow_run_repository import _WorkflowRunError +from tasks.app_generate.workflow_execute_task import ( + _publish_streaming_response, + _resume_advanced_chat, + _resume_app_execution, + _resume_workflow, +) class _FakeSessionContext: @@ -38,12 +44,28 @@ def _build_advanced_chat_generate_entity(conversation_id: str | None) -> Advance ) +def _build_workflow_generate_entity(stream: bool) -> WorkflowAppGenerateEntity: + return WorkflowAppGenerateEntity( + task_id="task-id", + inputs={}, + files=[], + user_id="user-id", + stream=stream, + invoke_from=InvokeFrom.WEB_APP, + workflow_execution_id="workflow-run-id", + ) + + +def _single_event_generator(payload): + yield payload + + @pytest.fixture -def mock_topic(mocker) -> MagicMock: +def mock_topic(monkeypatch: pytest.MonkeyPatch) -> MagicMock: topic = MagicMock() - mocker.patch( + monkeypatch.setattr( "tasks.app_generate.workflow_execute_task.MessageBasedAppGenerator.get_response_topic", - return_value=topic, + lambda *_args, **_kwargs: topic, ) return topic @@ -67,31 +89,35 @@ def test_publish_streaming_response_coerces_string_uuid(mock_topic: MagicMock): mock_topic.publish.assert_called_once_with(json.dumps({"event": "bar"}).encode()) -def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(mocker): +def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(monkeypatch: pytest.MonkeyPatch): workflow_run_id = "run-id" conversation_id = "conversation-id" message = MagicMock() - 
mocker.patch("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) + monkeypatch.setattr("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) pause_entity = MagicMock() pause_entity.get_state.return_value = b"state" workflow_run_repo = MagicMock() workflow_run_repo.get_workflow_pause.return_value = pause_entity - mocker.patch( + monkeypatch.setattr( "tasks.app_generate.workflow_execute_task.DifyAPIRepositoryFactory.create_api_workflow_run_repository", - return_value=workflow_run_repo, + lambda *_args, **_kwargs: workflow_run_repo, ) generate_entity = _build_advanced_chat_generate_entity(conversation_id) resumption_context = MagicMock() resumption_context.serialized_graph_runtime_state = "{}" resumption_context.get_generate_entity.return_value = generate_entity - mocker.patch( - "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", return_value=resumption_context + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", + lambda *_args, **_kwargs: resumption_context, + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", + lambda *_args, **_kwargs: MagicMock(), ) - mocker.patch("tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", return_value=MagicMock()) workflow_run = SimpleNamespace( workflow_id="wf-id", @@ -120,10 +146,15 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m session.get.side_effect = _session_get session.scalar.return_value = message - mocker.patch("tasks.app_generate.workflow_execute_task.Session", return_value=_FakeSessionContext(session)) - mocker.patch("tasks.app_generate.workflow_execute_task._resolve_user_for_run", return_value=MagicMock()) - resume_advanced_chat = mocker.patch("tasks.app_generate.workflow_execute_task._resume_advanced_chat") - mocker.patch("tasks.app_generate.workflow_execute_task._resume_workflow") + 
monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.Session", lambda *_args, **_kwargs: _FakeSessionContext(session) + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._resolve_user_for_run", lambda *_args, **_kwargs: MagicMock() + ) + resume_advanced_chat = MagicMock() + monkeypatch.setattr("tasks.app_generate.workflow_execute_task._resume_advanced_chat", resume_advanced_chat) + monkeypatch.setattr("tasks.app_generate.workflow_execute_task._resume_workflow", MagicMock()) _resume_app_execution({"workflow_run_id": workflow_run_id}) @@ -144,29 +175,35 @@ def test_resume_app_execution_queries_message_by_conversation_and_workflow_run(m assert resume_advanced_chat.call_args.kwargs["message"] is message -def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversation_id(mocker): +def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversation_id( + monkeypatch: pytest.MonkeyPatch, +): workflow_run_id = "run-id" - mocker.patch("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) + monkeypatch.setattr("tasks.app_generate.workflow_execute_task.db", SimpleNamespace(engine=object())) pause_entity = MagicMock() pause_entity.get_state.return_value = b"state" workflow_run_repo = MagicMock() workflow_run_repo.get_workflow_pause.return_value = pause_entity - mocker.patch( + monkeypatch.setattr( "tasks.app_generate.workflow_execute_task.DifyAPIRepositoryFactory.create_api_workflow_run_repository", - return_value=workflow_run_repo, + lambda *_args, **_kwargs: workflow_run_repo, ) generate_entity = _build_advanced_chat_generate_entity(conversation_id=None) resumption_context = MagicMock() resumption_context.serialized_graph_runtime_state = "{}" resumption_context.get_generate_entity.return_value = generate_entity - mocker.patch( - "tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", return_value=resumption_context + monkeypatch.setattr( + 
"tasks.app_generate.workflow_execute_task.WorkflowResumptionContext.loads", + lambda *_args, **_kwargs: resumption_context, + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", + lambda *_args, **_kwargs: MagicMock(), ) - mocker.patch("tasks.app_generate.workflow_execute_task.GraphRuntimeState.from_snapshot", return_value=MagicMock()) workflow_run = SimpleNamespace( workflow_id="wf-id", @@ -191,12 +228,152 @@ def test_resume_app_execution_returns_early_when_advanced_chat_missing_conversat session.get.side_effect = _session_get - mocker.patch("tasks.app_generate.workflow_execute_task.Session", return_value=_FakeSessionContext(session)) - mocker.patch("tasks.app_generate.workflow_execute_task._resolve_user_for_run", return_value=MagicMock()) - resume_advanced_chat = mocker.patch("tasks.app_generate.workflow_execute_task._resume_advanced_chat") + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.Session", lambda *_args, **_kwargs: _FakeSessionContext(session) + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._resolve_user_for_run", lambda *_args, **_kwargs: MagicMock() + ) + resume_advanced_chat = MagicMock() + monkeypatch.setattr("tasks.app_generate.workflow_execute_task._resume_advanced_chat", resume_advanced_chat) _resume_app_execution({"workflow_run_id": workflow_run_id}) session.scalar.assert_not_called() workflow_run_repo.resume_workflow_pause.assert_not_called() resume_advanced_chat.assert_not_called() + + +def test_resume_advanced_chat_publishes_events_for_originally_blocking_runs(monkeypatch: pytest.MonkeyPatch): + generate_entity = _build_advanced_chat_generate_entity(conversation_id="conversation-id") + generate_entity.stream = False + + generator_instance = MagicMock() + response_stream = _single_event_generator({"event": "message"}) + generator_instance.resume.return_value = response_stream + monkeypatch.setattr( + 
"tasks.app_generate.workflow_execute_task.AdvancedChatAppGenerator", + lambda: generator_instance, + ) + + publish_streaming_response = MagicMock() + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._publish_streaming_response", publish_streaming_response + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_execution_repository", + lambda **kwargs: MagicMock(), + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + lambda **kwargs: MagicMock(), + ) + + _resume_advanced_chat( + app_model=SimpleNamespace(id="app-id"), + workflow=SimpleNamespace(created_by="workflow-owner"), + user=MagicMock(), + conversation=SimpleNamespace(id="conversation-id"), + message=MagicMock(), + generate_entity=generate_entity, + graph_runtime_state=MagicMock(), + session_factory=MagicMock(), + pause_state_config=MagicMock(), + workflow_run_id="workflow-run-id", + workflow_run=SimpleNamespace(triggered_from="app_run"), + ) + + resumed_entity = generator_instance.resume.call_args.kwargs["application_generate_entity"] + assert resumed_entity.stream is True + publish_streaming_response.assert_called_once_with(response_stream, "workflow-run-id", AppMode.ADVANCED_CHAT) + + +def test_resume_workflow_publishes_events_for_originally_blocking_runs(monkeypatch: pytest.MonkeyPatch): + generate_entity = _build_workflow_generate_entity(stream=False) + + generator_instance = MagicMock() + response_stream = _single_event_generator({"event": "workflow_finished"}) + generator_instance.resume.return_value = response_stream + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowAppGenerator", + lambda: generator_instance, + ) + + publish_streaming_response = MagicMock() + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._publish_streaming_response", publish_streaming_response + ) + monkeypatch.setattr( + 
"tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_execution_repository", + lambda **kwargs: MagicMock(), + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + lambda **kwargs: MagicMock(), + ) + workflow_run_repo = MagicMock() + pause_entity = MagicMock() + + _resume_workflow( + app_model=SimpleNamespace(id="app-id"), + workflow=SimpleNamespace(created_by="workflow-owner"), + user=MagicMock(), + generate_entity=generate_entity, + graph_runtime_state=MagicMock(), + session_factory=MagicMock(), + pause_state_config=MagicMock(), + workflow_run_id="workflow-run-id", + workflow_run=SimpleNamespace(triggered_from="app_run"), + workflow_run_repo=workflow_run_repo, + pause_entity=pause_entity, + ) + + resumed_entity = generator_instance.resume.call_args.kwargs["application_generate_entity"] + assert resumed_entity.stream is True + publish_streaming_response.assert_called_once_with(response_stream, "workflow-run-id", AppMode.WORKFLOW) + workflow_run_repo.delete_workflow_pause.assert_called_once_with(pause_entity) + + +def test_resume_workflow_ignores_missing_old_pause_after_repause(monkeypatch: pytest.MonkeyPatch): + generate_entity = _build_workflow_generate_entity(stream=False) + + generator_instance = MagicMock() + response_stream = _single_event_generator({"event": "workflow_paused"}) + generator_instance.resume.return_value = response_stream + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.WorkflowAppGenerator", + lambda: generator_instance, + ) + + publish_streaming_response = MagicMock() + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task._publish_streaming_response", publish_streaming_response + ) + monkeypatch.setattr( + "tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_execution_repository", + lambda **kwargs: MagicMock(), + ) + monkeypatch.setattr( + 
"tasks.app_generate.workflow_execute_task.DifyCoreRepositoryFactory.create_workflow_node_execution_repository", + lambda **kwargs: MagicMock(), + ) + workflow_run_repo = MagicMock() + workflow_run_repo.delete_workflow_pause.side_effect = _WorkflowRunError("WorkflowPause not found: old-pause") + pause_entity = MagicMock() + + _resume_workflow( + app_model=SimpleNamespace(id="app-id"), + workflow=SimpleNamespace(created_by="workflow-owner"), + user=MagicMock(), + generate_entity=generate_entity, + graph_runtime_state=MagicMock(), + session_factory=MagicMock(), + pause_state_config=MagicMock(), + workflow_run_id="workflow-run-id", + workflow_run=SimpleNamespace(triggered_from="app_run"), + workflow_run_repo=workflow_run_repo, + pause_entity=pause_entity, + ) + + publish_streaming_response.assert_called_once_with(response_stream, "workflow-run-id", AppMode.WORKFLOW) + workflow_run_repo.delete_workflow_pause.assert_called_once_with(pause_entity) diff --git a/api/tests/unit_tests/utils/encryption/test_system_encryption.py b/api/tests/unit_tests/utils/encryption/test_system_encryption.py new file mode 100644 index 0000000000..0435facfdb --- /dev/null +++ b/api/tests/unit_tests/utils/encryption/test_system_encryption.py @@ -0,0 +1,619 @@ +import base64 +import hashlib +from unittest.mock import patch + +import pytest +from Crypto.Cipher import AES +from Crypto.Random import get_random_bytes +from Crypto.Util.Padding import pad + +from core.tools.utils.system_encryption import ( + EncryptionError, + SystemEncrypter, + create_system_encrypter, + decrypt_system_params, + encrypt_system_params, + get_system_encrypter, +) + + +class TestSystemEncrypter: + """Test cases for SystemEncrypter class""" + + def test_init_with_secret_key(self): + """Test initialization with provided secret key""" + secret_key = "test_secret_key" + encrypter = SystemEncrypter(secret_key=secret_key) + expected_key = hashlib.sha256(secret_key.encode()).digest() + assert encrypter.key == expected_key + + 
def test_init_with_none_secret_key(self): + """Test initialization with None secret key falls back to config""" + with patch("core.tools.utils.system_encryption.dify_config") as mock_config: + mock_config.SECRET_KEY = "config_secret" + encrypter = SystemEncrypter(secret_key=None) + expected_key = hashlib.sha256(b"config_secret").digest() + assert encrypter.key == expected_key + + def test_init_with_empty_secret_key(self): + """Test initialization with empty secret key""" + encrypter = SystemEncrypter(secret_key="") + expected_key = hashlib.sha256(b"").digest() + assert encrypter.key == expected_key + + def test_init_without_secret_key_uses_config(self): + """Test initialization without secret key uses config""" + with patch("core.tools.utils.system_encryption.dify_config") as mock_config: + mock_config.SECRET_KEY = "default_secret" + encrypter = SystemEncrypter() + expected_key = hashlib.sha256(b"default_secret").digest() + assert encrypter.key == expected_key + + def test_encrypt_params_basic(self): + """Test basic parameters encryption""" + encrypter = SystemEncrypter("test_secret") + params = {"client_id": "test_id", "client_secret": "test_secret"} + + encrypted = encrypter.encrypt_params(params) + + assert isinstance(encrypted, str) + assert len(encrypted) > 0 + # Should be valid base64 + try: + base64.b64decode(encrypted) + except Exception: + pytest.fail("Encrypted result is not valid base64") + + def test_encrypt_params_empty_dict(self): + """Test encryption with empty dictionary""" + encrypter = SystemEncrypter("test_secret") + params = {} + + encrypted = encrypter.encrypt_params(params) + assert isinstance(encrypted, str) + assert len(encrypted) > 0 + + def test_encrypt_params_complex_data(self): + """Test encryption with complex data structures""" + encrypter = SystemEncrypter("test_secret") + params = { + "client_id": "test_id", + "client_secret": "test_secret", + "scopes": ["read", "write", "admin"], + "metadata": {"issuer": "test_issuer", "expires_in": 
3600, "is_active": True}, + "numeric_value": 42, + "boolean_value": False, + "null_value": None, + } + + encrypted = encrypter.encrypt_params(params) + assert isinstance(encrypted, str) + assert len(encrypted) > 0 + + def test_encrypt_params_unicode_data(self): + """Test encryption with unicode data""" + encrypter = SystemEncrypter("test_secret") + params = {"client_id": "test_id", "client_secret": "test_secret", "description": "This is a test case 🚀"} + + encrypted = encrypter.encrypt_params(params) + assert isinstance(encrypted, str) + assert len(encrypted) > 0 + + def test_encrypt_params_large_data(self): + """Test encryption with large data""" + encrypter = SystemEncrypter("test_secret") + params = { + "client_id": "test_id", + "large_data": "x" * 10000, # 10KB of data + } + + encrypted = encrypter.encrypt_params(params) + assert isinstance(encrypted, str) + assert len(encrypted) > 0 + + def test_encrypt_params_invalid_input(self): + """Test encryption with invalid input types""" + encrypter = SystemEncrypter("test_secret") + + with pytest.raises(Exception): # noqa: B017 + encrypter.encrypt_params(None) + + with pytest.raises(Exception): # noqa: B017 + encrypter.encrypt_params("not_a_dict") + + def test_decrypt_params_basic(self): + """Test basic parameters decryption""" + encrypter = SystemEncrypter("test_secret") + original_params = {"client_id": "test_id", "client_secret": "test_secret"} + + encrypted = encrypter.encrypt_params(original_params) + decrypted = encrypter.decrypt_params(encrypted) + + assert decrypted == original_params + + def test_decrypt_params_empty_dict(self): + """Test decryption of empty dictionary""" + encrypter = SystemEncrypter("test_secret") + original_params = {} + + encrypted = encrypter.encrypt_params(original_params) + decrypted = encrypter.decrypt_params(encrypted) + + assert decrypted == original_params + + def test_decrypt_params_complex_data(self): + """Test decryption with complex data structures""" + encrypter = 
SystemEncrypter("test_secret") + original_params = { + "client_id": "test_id", + "client_secret": "test_secret", + "scopes": ["read", "write", "admin"], + "metadata": {"issuer": "test_issuer", "expires_in": 3600, "is_active": True}, + "numeric_value": 42, + "boolean_value": False, + "null_value": None, + } + + encrypted = encrypter.encrypt_params(original_params) + decrypted = encrypter.decrypt_params(encrypted) + + assert decrypted == original_params + + def test_decrypt_params_unicode_data(self): + """Test decryption with unicode data""" + encrypter = SystemEncrypter("test_secret") + original_params = { + "client_id": "test_id", + "client_secret": "test_secret", + "description": "This is a test case 🚀", + } + + encrypted = encrypter.encrypt_params(original_params) + decrypted = encrypter.decrypt_params(encrypted) + + assert decrypted == original_params + + def test_decrypt_params_large_data(self): + """Test decryption with large data""" + encrypter = SystemEncrypter("test_secret") + original_params = { + "client_id": "test_id", + "large_data": "x" * 10000, # 10KB of data + } + + encrypted = encrypter.encrypt_params(original_params) + decrypted = encrypter.decrypt_params(encrypted) + + assert decrypted == original_params + + def test_decrypt_params_invalid_base64(self): + """Test decryption with invalid base64 data""" + encrypter = SystemEncrypter("test_secret") + + with pytest.raises(EncryptionError): + encrypter.decrypt_params("invalid_base64!") + + def test_decrypt_params_empty_string(self): + """Test decryption with empty string""" + encrypter = SystemEncrypter("test_secret") + + with pytest.raises(ValueError) as exc_info: + encrypter.decrypt_params("") + + assert "encrypted_data cannot be empty" in str(exc_info.value) + + def test_decrypt_params_non_string_input(self): + """Test decryption with non-string input""" + encrypter = SystemEncrypter("test_secret") + + with pytest.raises(ValueError) as exc_info: + encrypter.decrypt_params(123) + + assert 
"encrypted_data must be a string" in str(exc_info.value) + + with pytest.raises(ValueError) as exc_info: + encrypter.decrypt_params(None) + + assert "encrypted_data must be a string" in str(exc_info.value) + + def test_decrypt_params_too_short_data(self): + """Test decryption with too short encrypted data""" + encrypter = SystemEncrypter("test_secret") + + # Create data that's too short (less than 32 bytes) + short_data = base64.b64encode(b"short").decode() + + with pytest.raises(EncryptionError) as exc_info: + encrypter.decrypt_params(short_data) + + assert "Invalid encrypted data format" in str(exc_info.value) + + def test_decrypt_params_corrupted_data(self): + """Test decryption with corrupted data""" + encrypter = SystemEncrypter("test_secret") + + # Create corrupted data (valid base64 but invalid encrypted content) + corrupted_data = base64.b64encode(b"x" * 48).decode() # 48 bytes of garbage + + with pytest.raises(EncryptionError): + encrypter.decrypt_params(corrupted_data) + + def test_decrypt_params_wrong_key(self): + """Test decryption with wrong key""" + encrypter1 = SystemEncrypter("secret1") + encrypter2 = SystemEncrypter("secret2") + + original_params = {"client_id": "test_id", "client_secret": "test_secret"} + encrypted = encrypter1.encrypt_params(original_params) + + with pytest.raises(EncryptionError): + encrypter2.decrypt_params(encrypted) + + def test_encryption_decryption_consistency(self): + """Test that encryption and decryption are consistent""" + encrypter = SystemEncrypter("test_secret") + + test_cases = [ + {}, + {"simple": "value"}, + {"client_id": "id", "client_secret": "secret"}, + {"complex": {"nested": {"deep": "value"}}}, + {"unicode": "test 🚀"}, + {"numbers": 42, "boolean": True, "null": None}, + {"array": [1, 2, 3, "four", {"five": 5}]}, + ] + + for original_params in test_cases: + encrypted = encrypter.encrypt_params(original_params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == original_params, f"Failed 
for case: {original_params}" + + def test_encryption_randomness(self): + """Test that encryption produces different results for same input""" + encrypter = SystemEncrypter("test_secret") + params = {"client_id": "test_id", "client_secret": "test_secret"} + + encrypted1 = encrypter.encrypt_params(params) + encrypted2 = encrypter.encrypt_params(params) + + # Should be different due to random IV + assert encrypted1 != encrypted2 + + # But should decrypt to same result + decrypted1 = encrypter.decrypt_params(encrypted1) + decrypted2 = encrypter.decrypt_params(encrypted2) + assert decrypted1 == decrypted2 == params + + def test_different_secret_keys_produce_different_results(self): + """Test that different secret keys produce different encrypted results""" + encrypter1 = SystemEncrypter("secret1") + encrypter2 = SystemEncrypter("secret2") + + params = {"client_id": "test_id", "client_secret": "test_secret"} + + encrypted1 = encrypter1.encrypt_params(params) + encrypted2 = encrypter2.encrypt_params(params) + + # Should produce different encrypted results + assert encrypted1 != encrypted2 + + # But each should decrypt correctly with its own key + decrypted1 = encrypter1.decrypt_params(encrypted1) + decrypted2 = encrypter2.decrypt_params(encrypted2) + assert decrypted1 == decrypted2 == params + + @patch("core.tools.utils.system_encryption.get_random_bytes") + def test_encrypt_params_crypto_error(self, mock_get_random_bytes): + """Test encryption when crypto operation fails""" + mock_get_random_bytes.side_effect = Exception("Crypto error") + + encrypter = SystemEncrypter("test_secret") + params = {"client_id": "test_id"} + + with pytest.raises(EncryptionError) as exc_info: + encrypter.encrypt_params(params) + + assert "Encryption failed" in str(exc_info.value) + + @patch("core.tools.utils.system_encryption.TypeAdapter") + def test_encrypt_params_serialization_error(self, mock_type_adapter): + """Test encryption when JSON serialization fails""" + 
mock_type_adapter.return_value.dump_json.side_effect = Exception("Serialization error") + + encrypter = SystemEncrypter("test_secret") + params = {"client_id": "test_id"} + + with pytest.raises(EncryptionError) as exc_info: + encrypter.encrypt_params(params) + + assert "Encryption failed" in str(exc_info.value) + + def test_decrypt_params_invalid_json(self): + """Test decryption with invalid JSON data""" + encrypter = SystemEncrypter("test_secret") + + # Create valid encrypted data but with invalid JSON content + iv = get_random_bytes(16) + cipher = AES.new(encrypter.key, AES.MODE_CBC, iv) + invalid_json = b"invalid json content" + padded_data = pad(invalid_json, AES.block_size) + encrypted_data = cipher.encrypt(padded_data) + combined = iv + encrypted_data + encoded = base64.b64encode(combined).decode() + + with pytest.raises(EncryptionError): + encrypter.decrypt_params(encoded) + + def test_key_derivation_consistency(self): + """Test that key derivation is consistent""" + secret_key = "test_secret" + encrypter1 = SystemEncrypter(secret_key) + encrypter2 = SystemEncrypter(secret_key) + + assert encrypter1.key == encrypter2.key + + # Keys should be 32 bytes (256 bits) + assert len(encrypter1.key) == 32 + + +class TestFactoryFunctions: + """Test cases for factory functions""" + + def test_create_system_encrypter_with_secret(self): + """Test factory function with secret key""" + secret_key = "test_secret" + encrypter = create_system_encrypter(secret_key) + + assert isinstance(encrypter, SystemEncrypter) + expected_key = hashlib.sha256(secret_key.encode()).digest() + assert encrypter.key == expected_key + + def test_create_system_encrypter_without_secret(self): + """Test factory function without secret key""" + with patch("core.tools.utils.system_encryption.dify_config") as mock_config: + mock_config.SECRET_KEY = "config_secret" + encrypter = create_system_encrypter() + + assert isinstance(encrypter, SystemEncrypter) + expected_key = 
hashlib.sha256(b"config_secret").digest() + assert encrypter.key == expected_key + + def test_create_system_encrypter_with_none_secret(self): + """Test factory function with None secret key""" + with patch("core.tools.utils.system_encryption.dify_config") as mock_config: + mock_config.SECRET_KEY = "config_secret" + encrypter = create_system_encrypter(None) + + assert isinstance(encrypter, SystemEncrypter) + expected_key = hashlib.sha256(b"config_secret").digest() + assert encrypter.key == expected_key + + +class TestGlobalEncrypterInstance: + """Test cases for global encrypter instance""" + + def test_get_system_encrypter_singleton(self): + """Test that get_system_encrypter returns singleton instance""" + # Clear the global instance first + import core.tools.utils.system_encryption + + core.tools.utils.system_encryption._encrypter = None + + encrypter1 = get_system_encrypter() + encrypter2 = get_system_encrypter() + + assert encrypter1 is encrypter2 + assert isinstance(encrypter1, SystemEncrypter) + + def test_get_system_encrypter_uses_config(self): + """Test that global encrypter uses config""" + # Clear the global instance first + import core.tools.utils.system_encryption + + core.tools.utils.system_encryption._encrypter = None + + with patch("core.tools.utils.system_encryption.dify_config") as mock_config: + mock_config.SECRET_KEY = "global_secret" + encrypter = get_system_encrypter() + + expected_key = hashlib.sha256(b"global_secret").digest() + assert encrypter.key == expected_key + + +class TestConvenienceFunctions: + """Test cases for convenience functions""" + + def test_encrypt_system_params(self): + """Test encrypt_system_params convenience function""" + params = {"client_id": "test_id", "client_secret": "test_secret"} + + encrypted = encrypt_system_params(params) + + assert isinstance(encrypted, str) + assert len(encrypted) > 0 + + def test_decrypt_system_params(self): + """Test decrypt_system_params convenience function""" + params = {"client_id": 
"test_id", "client_secret": "test_secret"} + + encrypted = encrypt_system_params(params) + decrypted = decrypt_system_params(encrypted) + + assert decrypted == params + + def test_convenience_functions_consistency(self): + """Test that convenience functions work consistently""" + test_cases = [ + {}, + {"simple": "value"}, + {"client_id": "id", "client_secret": "secret"}, + {"complex": {"nested": {"deep": "value"}}}, + {"unicode": "test 🚀"}, + {"numbers": 42, "boolean": True, "null": None}, + ] + + for original_params in test_cases: + encrypted = encrypt_system_params(original_params) + decrypted = decrypt_system_params(encrypted) + assert decrypted == original_params, f"Failed for case: {original_params}" + + def test_convenience_functions_with_errors(self): + """Test convenience functions with error conditions""" + # Test encryption with invalid input + with pytest.raises(Exception): # noqa: B017 + encrypt_system_params(None) + + # Test decryption with invalid input + with pytest.raises(ValueError): + decrypt_system_params("") + + with pytest.raises(ValueError): + decrypt_system_params(None) + + +class TestErrorHandling: + """Test cases for error handling""" + + def test_encryption_error_inheritance(self): + """Test that EncryptionError is a proper exception""" + error = EncryptionError("Test error") + assert isinstance(error, Exception) + assert str(error) == "Test error" + + def test_encryption_error_with_cause(self): + """Test EncryptionError with cause""" + original_error = ValueError("Original error") + error = EncryptionError("Wrapper error") + error.__cause__ = original_error + + assert isinstance(error, Exception) + assert str(error) == "Wrapper error" + assert error.__cause__ is original_error + + def test_error_messages_are_informative(self): + """Test that error messages are informative""" + encrypter = SystemEncrypter("test_secret") + + # Test empty string error + with pytest.raises(ValueError) as exc_info: + encrypter.decrypt_params("") + assert 
"encrypted_data cannot be empty" in str(exc_info.value) + + # Test non-string error + with pytest.raises(ValueError) as exc_info: + encrypter.decrypt_params(123) + assert "encrypted_data must be a string" in str(exc_info.value) + + # Test invalid format error + short_data = base64.b64encode(b"short").decode() + with pytest.raises(EncryptionError) as exc_info: + encrypter.decrypt_params(short_data) + assert "Invalid encrypted data format" in str(exc_info.value) + + +class TestEdgeCases: + """Test cases for edge cases and boundary conditions""" + + def test_very_long_secret_key(self): + """Test with very long secret key""" + long_secret = "x" * 10000 + encrypter = SystemEncrypter(long_secret) + + # Key should still be 32 bytes due to SHA-256 + assert len(encrypter.key) == 32 + + # Should still work normally + params = {"client_id": "test_id"} + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + def test_special_characters_in_secret_key(self): + """Test with special characters in secret key""" + special_secret = "!@#$%^&*()_+-=[]{}|;':\",./<>?`~test🚀" + encrypter = SystemEncrypter(special_secret) + + params = {"client_id": "test_id"} + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + def test_empty_values_in_params(self): + """Test with empty values in params""" + params = { + "client_id": "", + "client_secret": "", + "empty_dict": {}, + "empty_list": [], + "empty_string": "", + "zero": 0, + "false": False, + "none": None, + } + + encrypter = SystemEncrypter("test_secret") + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + def test_deeply_nested_params(self): + """Test with deeply nested params""" + params = {"level1": {"level2": {"level3": {"level4": {"level5": {"deep_value": "found"}}}}}} + + encrypter = SystemEncrypter("test_secret") 
+ encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + def test_params_with_all_json_types(self): + """Test with all JSON-supported data types""" + params = { + "string": "test_string", + "integer": 42, + "float": 3.14159, + "boolean_true": True, + "boolean_false": False, + "null_value": None, + "empty_string": "", + "array": [1, "two", 3.0, True, False, None], + "object": {"nested_string": "nested_value", "nested_number": 123, "nested_bool": True}, + } + + encrypter = SystemEncrypter("test_secret") + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + +class TestPerformance: + """Test cases for performance considerations""" + + def test_large_params(self): + """Test with large params""" + large_value = "x" * 100000 # 100KB + params = {"client_id": "test_id", "large_data": large_value} + + encrypter = SystemEncrypter("test_secret") + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + def test_many_fields_params(self): + """Test with many fields in params""" + params = {f"field_{i}": f"value_{i}" for i in range(1000)} + + encrypter = SystemEncrypter("test_secret") + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params + + def test_repeated_encryption_decryption(self): + """Test repeated encryption and decryption operations""" + encrypter = SystemEncrypter("test_secret") + params = {"client_id": "test_id", "client_secret": "test_secret"} + + # Test multiple rounds of encryption/decryption + for i in range(100): + encrypted = encrypter.encrypt_params(params) + decrypted = encrypter.decrypt_params(encrypted) + assert decrypted == params diff --git a/api/tests/unit_tests/utils/oauth_encryption/test_system_oauth_encryption.py 
b/api/tests/unit_tests/utils/oauth_encryption/test_system_oauth_encryption.py deleted file mode 100644 index e2607f0fb1..0000000000 --- a/api/tests/unit_tests/utils/oauth_encryption/test_system_oauth_encryption.py +++ /dev/null @@ -1,619 +0,0 @@ -import base64 -import hashlib -from unittest.mock import patch - -import pytest -from Crypto.Cipher import AES -from Crypto.Random import get_random_bytes -from Crypto.Util.Padding import pad - -from core.tools.utils.system_oauth_encryption import ( - OAuthEncryptionError, - SystemOAuthEncrypter, - create_system_oauth_encrypter, - decrypt_system_oauth_params, - encrypt_system_oauth_params, - get_system_oauth_encrypter, -) - - -class TestSystemOAuthEncrypter: - """Test cases for SystemOAuthEncrypter class""" - - def test_init_with_secret_key(self): - """Test initialization with provided secret key""" - secret_key = "test_secret_key" - encrypter = SystemOAuthEncrypter(secret_key=secret_key) - expected_key = hashlib.sha256(secret_key.encode()).digest() - assert encrypter.key == expected_key - - def test_init_with_none_secret_key(self): - """Test initialization with None secret key falls back to config""" - with patch("core.tools.utils.system_oauth_encryption.dify_config") as mock_config: - mock_config.SECRET_KEY = "config_secret" - encrypter = SystemOAuthEncrypter(secret_key=None) - expected_key = hashlib.sha256(b"config_secret").digest() - assert encrypter.key == expected_key - - def test_init_with_empty_secret_key(self): - """Test initialization with empty secret key""" - encrypter = SystemOAuthEncrypter(secret_key="") - expected_key = hashlib.sha256(b"").digest() - assert encrypter.key == expected_key - - def test_init_without_secret_key_uses_config(self): - """Test initialization without secret key uses config""" - with patch("core.tools.utils.system_oauth_encryption.dify_config") as mock_config: - mock_config.SECRET_KEY = "default_secret" - encrypter = SystemOAuthEncrypter() - expected_key = 
hashlib.sha256(b"default_secret").digest() - assert encrypter.key == expected_key - - def test_encrypt_oauth_params_basic(self): - """Test basic OAuth parameters encryption""" - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = {"client_id": "test_id", "client_secret": "test_secret"} - - encrypted = encrypter.encrypt_oauth_params(oauth_params) - - assert isinstance(encrypted, str) - assert len(encrypted) > 0 - # Should be valid base64 - try: - base64.b64decode(encrypted) - except Exception: - pytest.fail("Encrypted result is not valid base64") - - def test_encrypt_oauth_params_empty_dict(self): - """Test encryption with empty dictionary""" - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = {} - - encrypted = encrypter.encrypt_oauth_params(oauth_params) - assert isinstance(encrypted, str) - assert len(encrypted) > 0 - - def test_encrypt_oauth_params_complex_data(self): - """Test encryption with complex data structures""" - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = { - "client_id": "test_id", - "client_secret": "test_secret", - "scopes": ["read", "write", "admin"], - "metadata": {"issuer": "test_issuer", "expires_in": 3600, "is_active": True}, - "numeric_value": 42, - "boolean_value": False, - "null_value": None, - } - - encrypted = encrypter.encrypt_oauth_params(oauth_params) - assert isinstance(encrypted, str) - assert len(encrypted) > 0 - - def test_encrypt_oauth_params_unicode_data(self): - """Test encryption with unicode data""" - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = {"client_id": "test_id", "client_secret": "test_secret", "description": "This is a test case 🚀"} - - encrypted = encrypter.encrypt_oauth_params(oauth_params) - assert isinstance(encrypted, str) - assert len(encrypted) > 0 - - def test_encrypt_oauth_params_large_data(self): - """Test encryption with large data""" - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = { - "client_id": "test_id", - 
"large_data": "x" * 10000, # 10KB of data - } - - encrypted = encrypter.encrypt_oauth_params(oauth_params) - assert isinstance(encrypted, str) - assert len(encrypted) > 0 - - def test_encrypt_oauth_params_invalid_input(self): - """Test encryption with invalid input types""" - encrypter = SystemOAuthEncrypter("test_secret") - - with pytest.raises(Exception): # noqa: B017 - encrypter.encrypt_oauth_params(None) - - with pytest.raises(Exception): # noqa: B017 - encrypter.encrypt_oauth_params("not_a_dict") - - def test_decrypt_oauth_params_basic(self): - """Test basic OAuth parameters decryption""" - encrypter = SystemOAuthEncrypter("test_secret") - original_params = {"client_id": "test_id", "client_secret": "test_secret"} - - encrypted = encrypter.encrypt_oauth_params(original_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - - assert decrypted == original_params - - def test_decrypt_oauth_params_empty_dict(self): - """Test decryption of empty dictionary""" - encrypter = SystemOAuthEncrypter("test_secret") - original_params = {} - - encrypted = encrypter.encrypt_oauth_params(original_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - - assert decrypted == original_params - - def test_decrypt_oauth_params_complex_data(self): - """Test decryption with complex data structures""" - encrypter = SystemOAuthEncrypter("test_secret") - original_params = { - "client_id": "test_id", - "client_secret": "test_secret", - "scopes": ["read", "write", "admin"], - "metadata": {"issuer": "test_issuer", "expires_in": 3600, "is_active": True}, - "numeric_value": 42, - "boolean_value": False, - "null_value": None, - } - - encrypted = encrypter.encrypt_oauth_params(original_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - - assert decrypted == original_params - - def test_decrypt_oauth_params_unicode_data(self): - """Test decryption with unicode data""" - encrypter = SystemOAuthEncrypter("test_secret") - original_params = { - "client_id": 
"test_id", - "client_secret": "test_secret", - "description": "This is a test case 🚀", - } - - encrypted = encrypter.encrypt_oauth_params(original_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - - assert decrypted == original_params - - def test_decrypt_oauth_params_large_data(self): - """Test decryption with large data""" - encrypter = SystemOAuthEncrypter("test_secret") - original_params = { - "client_id": "test_id", - "large_data": "x" * 10000, # 10KB of data - } - - encrypted = encrypter.encrypt_oauth_params(original_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - - assert decrypted == original_params - - def test_decrypt_oauth_params_invalid_base64(self): - """Test decryption with invalid base64 data""" - encrypter = SystemOAuthEncrypter("test_secret") - - with pytest.raises(OAuthEncryptionError): - encrypter.decrypt_oauth_params("invalid_base64!") - - def test_decrypt_oauth_params_empty_string(self): - """Test decryption with empty string""" - encrypter = SystemOAuthEncrypter("test_secret") - - with pytest.raises(ValueError) as exc_info: - encrypter.decrypt_oauth_params("") - - assert "encrypted_data cannot be empty" in str(exc_info.value) - - def test_decrypt_oauth_params_non_string_input(self): - """Test decryption with non-string input""" - encrypter = SystemOAuthEncrypter("test_secret") - - with pytest.raises(ValueError) as exc_info: - encrypter.decrypt_oauth_params(123) - - assert "encrypted_data must be a string" in str(exc_info.value) - - with pytest.raises(ValueError) as exc_info: - encrypter.decrypt_oauth_params(None) - - assert "encrypted_data must be a string" in str(exc_info.value) - - def test_decrypt_oauth_params_too_short_data(self): - """Test decryption with too short encrypted data""" - encrypter = SystemOAuthEncrypter("test_secret") - - # Create data that's too short (less than 32 bytes) - short_data = base64.b64encode(b"short").decode() - - with pytest.raises(OAuthEncryptionError) as exc_info: - 
encrypter.decrypt_oauth_params(short_data) - - assert "Invalid encrypted data format" in str(exc_info.value) - - def test_decrypt_oauth_params_corrupted_data(self): - """Test decryption with corrupted data""" - encrypter = SystemOAuthEncrypter("test_secret") - - # Create corrupted data (valid base64 but invalid encrypted content) - corrupted_data = base64.b64encode(b"x" * 48).decode() # 48 bytes of garbage - - with pytest.raises(OAuthEncryptionError): - encrypter.decrypt_oauth_params(corrupted_data) - - def test_decrypt_oauth_params_wrong_key(self): - """Test decryption with wrong key""" - encrypter1 = SystemOAuthEncrypter("secret1") - encrypter2 = SystemOAuthEncrypter("secret2") - - original_params = {"client_id": "test_id", "client_secret": "test_secret"} - encrypted = encrypter1.encrypt_oauth_params(original_params) - - with pytest.raises(OAuthEncryptionError): - encrypter2.decrypt_oauth_params(encrypted) - - def test_encryption_decryption_consistency(self): - """Test that encryption and decryption are consistent""" - encrypter = SystemOAuthEncrypter("test_secret") - - test_cases = [ - {}, - {"simple": "value"}, - {"client_id": "id", "client_secret": "secret"}, - {"complex": {"nested": {"deep": "value"}}}, - {"unicode": "test 🚀"}, - {"numbers": 42, "boolean": True, "null": None}, - {"array": [1, 2, 3, "four", {"five": 5}]}, - ] - - for original_params in test_cases: - encrypted = encrypter.encrypt_oauth_params(original_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == original_params, f"Failed for case: {original_params}" - - def test_encryption_randomness(self): - """Test that encryption produces different results for same input""" - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = {"client_id": "test_id", "client_secret": "test_secret"} - - encrypted1 = encrypter.encrypt_oauth_params(oauth_params) - encrypted2 = encrypter.encrypt_oauth_params(oauth_params) - - # Should be different due to random IV - assert 
encrypted1 != encrypted2 - - # But should decrypt to same result - decrypted1 = encrypter.decrypt_oauth_params(encrypted1) - decrypted2 = encrypter.decrypt_oauth_params(encrypted2) - assert decrypted1 == decrypted2 == oauth_params - - def test_different_secret_keys_produce_different_results(self): - """Test that different secret keys produce different encrypted results""" - encrypter1 = SystemOAuthEncrypter("secret1") - encrypter2 = SystemOAuthEncrypter("secret2") - - oauth_params = {"client_id": "test_id", "client_secret": "test_secret"} - - encrypted1 = encrypter1.encrypt_oauth_params(oauth_params) - encrypted2 = encrypter2.encrypt_oauth_params(oauth_params) - - # Should produce different encrypted results - assert encrypted1 != encrypted2 - - # But each should decrypt correctly with its own key - decrypted1 = encrypter1.decrypt_oauth_params(encrypted1) - decrypted2 = encrypter2.decrypt_oauth_params(encrypted2) - assert decrypted1 == decrypted2 == oauth_params - - @patch("core.tools.utils.system_oauth_encryption.get_random_bytes") - def test_encrypt_oauth_params_crypto_error(self, mock_get_random_bytes): - """Test encryption when crypto operation fails""" - mock_get_random_bytes.side_effect = Exception("Crypto error") - - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = {"client_id": "test_id"} - - with pytest.raises(OAuthEncryptionError) as exc_info: - encrypter.encrypt_oauth_params(oauth_params) - - assert "Encryption failed" in str(exc_info.value) - - @patch("core.tools.utils.system_oauth_encryption.TypeAdapter") - def test_encrypt_oauth_params_serialization_error(self, mock_type_adapter): - """Test encryption when JSON serialization fails""" - mock_type_adapter.return_value.dump_json.side_effect = Exception("Serialization error") - - encrypter = SystemOAuthEncrypter("test_secret") - oauth_params = {"client_id": "test_id"} - - with pytest.raises(OAuthEncryptionError) as exc_info: - encrypter.encrypt_oauth_params(oauth_params) - - assert 
"Encryption failed" in str(exc_info.value) - - def test_decrypt_oauth_params_invalid_json(self): - """Test decryption with invalid JSON data""" - encrypter = SystemOAuthEncrypter("test_secret") - - # Create valid encrypted data but with invalid JSON content - iv = get_random_bytes(16) - cipher = AES.new(encrypter.key, AES.MODE_CBC, iv) - invalid_json = b"invalid json content" - padded_data = pad(invalid_json, AES.block_size) - encrypted_data = cipher.encrypt(padded_data) - combined = iv + encrypted_data - encoded = base64.b64encode(combined).decode() - - with pytest.raises(OAuthEncryptionError): - encrypter.decrypt_oauth_params(encoded) - - def test_key_derivation_consistency(self): - """Test that key derivation is consistent""" - secret_key = "test_secret" - encrypter1 = SystemOAuthEncrypter(secret_key) - encrypter2 = SystemOAuthEncrypter(secret_key) - - assert encrypter1.key == encrypter2.key - - # Keys should be 32 bytes (256 bits) - assert len(encrypter1.key) == 32 - - -class TestFactoryFunctions: - """Test cases for factory functions""" - - def test_create_system_oauth_encrypter_with_secret(self): - """Test factory function with secret key""" - secret_key = "test_secret" - encrypter = create_system_oauth_encrypter(secret_key) - - assert isinstance(encrypter, SystemOAuthEncrypter) - expected_key = hashlib.sha256(secret_key.encode()).digest() - assert encrypter.key == expected_key - - def test_create_system_oauth_encrypter_without_secret(self): - """Test factory function without secret key""" - with patch("core.tools.utils.system_oauth_encryption.dify_config") as mock_config: - mock_config.SECRET_KEY = "config_secret" - encrypter = create_system_oauth_encrypter() - - assert isinstance(encrypter, SystemOAuthEncrypter) - expected_key = hashlib.sha256(b"config_secret").digest() - assert encrypter.key == expected_key - - def test_create_system_oauth_encrypter_with_none_secret(self): - """Test factory function with None secret key""" - with 
patch("core.tools.utils.system_oauth_encryption.dify_config") as mock_config: - mock_config.SECRET_KEY = "config_secret" - encrypter = create_system_oauth_encrypter(None) - - assert isinstance(encrypter, SystemOAuthEncrypter) - expected_key = hashlib.sha256(b"config_secret").digest() - assert encrypter.key == expected_key - - -class TestGlobalEncrypterInstance: - """Test cases for global encrypter instance""" - - def test_get_system_oauth_encrypter_singleton(self): - """Test that get_system_oauth_encrypter returns singleton instance""" - # Clear the global instance first - import core.tools.utils.system_oauth_encryption - - core.tools.utils.system_oauth_encryption._oauth_encrypter = None - - encrypter1 = get_system_oauth_encrypter() - encrypter2 = get_system_oauth_encrypter() - - assert encrypter1 is encrypter2 - assert isinstance(encrypter1, SystemOAuthEncrypter) - - def test_get_system_oauth_encrypter_uses_config(self): - """Test that global encrypter uses config""" - # Clear the global instance first - import core.tools.utils.system_oauth_encryption - - core.tools.utils.system_oauth_encryption._oauth_encrypter = None - - with patch("core.tools.utils.system_oauth_encryption.dify_config") as mock_config: - mock_config.SECRET_KEY = "global_secret" - encrypter = get_system_oauth_encrypter() - - expected_key = hashlib.sha256(b"global_secret").digest() - assert encrypter.key == expected_key - - -class TestConvenienceFunctions: - """Test cases for convenience functions""" - - def test_encrypt_system_oauth_params(self): - """Test encrypt_system_oauth_params convenience function""" - oauth_params = {"client_id": "test_id", "client_secret": "test_secret"} - - encrypted = encrypt_system_oauth_params(oauth_params) - - assert isinstance(encrypted, str) - assert len(encrypted) > 0 - - def test_decrypt_system_oauth_params(self): - """Test decrypt_system_oauth_params convenience function""" - oauth_params = {"client_id": "test_id", "client_secret": "test_secret"} - - encrypted 
= encrypt_system_oauth_params(oauth_params) - decrypted = decrypt_system_oauth_params(encrypted) - - assert decrypted == oauth_params - - def test_convenience_functions_consistency(self): - """Test that convenience functions work consistently""" - test_cases = [ - {}, - {"simple": "value"}, - {"client_id": "id", "client_secret": "secret"}, - {"complex": {"nested": {"deep": "value"}}}, - {"unicode": "test 🚀"}, - {"numbers": 42, "boolean": True, "null": None}, - ] - - for original_params in test_cases: - encrypted = encrypt_system_oauth_params(original_params) - decrypted = decrypt_system_oauth_params(encrypted) - assert decrypted == original_params, f"Failed for case: {original_params}" - - def test_convenience_functions_with_errors(self): - """Test convenience functions with error conditions""" - # Test encryption with invalid input - with pytest.raises(Exception): # noqa: B017 - encrypt_system_oauth_params(None) - - # Test decryption with invalid input - with pytest.raises(ValueError): - decrypt_system_oauth_params("") - - with pytest.raises(ValueError): - decrypt_system_oauth_params(None) - - -class TestErrorHandling: - """Test cases for error handling""" - - def test_oauth_encryption_error_inheritance(self): - """Test that OAuthEncryptionError is a proper exception""" - error = OAuthEncryptionError("Test error") - assert isinstance(error, Exception) - assert str(error) == "Test error" - - def test_oauth_encryption_error_with_cause(self): - """Test OAuthEncryptionError with cause""" - original_error = ValueError("Original error") - error = OAuthEncryptionError("Wrapper error") - error.__cause__ = original_error - - assert isinstance(error, Exception) - assert str(error) == "Wrapper error" - assert error.__cause__ is original_error - - def test_error_messages_are_informative(self): - """Test that error messages are informative""" - encrypter = SystemOAuthEncrypter("test_secret") - - # Test empty string error - with pytest.raises(ValueError) as exc_info: - 
encrypter.decrypt_oauth_params("") - assert "encrypted_data cannot be empty" in str(exc_info.value) - - # Test non-string error - with pytest.raises(ValueError) as exc_info: - encrypter.decrypt_oauth_params(123) - assert "encrypted_data must be a string" in str(exc_info.value) - - # Test invalid format error - short_data = base64.b64encode(b"short").decode() - with pytest.raises(OAuthEncryptionError) as exc_info: - encrypter.decrypt_oauth_params(short_data) - assert "Invalid encrypted data format" in str(exc_info.value) - - -class TestEdgeCases: - """Test cases for edge cases and boundary conditions""" - - def test_very_long_secret_key(self): - """Test with very long secret key""" - long_secret = "x" * 10000 - encrypter = SystemOAuthEncrypter(long_secret) - - # Key should still be 32 bytes due to SHA-256 - assert len(encrypter.key) == 32 - - # Should still work normally - oauth_params = {"client_id": "test_id"} - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - def test_special_characters_in_secret_key(self): - """Test with special characters in secret key""" - special_secret = "!@#$%^&*()_+-=[]{}|;':\",./<>?`~test🚀" - encrypter = SystemOAuthEncrypter(special_secret) - - oauth_params = {"client_id": "test_id"} - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - def test_empty_values_in_oauth_params(self): - """Test with empty values in oauth params""" - oauth_params = { - "client_id": "", - "client_secret": "", - "empty_dict": {}, - "empty_list": [], - "empty_string": "", - "zero": 0, - "false": False, - "none": None, - } - - encrypter = SystemOAuthEncrypter("test_secret") - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - def 
test_deeply_nested_oauth_params(self): - """Test with deeply nested oauth params""" - oauth_params = {"level1": {"level2": {"level3": {"level4": {"level5": {"deep_value": "found"}}}}}} - - encrypter = SystemOAuthEncrypter("test_secret") - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - def test_oauth_params_with_all_json_types(self): - """Test with all JSON-supported data types""" - oauth_params = { - "string": "test_string", - "integer": 42, - "float": 3.14159, - "boolean_true": True, - "boolean_false": False, - "null_value": None, - "empty_string": "", - "array": [1, "two", 3.0, True, False, None], - "object": {"nested_string": "nested_value", "nested_number": 123, "nested_bool": True}, - } - - encrypter = SystemOAuthEncrypter("test_secret") - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - -class TestPerformance: - """Test cases for performance considerations""" - - def test_large_oauth_params(self): - """Test with large oauth params""" - large_value = "x" * 100000 # 100KB - oauth_params = {"client_id": "test_id", "large_data": large_value} - - encrypter = SystemOAuthEncrypter("test_secret") - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - def test_many_fields_oauth_params(self): - """Test with many fields in oauth params""" - oauth_params = {f"field_{i}": f"value_{i}" for i in range(1000)} - - encrypter = SystemOAuthEncrypter("test_secret") - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params - - def test_repeated_encryption_decryption(self): - """Test repeated encryption and decryption operations""" - encrypter = SystemOAuthEncrypter("test_secret") - 
oauth_params = {"client_id": "test_id", "client_secret": "test_secret"} - - # Test multiple rounds of encryption/decryption - for i in range(100): - encrypted = encrypter.encrypt_oauth_params(oauth_params) - decrypted = encrypter.decrypt_oauth_params(encrypted) - assert decrypted == oauth_params diff --git a/api/uv.lock b/api/uv.lock index bab24a87d2..7d6777fa06 100644 --- a/api/uv.lock +++ b/api/uv.lock @@ -1627,7 +1627,7 @@ dev = [ { name = "lxml-stubs", specifier = ">=0.5.1" }, { name = "mypy", specifier = ">=1.20.1" }, { name = "pandas-stubs", specifier = ">=3.0.0" }, - { name = "pyrefly", specifier = ">=0.61.1" }, + { name = "pyrefly", specifier = ">=0.62.0" }, { name = "pytest", specifier = ">=9.0.3" }, { name = "pytest-benchmark", specifier = ">=5.2.3" }, { name = "pytest-cov", specifier = ">=7.1.0" }, @@ -3687,28 +3687,28 @@ wheels = [ [[package]] name = "lxml" -version = "6.0.2" +version = "6.1.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/aa/88/262177de60548e5a2bfc46ad28232c9e9cbde697bd94132aeb80364675cb/lxml-6.0.2.tar.gz", hash = "sha256:cd79f3367bd74b317dda655dc8fcfa304d9eb6e4fb06b7168c5cf27f96e0cd62", size = 4073426, upload-time = "2025-09-22T04:04:59.287Z" } +sdist = { url = "https://files.pythonhosted.org/packages/28/30/9abc9e34c657c33834eaf6cd02124c61bdf5944d802aa48e69be8da3585d/lxml-6.1.0.tar.gz", hash = "sha256:bfd57d8008c4965709a919c3e9a98f76c2c7cb319086b3d26858250620023b13", size = 4197006, upload-time = "2026-04-18T04:32:51.613Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/c8/8ff2bc6b920c84355146cd1ab7d181bc543b89241cfb1ebee824a7c81457/lxml-6.0.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a59f5448ba2ceccd06995c95ea59a7674a10de0810f2ce90c9006f3cbc044456", size = 8661887, upload-time = "2025-09-22T04:01:17.265Z" }, - { url = 
"https://files.pythonhosted.org/packages/37/6f/9aae1008083bb501ef63284220ce81638332f9ccbfa53765b2b7502203cf/lxml-6.0.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:e8113639f3296706fbac34a30813929e29247718e88173ad849f57ca59754924", size = 4667818, upload-time = "2025-09-22T04:01:19.688Z" }, - { url = "https://files.pythonhosted.org/packages/f1/ca/31fb37f99f37f1536c133476674c10b577e409c0a624384147653e38baf2/lxml-6.0.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a8bef9b9825fa8bc816a6e641bb67219489229ebc648be422af695f6e7a4fa7f", size = 4950807, upload-time = "2025-09-22T04:01:21.487Z" }, - { url = "https://files.pythonhosted.org/packages/da/87/f6cb9442e4bada8aab5ae7e1046264f62fdbeaa6e3f6211b93f4c0dd97f1/lxml-6.0.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:65ea18d710fd14e0186c2f973dc60bb52039a275f82d3c44a0e42b43440ea534", size = 5109179, upload-time = "2025-09-22T04:01:23.32Z" }, - { url = "https://files.pythonhosted.org/packages/c8/20/a7760713e65888db79bbae4f6146a6ae5c04e4a204a3c48896c408cd6ed2/lxml-6.0.2-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c371aa98126a0d4c739ca93ceffa0fd7a5d732e3ac66a46e74339acd4d334564", size = 5023044, upload-time = "2025-09-22T04:01:25.118Z" }, - { url = "https://files.pythonhosted.org/packages/a2/b0/7e64e0460fcb36471899f75831509098f3fd7cd02a3833ac517433cb4f8f/lxml-6.0.2-cp312-cp312-manylinux_2_26_i686.manylinux_2_28_i686.whl", hash = "sha256:700efd30c0fa1a3581d80a748157397559396090a51d306ea59a70020223d16f", size = 5359685, upload-time = "2025-09-22T04:01:27.398Z" }, - { url = "https://files.pythonhosted.org/packages/b9/e1/e5df362e9ca4e2f48ed6411bd4b3a0ae737cc842e96877f5bf9428055ab4/lxml-6.0.2-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c33e66d44fe60e72397b487ee92e01da0d09ba2d66df8eae42d77b6d06e5eba0", size = 5654127, upload-time = "2025-09-22T04:01:29.629Z" }, - { url = 
"https://files.pythonhosted.org/packages/c6/d1/232b3309a02d60f11e71857778bfcd4acbdb86c07db8260caf7d008b08f8/lxml-6.0.2-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:90a345bbeaf9d0587a3aaffb7006aa39ccb6ff0e96a57286c0cb2fd1520ea192", size = 5253958, upload-time = "2025-09-22T04:01:31.535Z" }, - { url = "https://files.pythonhosted.org/packages/35/35/d955a070994725c4f7d80583a96cab9c107c57a125b20bb5f708fe941011/lxml-6.0.2-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:064fdadaf7a21af3ed1dcaa106b854077fbeada827c18f72aec9346847cd65d0", size = 4711541, upload-time = "2025-09-22T04:01:33.801Z" }, - { url = "https://files.pythonhosted.org/packages/1e/be/667d17363b38a78c4bd63cfd4b4632029fd68d2c2dc81f25ce9eb5224dd5/lxml-6.0.2-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:fbc74f42c3525ac4ffa4b89cbdd00057b6196bcefe8bce794abd42d33a018092", size = 5267426, upload-time = "2025-09-22T04:01:35.639Z" }, - { url = "https://files.pythonhosted.org/packages/ea/47/62c70aa4a1c26569bc958c9ca86af2bb4e1f614e8c04fb2989833874f7ae/lxml-6.0.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6ddff43f702905a4e32bc24f3f2e2edfe0f8fde3277d481bffb709a4cced7a1f", size = 5064917, upload-time = "2025-09-22T04:01:37.448Z" }, - { url = "https://files.pythonhosted.org/packages/bd/55/6ceddaca353ebd0f1908ef712c597f8570cc9c58130dbb89903198e441fd/lxml-6.0.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:6da5185951d72e6f5352166e3da7b0dc27aa70bd1090b0eb3f7f7212b53f1bb8", size = 4788795, upload-time = "2025-09-22T04:01:39.165Z" }, - { url = "https://files.pythonhosted.org/packages/cf/e8/fd63e15da5e3fd4c2146f8bbb3c14e94ab850589beab88e547b2dbce22e1/lxml-6.0.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:57a86e1ebb4020a38d295c04fc79603c7899e0df71588043eb218722dabc087f", size = 5676759, upload-time = "2025-09-22T04:01:41.506Z" }, - { url = 
"https://files.pythonhosted.org/packages/76/47/b3ec58dc5c374697f5ba37412cd2728f427d056315d124dd4b61da381877/lxml-6.0.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:2047d8234fe735ab77802ce5f2297e410ff40f5238aec569ad7c8e163d7b19a6", size = 5255666, upload-time = "2025-09-22T04:01:43.363Z" }, - { url = "https://files.pythonhosted.org/packages/19/93/03ba725df4c3d72afd9596eef4a37a837ce8e4806010569bedfcd2cb68fd/lxml-6.0.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:6f91fd2b2ea15a6800c8e24418c0775a1694eefc011392da73bc6cef2623b322", size = 5277989, upload-time = "2025-09-22T04:01:45.215Z" }, - { url = "https://files.pythonhosted.org/packages/c6/80/c06de80bfce881d0ad738576f243911fccf992687ae09fd80b734712b39c/lxml-6.0.2-cp312-cp312-win32.whl", hash = "sha256:3ae2ce7d6fedfb3414a2b6c5e20b249c4c607f72cb8d2bb7cc9c6ec7c6f4e849", size = 3611456, upload-time = "2025-09-22T04:01:48.243Z" }, - { url = "https://files.pythonhosted.org/packages/f7/d7/0cdfb6c3e30893463fb3d1e52bc5f5f99684a03c29a0b6b605cfae879cd5/lxml-6.0.2-cp312-cp312-win_amd64.whl", hash = "sha256:72c87e5ee4e58a8354fb9c7c84cbf95a1c8236c127a5d1b7683f04bed8361e1f", size = 4011793, upload-time = "2025-09-22T04:01:50.042Z" }, - { url = "https://files.pythonhosted.org/packages/ea/7b/93c73c67db235931527301ed3785f849c78991e2e34f3fd9a6663ffda4c5/lxml-6.0.2-cp312-cp312-win_arm64.whl", hash = "sha256:61cb10eeb95570153e0c0e554f58df92ecf5109f75eacad4a95baa709e26c3d6", size = 3672836, upload-time = "2025-09-22T04:01:52.145Z" }, + { url = "https://files.pythonhosted.org/packages/d2/d4/9326838b59dc36dfae42eec9656b97520f9997eee1de47b8316aaeed169c/lxml-6.1.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:d2f17a16cd8751e8eb233a7e41aecdf8e511712e00088bf9be455f604cd0d28d", size = 8570663, upload-time = "2026-04-18T04:27:48.253Z" }, + { url = "https://files.pythonhosted.org/packages/d8/a4/053745ce1f8303ccbb788b86c0db3a91b973675cefc42566a188637b7c40/lxml-6.1.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = 
"sha256:f0cea5b1d3e6e77d71bd2b9972eb2446221a69dc52bb0b9c3c6f6e5700592d93", size = 4624024, upload-time = "2026-04-18T04:27:52.594Z" }, + { url = "https://files.pythonhosted.org/packages/90/97/a517944b20f8fd0932ad2109482bee4e29fe721416387a363306667941f6/lxml-6.1.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fc46da94826188ed45cb53bd8e3fc076ae22675aea2087843d4735627f867c6d", size = 4930895, upload-time = "2026-04-18T04:32:56.29Z" }, + { url = "https://files.pythonhosted.org/packages/94/7c/e08a970727d556caa040a44773c7b7e3ad0f0d73dedc863543e9a8b931f2/lxml-6.1.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9147d8e386ec3b82c3b15d88927f734f565b0aaadef7def562b853adca45784a", size = 5093820, upload-time = "2026-04-18T04:32:58.94Z" }, + { url = "https://files.pythonhosted.org/packages/88/ee/2a5c2aa2c32016a226ca25d3e1056a8102ea6e1fe308bf50213586635400/lxml-6.1.0-cp312-cp312-manylinux_2_26_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5715e0e28736a070f3f34a7ccc09e2fdcba0e3060abbcf61a1a5718ff6d6b105", size = 5005790, upload-time = "2026-04-18T04:33:01.272Z" }, + { url = "https://files.pythonhosted.org/packages/e3/38/a0db9be8f38ad6043ab9429487c128dd1d30f07956ef43040402f8da49e8/lxml-6.1.0-cp312-cp312-manylinux_2_26_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4937460dc5df0cdd2f06a86c285c28afda06aefa3af949f9477d3e8df430c485", size = 5630827, upload-time = "2026-04-18T04:33:04.036Z" }, + { url = "https://files.pythonhosted.org/packages/31/ba/3c13d3fc24b7cacf675f808a3a1baabf43a30d0cd24c98f94548e9aa58eb/lxml-6.1.0-cp312-cp312-manylinux_2_26_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bc783ee3147e60a25aa0445ea82b3e8aabb83b240f2b95d32cb75587ff781814", size = 5240445, upload-time = "2026-04-18T04:33:06.87Z" }, + { url = "https://files.pythonhosted.org/packages/55/ba/eeef4ccba09b2212fe239f46c1692a98db1878e0872ae320756488878a94/lxml-6.1.0-cp312-cp312-manylinux_2_28_i686.whl", hash = 
"sha256:40d9189f80075f2e1f88db21ef815a2b17b28adf8e50aaf5c789bfe737027f32", size = 5350121, upload-time = "2026-04-18T04:33:09.365Z" }, + { url = "https://files.pythonhosted.org/packages/7e/01/1da87c7b587c38d0cbe77a01aae3b9c1c49ed47d76918ef3db8fc151b1ca/lxml-6.1.0-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:05b9b8787e35bec69e68daf4952b2e6dfcfb0db7ecf1a06f8cdfbbac4eb71aad", size = 4694949, upload-time = "2026-04-18T04:33:11.628Z" }, + { url = "https://files.pythonhosted.org/packages/a1/88/7db0fe66d5aaf128443ee1623dec3db1576f3e4c17751ec0ef5866468590/lxml-6.1.0-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:0f0f08beb0182e3e9a86fae124b3c47a7b41b7b69b225e1377db983802404e54", size = 5243901, upload-time = "2026-04-18T04:33:13.95Z" }, + { url = "https://files.pythonhosted.org/packages/00/a8/1346726af7d1f6fca1f11223ba34001462b0a3660416986d37641708d57c/lxml-6.1.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73becf6d8c81d4c76b1014dbd3584cb26d904492dcf73ca85dc8bff08dcd6d2d", size = 5048054, upload-time = "2026-04-18T04:33:16.965Z" }, + { url = "https://files.pythonhosted.org/packages/2e/b7/85057012f035d1a0c87e02f8c723ca3c3e6e0728bcf4cb62080b21b1c1e3/lxml-6.1.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1ae225f66e5938f4fa29d37e009a3bb3b13032ac57eb4eb42afa44f6e4054e69", size = 4777324, upload-time = "2026-04-18T04:33:19.832Z" }, + { url = "https://files.pythonhosted.org/packages/75/6c/ad2f94a91073ef570f33718040e8e160d5fb93331cf1ab3ca1323f939e2d/lxml-6.1.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:690022c7fae793b0489aa68a658822cea83e0d5933781811cabbf5ea3bcfe73d", size = 5645702, upload-time = "2026-04-18T04:33:22.436Z" }, + { url = "https://files.pythonhosted.org/packages/3b/89/0bb6c0bd549c19004c60eea9dc554dd78fd647b72314ef25d460e0d208c6/lxml-6.1.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:63aeafc26aac0be8aff14af7871249e87ea1319be92090bfd632ec68e03b16a5", size = 5232901, upload-time = 
"2026-04-18T04:33:26.21Z" }, + { url = "https://files.pythonhosted.org/packages/a1/d9/d609a11fb567da9399f525193e2b49847b5a409cdebe737f06a8b7126bdc/lxml-6.1.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:264c605ab9c0e4aa1a679636f4582c4d3313700009fac3ec9c3412ed0d8f3e1d", size = 5261333, upload-time = "2026-04-18T04:33:28.984Z" }, + { url = "https://files.pythonhosted.org/packages/a6/3a/ac3f99ec8ac93089e7dd556f279e0d14c24de0a74a507e143a2e4b496e7c/lxml-6.1.0-cp312-cp312-win32.whl", hash = "sha256:56971379bc5ee8037c5a0f09fa88f66cdb7d37c3e38af3e45cf539f41131ac1f", size = 3596289, upload-time = "2026-04-18T04:27:42.819Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a7/0a915557538593cb1bbeedcd40e13c7a261822c26fecbbdb71dad0c2f540/lxml-6.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:bba078de0031c219e5dd06cf3e6bf8fb8e6e64a77819b358f53bb132e3e03366", size = 3997059, upload-time = "2026-04-18T04:27:46.764Z" }, + { url = "https://files.pythonhosted.org/packages/92/96/a5dc078cf0126fbfbc35611d77ecd5da80054b5893e28fb213a5613b9e1d/lxml-6.1.0-cp312-cp312-win_arm64.whl", hash = "sha256:c3592631e652afa34999a088f98ba7dfc7d6aff0d535c410bea77a71743f3819", size = 3659552, upload-time = "2026-04-18T04:27:51.133Z" }, ] [[package]] @@ -5357,19 +5357,19 @@ wheels = [ [[package]] name = "pyrefly" -version = "0.61.1" +version = "0.62.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/b0/c8/52fce3f0e3718d9ff71d16af41cef925e58613741328004d3aa3fe585057/pyrefly-0.61.1.tar.gz", hash = "sha256:2a871320b7d2b28b8635064b620097d7091e84c49e4808d915ad31dad685d0f5", size = 5535788, upload-time = "2026-04-17T18:47:33.958Z" } +sdist = { url = "https://files.pythonhosted.org/packages/bb/ad/8874ed25781e7dd561c6d75fb4a7becf10a18d75b074f25b845cc334f781/pyrefly-0.62.0.tar.gz", hash = "sha256:da1fbe1075dc1e6c8e3134e9370b0a0e7a296061d782cca5bf83dbb8e4c10d7c", size = 5537672, upload-time = "2026-04-20T17:12:15.718Z" } wheels = [ - { url 
= "https://files.pythonhosted.org/packages/61/38/e94ff401405a05fbf81c9bbfa993a34ffd03be84812b545063c8efb56b44/pyrefly-0.61.1-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:6e3ed857b99291fc4aa3b54ce22deb086c0174cf3a3775eccea7439efd16d925", size = 12969301, upload-time = "2026-04-17T18:47:06.036Z" }, - { url = "https://files.pythonhosted.org/packages/f3/be/53c7f9400696e46633c8cee8b6fd32ce7ab4a965ddf9ac4f4ea9e2034647/pyrefly-0.61.1-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cf6335c1baf9470ca8113f7ea8bdbd0b96081c82a911157c576cdfc8a67a9a87", size = 12475413, upload-time = "2026-04-17T18:47:08.863Z" }, - { url = "https://files.pythonhosted.org/packages/77/68/83cc3267620b14f81fa596a84efc7ebcf5c49f79b521499e85d1a4fca6d8/pyrefly-0.61.1-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:844b5baddc2a631f69648a4756c54c97d86e4b9c07e335b216668e24390b77b6", size = 36074785, upload-time = "2026-04-17T18:47:11.845Z" }, - { url = "https://files.pythonhosted.org/packages/d8/00/e8d437995b8dcea022f5310bc873f5de1dcc71da4876d5be917ee9a93fef/pyrefly-0.61.1-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eaa294f90622c5b3743af8e9de4263447f22bb0e8b60c80cf83292adb4f2d14b", size = 38802979, upload-time = "2026-04-17T18:47:16.058Z" }, - { url = "https://files.pythonhosted.org/packages/16/3f/f1cbc58e8875608ae740d9575de95c8bc6d4dce202f82b4fe90005727618/pyrefly-0.61.1-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a8d8c3fe08b9593dce23ad4bc7c393891a379c2d580aa1f263182567721bd6f", size = 37029339, upload-time = "2026-04-17T18:47:19.601Z" }, - { url = "https://files.pythonhosted.org/packages/18/8c/0ff67041c88c28f48b10ce15758831d1e4e60f11db5bfc09dcffd5edb6ba/pyrefly-0.61.1-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:305f2086f4d7d796244b337884d96cf0d32435420336a77840ca369cf6fa06fd", size = 41595667, upload-time = "2026-04-17T18:47:23.122Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/9e/62b8139b140931593a6b29334802ea6b86d033c0bfd9794950279732253b/pyrefly-0.61.1-py3-none-win32.whl", hash = "sha256:3271a019885a72c8dd064e928bb445af807771506842f5f2faaac17d8e6e73a5", size = 11963660, upload-time = "2026-04-17T18:47:25.86Z" }, - { url = "https://files.pythonhosted.org/packages/38/6e/73280243d12bec28f55b6edd4e70c5cf11e3d7de2395ecb4eb36cca7dab4/pyrefly-0.61.1-py3-none-win_amd64.whl", hash = "sha256:3e3763d5d76f505c5b8897db1446bde8e138d50a67751f2aa76d6c6034254836", size = 12804056, upload-time = "2026-04-17T18:47:28.674Z" }, - { url = "https://files.pythonhosted.org/packages/87/32/38ac5af84d96167412024abf5e2f49f15b777987a1942e7a442e8e5fef82/pyrefly-0.61.1-py3-none-win_arm64.whl", hash = "sha256:cef5631e2ab09702274ec2eaaafee28a114891cf85f2d31568b329727e3ff735", size = 12302467, upload-time = "2026-04-17T18:47:31.409Z" }, + { url = "https://files.pythonhosted.org/packages/1b/ea/09bd9da7d5df294db800312fb415be2fefbaa5594178e9e49f44fa071aea/pyrefly-0.62.0-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:9d78ec4f126dee1fa76215b193b964490ce10e62a32d2787a72c51623658b803", size = 13020414, upload-time = "2026-04-20T17:11:43.617Z" }, + { url = "https://files.pythonhosted.org/packages/4b/f0/f84afac4f220c4c8c801b779ee2ff28ad3f7731f4283c2e1b6ee9012e8c2/pyrefly-0.62.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:2a41a34902d20756264486f9e309f22633d100261bd960feea6e858a098d985d", size = 12515659, upload-time = "2026-04-20T17:11:46.59Z" }, + { url = "https://files.pythonhosted.org/packages/40/0b/620c39cefa9ae1b25ee7a2da9d8d3c278b095649cb8435c5e01ea64f7c17/pyrefly-0.62.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4666c6b65aea662e5f77b64dc91c091b7ea5cede6aa66c0f4cbae26480403583", size = 36228332, upload-time = "2026-04-20T17:11:50.523Z" }, + { url = 
"https://files.pythonhosted.org/packages/2d/fb/47b8b76438c12761e509a3666cd5a99d4af7f21976ba8385feb475cbfe30/pyrefly-0.62.0-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1aefab798f47d37c13ded791192fee9b39a6d2b12e31f38ae06a1f80c4b26e22", size = 38995741, upload-time = "2026-04-20T17:11:54.702Z" }, + { url = "https://files.pythonhosted.org/packages/55/d2/03bd17673f61147cd5609cd7d6a1455eeccc17a07a7e141ed9931b0c42c0/pyrefly-0.62.0-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8fa986b50d56740da1d7ae7c660a505143cb9d286fa98cc7e5f4a759cc6eaa5d", size = 37205321, upload-time = "2026-04-20T17:11:58.9Z" }, + { url = "https://files.pythonhosted.org/packages/75/14/20ba7b7f2d182f9b7c1e24a3041dac9b5730ae28cfe1614a2c98706650f2/pyrefly-0.62.0-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:32e9b175805c82ffb967e4708f4910bace7e1a12736907380cc9afdbaabb0efb", size = 41786834, upload-time = "2026-04-20T17:12:03.221Z" }, + { url = "https://files.pythonhosted.org/packages/fa/c8/5a7ba88c4fa1b5090d877f70fa1b742b921b9e7d8d3f4b6b9b1ba1820850/pyrefly-0.62.0-py3-none-win32.whl", hash = "sha256:1cd98edc20cab5bac8016c9220ee66080e39bd22e7f0e9bb3e2c4e2be1555eed", size = 12010170, upload-time = "2026-04-20T17:12:06.791Z" }, + { url = "https://files.pythonhosted.org/packages/2e/78/d8f810de010ff2ed594c630c724fd817ef430963249e9eb396ce8f785e9d/pyrefly-0.62.0-py3-none-win_amd64.whl", hash = "sha256:6994f8ee7d6720325ee52207fbdaca98a799a1efe462bb5ba90c47160f7f3e6e", size = 12861816, upload-time = "2026-04-20T17:12:09.689Z" }, + { url = "https://files.pythonhosted.org/packages/c7/a9/ac824ef6a3f50b7c0ec5974471f8f2cb205cd1edd53a5abbcf7ba37feb5d/pyrefly-0.62.0-py3-none-win_arm64.whl", hash = "sha256:362a5d47a5ac5aaa5258091e878a1759ff8b687d8cf462af1c516144f7b0108a", size = 12352977, upload-time = "2026-04-20T17:12:12.736Z" }, ] [[package]] diff --git a/docker/.env.example b/docker/.env.example index ec7d572057..29741474fa 100644 --- 
a/docker/.env.example +++ b/docker/.env.example @@ -1467,6 +1467,11 @@ ENDPOINT_URL_TEMPLATE=http://localhost/e/{hook_id} MARKETPLACE_ENABLED=true MARKETPLACE_API_URL=https://marketplace.dify.ai +# Creators Platform configuration +CREATORS_PLATFORM_FEATURES_ENABLED=true +CREATORS_PLATFORM_API_URL=https://creators.dify.ai +CREATORS_PLATFORM_OAUTH_CLIENT_ID= + FORCE_VERIFYING_SIGNATURE=true ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES=true diff --git a/docker/docker-compose.yaml b/docker/docker-compose.yaml index aaf099453a..60ba510f44 100644 --- a/docker/docker-compose.yaml +++ b/docker/docker-compose.yaml @@ -629,6 +629,9 @@ x-shared-env: &shared-api-worker-env ENDPOINT_URL_TEMPLATE: ${ENDPOINT_URL_TEMPLATE:-http://localhost/e/{hook_id}} MARKETPLACE_ENABLED: ${MARKETPLACE_ENABLED:-true} MARKETPLACE_API_URL: ${MARKETPLACE_API_URL:-https://marketplace.dify.ai} + CREATORS_PLATFORM_FEATURES_ENABLED: ${CREATORS_PLATFORM_FEATURES_ENABLED:-true} + CREATORS_PLATFORM_API_URL: ${CREATORS_PLATFORM_API_URL:-https://creators.dify.ai} + CREATORS_PLATFORM_OAUTH_CLIENT_ID: ${CREATORS_PLATFORM_OAUTH_CLIENT_ID:-} FORCE_VERIFYING_SIGNATURE: ${FORCE_VERIFYING_SIGNATURE:-true} ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES: ${ENFORCE_LANGGENIUS_PLUGIN_SIGNATURES:-true} PLUGIN_STDIO_BUFFER_SIZE: ${PLUGIN_STDIO_BUFFER_SIZE:-1024} diff --git a/docs/suggested-questions-configuration.md b/docs/suggested-questions-configuration.md deleted file mode 100644 index c726d3b157..0000000000 --- a/docs/suggested-questions-configuration.md +++ /dev/null @@ -1,253 +0,0 @@ -# Configurable Suggested Questions After Answer - -This document explains how to configure the "Suggested Questions After Answer" feature in Dify using environment variables. - -## Overview - -The suggested questions feature generates follow-up questions after each AI response to help users continue the conversation. 
By default, Dify generates 3 short questions (under 20 characters each), but you can customize this behavior to better fit your specific use case. - -## Environment Variables - -### `SUGGESTED_QUESTIONS_PROMPT` - -**Description**: Custom prompt template for generating suggested questions. - -**Default**: - -``` -Please help me predict the three most likely questions that human would ask, and keep each question under 20 characters. -MAKE SURE your output is the SAME language as the Assistant's latest response. -The output must be an array in JSON format following the specified schema: -["question1","question2","question3"] -``` - -**Usage Examples**: - -1. **Technical/Developer Questions (Your Use Case)**: - - ```bash - export SUGGESTED_QUESTIONS_PROMPT='Please help me predict the five most likely technical follow-up questions a developer would ask. Focus on implementation details, best practices, and architecture considerations. Keep each question between 40-60 characters. Output must be JSON array: ["question1","question2","question3","question4","question5"]' - ``` - -1. **Customer Support**: - - ```bash - export SUGGESTED_QUESTIONS_PROMPT='Generate 3 helpful follow-up questions that guide customers toward solving their own problems. Focus on troubleshooting steps and common issues. Keep questions under 30 characters. JSON format: ["q1","q2","q3"]' - ``` - -1. **Educational Content**: - - ```bash - export SUGGESTED_QUESTIONS_PROMPT='Create 4 thought-provoking questions that help students deeper understand the topic. Focus on concepts, relationships, and applications. Questions should be 25-40 characters. JSON: ["question1","question2","question3","question4"]' - ``` - -1. **Multilingual Support**: - - ```bash - export SUGGESTED_QUESTIONS_PROMPT='Generate exactly 3 follow-up questions in the same language as the conversation. Adapt question length appropriately for the language (Chinese: 10-15 chars, English: 20-30 chars, Arabic: 25-35 chars). 
Always output valid JSON array.' - ``` - -**Important Notes**: - -- The prompt must request JSON array output format -- Include language matching instructions for multilingual support -- Specify clear character limits or question count requirements -- Focus on your specific domain or use case - -### `SUGGESTED_QUESTIONS_MAX_TOKENS` - -**Description**: Maximum number of tokens for the LLM response. - -**Default**: `256` - -**Usage**: - -```bash -export SUGGESTED_QUESTIONS_MAX_TOKENS=512 # For longer questions or more questions -``` - -**Recommended Values**: - -- `256`: Default, good for 3-4 short questions -- `384`: Medium, good for 4-5 medium-length questions -- `512`: High, good for 5+ longer questions or complex prompts -- `1024`: Maximum, for very complex question generation - -### `SUGGESTED_QUESTIONS_TEMPERATURE` - -**Description**: Temperature parameter for LLM creativity. - -**Default**: `0.0` - -**Usage**: - -```bash -export SUGGESTED_QUESTIONS_TEMPERATURE=0.3 # Balanced creativity -``` - -**Recommended Values**: - -- `0.0-0.2`: Very focused, predictable questions (good for technical support) -- `0.3-0.5`: Balanced creativity and relevance (good for general use) -- `0.6-0.8`: More creative, diverse questions (good for brainstorming) -- `0.9-1.0`: Maximum creativity (good for educational exploration) - -## Configuration Examples - -### Example 1: Developer Documentation Chatbot - -```bash -# .env file -SUGGESTED_QUESTIONS_PROMPT='Generate exactly 5 technical follow-up questions that developers would ask after reading code documentation. Focus on implementation details, edge cases, performance considerations, and best practices. Each question should be 40-60 characters long. 
Output as JSON array: ["question1","question2","question3","question4","question5"]' -SUGGESTED_QUESTIONS_MAX_TOKENS=512 -SUGGESTED_QUESTIONS_TEMPERATURE=0.3 -``` - -### Example 2: Customer Service Bot - -```bash -# .env file -SUGGESTED_QUESTIONS_PROMPT='Create 3 actionable follow-up questions that help customers resolve their own issues. Focus on common problems, troubleshooting steps, and product features. Keep questions simple and under 25 characters. JSON: ["q1","q2","q3"]' -SUGGESTED_QUESTIONS_MAX_TOKENS=256 -SUGGESTED_QUESTIONS_TEMPERATURE=0.1 -``` - -### Example 3: Educational Tutor - -```bash -# .env file -SUGGESTED_QUESTIONS_PROMPT='Generate 4 thought-provoking questions that help students deepen their understanding of the topic. Focus on relationships between concepts, practical applications, and critical thinking. Questions should be 30-45 characters. Output: ["question1","question2","question3","question4"]' -SUGGESTED_QUESTIONS_MAX_TOKENS=384 -SUGGESTED_QUESTIONS_TEMPERATURE=0.6 -``` - -## Implementation Details - -### How It Works - -1. **Environment Variable Loading**: The system checks for environment variables at startup -1. **Fallback to Defaults**: If no environment variables are set, original behavior is preserved -1. **Prompt Template**: The custom prompt is used as-is, allowing full control over question generation -1. **LLM Parameters**: Custom max_tokens and temperature are passed to the LLM API -1. 
**JSON Parsing**: The system expects JSON array output and parses it accordingly - -### File Changes - -The implementation modifies these files: - -- `api/core/llm_generator/prompts.py`: Environment variable support -- `api/core/llm_generator/llm_generator.py`: Custom LLM parameters -- `api/.env.example`: Documentation of new variables - -### Backward Compatibility - -- ✅ **Zero Breaking Changes**: Works exactly as before if no environment variables are set -- ✅ **Default Behavior Preserved**: Original prompt and parameters used as fallbacks -- ✅ **No Database Changes**: Pure environment variable configuration -- ✅ **No UI Changes Required**: Configuration happens at deployment level - -## Testing Your Configuration - -### Local Testing - -1. Set environment variables: - - ```bash - export SUGGESTED_QUESTIONS_PROMPT='Your test prompt...' - export SUGGESTED_QUESTIONS_MAX_TOKENS=300 - export SUGGESTED_QUESTIONS_TEMPERATURE=0.4 - ``` - -1. Start Dify API: - - ```bash - cd api - python -m flask run --host 0.0.0.0 --port=5001 --debug - ``` - -1. Test the feature in your chat application and verify the questions match your expectations. - -### Monitoring - -Monitor the following when testing: - -- **Question Quality**: Are questions relevant and helpful? -- **Language Matching**: Do questions match the conversation language? -- **JSON Format**: Is output properly formatted as JSON array? -- **Length Constraints**: Do questions follow your length requirements? -- **Response Time**: Are the custom parameters affecting performance? - -## Troubleshooting - -### Common Issues - -1. **Invalid JSON Output**: - - - **Problem**: LLM doesn't return valid JSON - - **Solution**: Make sure your prompt explicitly requests JSON array format - -1. **Questions Too Long/Short**: - - - **Problem**: Questions don't follow length constraints - - **Solution**: Be more specific about character limits in your prompt - -1. 
**Too Few/Many Questions**: - - - **Problem**: Wrong number of questions generated - - **Solution**: Clearly specify the exact number in your prompt - -1. **Language Mismatch**: - - - **Problem**: Questions in wrong language - - **Solution**: Include explicit language matching instructions in prompt - -1. **Performance Issues**: - - - **Problem**: Slow response times - - **Solution**: Reduce `SUGGESTED_QUESTIONS_MAX_TOKENS` or simplify prompt - -### Debug Logging - -To debug your configuration, you can temporarily add logging to see the actual prompt and parameters being used: - -```python -import logging -logger = logging.getLogger(__name__) - -# In llm_generator.py -logger.info(f"Suggested questions prompt: {prompt}") -logger.info(f"Max tokens: {SUGGESTED_QUESTIONS_MAX_TOKENS}") -logger.info(f"Temperature: {SUGGESTED_QUESTIONS_TEMPERATURE}") -``` - -## Migration Guide - -### From Default Configuration - -If you're currently using the default configuration and want to customize: - -1. **Assess Your Needs**: Determine what aspects need customization (question count, length, domain focus) -1. **Design Your Prompt**: Write a custom prompt that addresses your specific use case -1. **Choose Parameters**: Select appropriate max_tokens and temperature values -1. **Test Incrementally**: Start with small changes and test thoroughly -1. **Deploy Gradually**: Roll out to production after successful testing - -### Best Practices - -1. **Start Simple**: Begin with minimal changes to the default prompt -1. **Test Thoroughly**: Test with various conversation types and languages -1. **Monitor Performance**: Watch for impact on response times and costs -1. **Get User Feedback**: Collect feedback on question quality and relevance -1. **Iterate**: Refine your configuration based on real-world usage - -## Future Enhancements - -This environment variable approach provides immediate customization while maintaining backward compatibility. Future enhancements could include: - -1. 
**App-Level Configuration**: Different apps with different suggested question settings -1. **Dynamic Prompts**: Context-aware prompts based on conversation content -1. **Multi-Model Support**: Different models for different types of questions -1. **Analytics Dashboard**: Insights into question effectiveness and usage patterns -1. **A/B Testing**: Built-in testing of different prompt configurations - -For now, the environment variable approach offers a simple, reliable way to customize the suggested questions feature for your specific needs. diff --git a/e2e/features/apps/app-detail-navigation.feature b/e2e/features/apps/app-detail-navigation.feature new file mode 100644 index 0000000000..7ac32039ec --- /dev/null +++ b/e2e/features/apps/app-detail-navigation.feature @@ -0,0 +1,26 @@ +@apps @authenticated @core +Feature: App detail navigation + + Scenario: Opening a workflow app navigates to the workflow editor + Given I am signed in as the default E2E admin + And a "workflow" app has been created via API + When I open the app from the app list + Then I should land on the workflow editor + + Scenario: Opening a chatbot app navigates to the configuration page + Given I am signed in as the default E2E admin + And a "chat" app has been created via API + When I open the app from the app list + Then I should land on the app configuration page + + Scenario: The develop tab is accessible from a workflow app + Given I am signed in as the default E2E admin + And a "workflow" app has been created via API + When I navigate to the app develop page + Then I should be on the app develop page + + Scenario: The overview tab is accessible from a workflow app + Given I am signed in as the default E2E admin + And a "workflow" app has been created via API + When I navigate to the app overview page + Then I should be on the app overview page diff --git a/e2e/features/apps/create-app.feature b/e2e/features/apps/create-app.feature index c0ca8ea4e0..d980bb9eb9 100644 --- 
a/e2e/features/apps/create-app.feature +++ b/e2e/features/apps/create-app.feature @@ -1,4 +1,4 @@ -@apps @authenticated +@apps @authenticated @core Feature: Create app Scenario: Create a new blank app and redirect to the editor Given I am signed in as the default E2E admin diff --git a/e2e/features/apps/create-chatbot-app.feature b/e2e/features/apps/create-chatbot-app.feature index 4f506e4f40..45f66aaa52 100644 --- a/e2e/features/apps/create-chatbot-app.feature +++ b/e2e/features/apps/create-chatbot-app.feature @@ -1,4 +1,4 @@ -@apps @authenticated +@apps @authenticated @core @mode-matrix Feature: Create Chatbot app Scenario: Create a new Chatbot app and redirect to the configuration page Given I am signed in as the default E2E admin diff --git a/e2e/features/apps/create-workflow-app.feature b/e2e/features/apps/create-workflow-app.feature index b88d94d899..2c11cf7a7a 100644 --- a/e2e/features/apps/create-workflow-app.feature +++ b/e2e/features/apps/create-workflow-app.feature @@ -1,4 +1,4 @@ -@apps @authenticated +@apps @authenticated @core @mode-matrix Feature: Create Workflow app Scenario: Create a new Workflow app and redirect to the workflow editor Given I am signed in as the default E2E admin diff --git a/e2e/features/apps/publish-app.feature b/e2e/features/apps/publish-app.feature new file mode 100644 index 0000000000..2d002d3cb7 --- /dev/null +++ b/e2e/features/apps/publish-app.feature @@ -0,0 +1,11 @@ +@apps @authenticated @core +Feature: Publish app + + Scenario: Publish a workflow app for the first time + Given I am signed in as the default E2E admin + And a "workflow" app has been created via API + And a minimal workflow draft has been synced + When I open the app from the app list + And I open the publish panel + And I publish the app + Then the app should be marked as published diff --git a/e2e/features/auth/sign-in.feature b/e2e/features/auth/sign-in.feature new file mode 100644 index 0000000000..a9a1e13626 --- /dev/null +++ 
b/e2e/features/auth/sign-in.feature @@ -0,0 +1,8 @@ +@auth @smoke @core @unauthenticated +Feature: Sign in + + Scenario: Sign in with valid credentials and reach the apps console + Given I am not signed in + When I open the sign-in page + And I sign in as the default E2E admin + Then I should be on the apps console diff --git a/e2e/features/auth/sign-out.feature b/e2e/features/auth/sign-out.feature index 9112f1220a..4446beaf76 100644 --- a/e2e/features/auth/sign-out.feature +++ b/e2e/features/auth/sign-out.feature @@ -1,4 +1,4 @@ -@auth @authenticated +@auth @authenticated @core Feature: Sign out Scenario: Sign out from the apps console Given I am signed in as the default E2E admin diff --git a/e2e/features/step-definitions/apps/app-detail-navigation.steps.ts b/e2e/features/step-definitions/apps/app-detail-navigation.steps.ts new file mode 100644 index 0000000000..c7f30b3e1b --- /dev/null +++ b/e2e/features/step-definitions/apps/app-detail-navigation.steps.ts @@ -0,0 +1,21 @@ +import type { DifyWorld } from '../../support/world' +import { Then, When } from '@cucumber/cucumber' +import { expect } from '@playwright/test' + +When('I navigate to the app develop page', async function (this: DifyWorld) { + const appId = this.createdAppIds.at(-1) + await this.getPage().goto(`/app/${appId}/develop`) +}) + +When('I navigate to the app overview page', async function (this: DifyWorld) { + const appId = this.createdAppIds.at(-1) + await this.getPage().goto(`/app/${appId}/overview`) +}) + +Then('I should be on the app develop page', async function (this: DifyWorld) { + await expect(this.getPage()).toHaveURL(/\/app\/[^/]+\/develop(?:\?.*)?$/, { timeout: 30_000 }) +}) + +Then('I should be on the app overview page', async function (this: DifyWorld) { + await expect(this.getPage()).toHaveURL(/\/app\/[^/]+\/overview(?:\?.*)?$/, { timeout: 30_000 }) +}) diff --git a/e2e/features/step-definitions/apps/publish-app.steps.ts b/e2e/features/step-definitions/apps/publish-app.steps.ts new 
file mode 100644 index 0000000000..de4f5ee63f --- /dev/null +++ b/e2e/features/step-definitions/apps/publish-app.steps.ts @@ -0,0 +1,15 @@ +import type { DifyWorld } from '../../support/world' +import { Then, When } from '@cucumber/cucumber' +import { expect } from '@playwright/test' + +When('I open the publish panel', async function (this: DifyWorld) { + await this.getPage().getByRole('button', { name: 'Publish' }).first().click() +}) + +When('I publish the app', async function (this: DifyWorld) { + await this.getPage().getByRole('button', { name: /Publish Update/ }).click() +}) + +Then('the app should be marked as published', async function (this: DifyWorld) { + await expect(this.getPage().getByRole('button', { name: 'Published' })).toBeVisible({ timeout: 30_000 }) +}) diff --git a/e2e/features/step-definitions/auth/sign-in.steps.ts b/e2e/features/step-definitions/auth/sign-in.steps.ts new file mode 100644 index 0000000000..8f9e8e765c --- /dev/null +++ b/e2e/features/step-definitions/auth/sign-in.steps.ts @@ -0,0 +1,20 @@ +import type { DifyWorld } from '../../support/world' +import { Then, When } from '@cucumber/cucumber' +import { expect } from '@playwright/test' +import { adminCredentials } from '../../../fixtures/auth' + +When('I open the sign-in page', async function (this: DifyWorld) { + await this.getPage().goto('/signin') +}) + +When('I sign in as the default E2E admin', async function (this: DifyWorld) { + const page = this.getPage() + + await page.getByLabel('Email address').fill(adminCredentials.email) + await page.getByLabel('Password').fill(adminCredentials.password) + await page.getByRole('button', { name: 'Sign in' }).click() +}) + +Then('I should be on the apps console', async function (this: DifyWorld) { + await expect(this.getPage()).toHaveURL(/\/apps(?:\?.*)?$/, { timeout: 30_000 }) +}) diff --git a/e2e/features/step-definitions/common/app.steps.ts b/e2e/features/step-definitions/common/app.steps.ts new file mode 100644 index 
0000000000..93e808e3c5 --- /dev/null +++ b/e2e/features/step-definitions/common/app.steps.ts @@ -0,0 +1,22 @@ +import type { DifyWorld } from '../../support/world' +import { Given, When } from '@cucumber/cucumber' +import { expect } from '@playwright/test' +import { createTestApp, syncMinimalWorkflowDraft } from '../../../support/api' + +Given('a {string} app has been created via API', async function (this: DifyWorld, mode: string) { + const app = await createTestApp(`E2E ${Date.now()}`, mode) + this.createdAppIds.push(app.id) + this.lastCreatedAppName = app.name +}) + +Given('a minimal workflow draft has been synced', async function (this: DifyWorld) { + const appId = this.createdAppIds.at(-1)! + await syncMinimalWorkflowDraft(appId) +}) + +When('I open the app from the app list', async function (this: DifyWorld) { + const page = this.getPage() + await page.goto('/apps') + await expect(page.getByRole('button', { name: 'Create from Blank' })).toBeVisible() + await page.getByText(this.lastCreatedAppName!).click() +}) diff --git a/e2e/package.json b/e2e/package.json index 94fc857c0b..77d7db80f0 100644 --- a/e2e/package.json +++ b/e2e/package.json @@ -12,13 +12,14 @@ "e2e:middleware:down": "tsx ./scripts/setup.ts middleware-down", "e2e:middleware:up": "tsx ./scripts/setup.ts middleware-up", "e2e:reset": "tsx ./scripts/setup.ts reset", - "type-check": "tsc" + "type-check": "tsgo" }, "devDependencies": { "@cucumber/cucumber": "catalog:", "@dify/tsconfig": "workspace:*", "@playwright/test": "catalog:", "@types/node": "catalog:", + "@typescript/native-preview": "catalog:", "tsx": "catalog:", "typescript": "catalog:", "vite": "catalog:", diff --git a/e2e/support/api.ts b/e2e/support/api.ts index c6d6c98bde..7d9fd0264f 100644 --- a/e2e/support/api.ts +++ b/e2e/support/api.ts @@ -43,6 +43,34 @@ export async function createTestApp(name: string, mode = 'workflow'): Promise { + const ctx = await createApiContext() + try { + await 
ctx.post(`/console/api/apps/${appId}/workflows/draft`, { + data: { + graph: { + nodes: [ + { + id: '1', + type: 'custom', + position: { x: 80, y: 282 }, + data: { id: '1', type: 'start', title: 'Start', variables: [] }, + }, + ], + edges: [], + viewport: { x: 0, y: 0, zoom: 1 }, + }, + features: {}, + environment_variables: [], + conversation_variables: [], + }, + }) + } + finally { + await ctx.dispose() + } +} + export async function deleteTestApp(id: string): Promise { const ctx = await createApiContext() try { diff --git a/eslint-suppressions.json b/eslint-suppressions.json index 0e0970f90d..1bff82ac17 100644 --- a/eslint-suppressions.json +++ b/eslint-suppressions.json @@ -111,16 +111,6 @@ "count": 1 } }, - "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, - "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/svg-attribute-error-reproduction.spec.tsx": { "no-console": { "count": 19 @@ -134,11 +124,6 @@ "count": 1 } }, - "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-panel.tsx": { "ts/no-explicit-any": { "count": 1 @@ -534,11 +519,6 @@ "count": 1 } }, - "web/app/components/app/configuration/debug/chat-user-input.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/app/configuration/debug/debug-with-multiple-model/chat-item.tsx": { "ts/no-explicit-any": { "count": 6 @@ -584,7 +564,7 @@ }, "web/app/components/app/configuration/prompt-value-panel/index.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 } }, "web/app/components/app/configuration/prompt-value-panel/utils.ts": 
{ @@ -681,7 +661,7 @@ }, "web/app/components/app/overview/settings/index.tsx": { "no-restricted-imports": { - "count": 3 + "count": 2 }, "react/set-state-in-effect": { "count": 3 @@ -920,9 +900,6 @@ } }, "web/app/components/base/chat/chat-with-history/inputs-form/content.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 3 } @@ -1036,9 +1013,6 @@ } }, "web/app/components/base/chat/embedded-chatbot/inputs-form/content.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 3 } @@ -1101,11 +1075,6 @@ "count": 1 } }, - "web/app/components/base/emoji-picker/Inner.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/base/emoji-picker/index.tsx": { "no-restricted-imports": { "count": 1 @@ -1175,11 +1144,6 @@ "count": 5 } }, - "web/app/components/base/features/new-feature-panel/moderation/form-generation.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/base/features/new-feature-panel/moderation/index.tsx": { "ts/no-explicit-any": { "count": 1 @@ -1195,7 +1159,7 @@ }, "web/app/components/base/features/new-feature-panel/text-to-speech/param-config-content.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 } }, "web/app/components/base/features/types.ts": { @@ -2438,11 +2402,6 @@ "count": 4 } }, - "web/app/components/datasets/documents/components/documents-header.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/components/operations.tsx": { "no-restricted-imports": { "count": 1 @@ -2576,11 +2535,6 @@ "count": 3 } }, - "web/app/components/datasets/documents/detail/completed/components/menu-bar.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/datasets/documents/detail/completed/components/segment-list-content.tsx": { "ts/no-non-null-asserted-optional-chain": { "count": 1 @@ -2596,11 +2550,6 @@ "count": 5 } }, - 
"web/app/components/datasets/documents/detail/completed/hooks/use-search-filter.ts": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/completed/index.tsx": { "no-barrel-files/no-barrel-files": { "count": 2 @@ -2617,11 +2566,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/completed/status-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/context.ts": { "ts/no-explicit-any": { "count": 1 @@ -2642,11 +2586,6 @@ "count": 1 } }, - "web/app/components/datasets/documents/detail/metadata/components/field-info.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/datasets/documents/detail/metadata/components/metadata-field-list.tsx": { "ts/no-non-null-asserted-optional-chain": { "count": 1 @@ -3034,11 +2973,6 @@ "count": 1 } }, - "web/app/components/header/account-setting/language-page/index.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/header/account-setting/members-page/invite-modal/index.tsx": { "react/set-state-in-effect": { "count": 3 @@ -3121,7 +3055,7 @@ }, "web/app/components/header/account-setting/model-provider-page/model-modal/Form.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 }, "ts/no-explicit-any": { "count": 6 @@ -3273,16 +3207,13 @@ }, "web/app/components/plugins/install-plugin/install-from-github/index.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 }, "ts/no-explicit-any": { "count": 3 } }, "web/app/components/plugins/install-plugin/install-from-github/steps/selectPackage.tsx": { - "no-restricted-imports": { - "count": 2 - }, "ts/no-explicit-any": { "count": 1 } @@ -3312,14 +3243,6 @@ "count": 2 } }, - "web/app/components/plugins/plugin-auth/authorize/api-key-modal.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 2 - } - }, "web/app/components/plugins/plugin-auth/authorize/index.tsx": { 
"no-restricted-imports": { "count": 1 @@ -3386,9 +3309,6 @@ } }, "web/app/components/plugins/plugin-detail-panel/app-selector/app-inputs-form.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 8 } @@ -3492,7 +3412,7 @@ "count": 3 }, "no-restricted-imports": { - "count": 3 + "count": 1 } }, "web/app/components/plugins/plugin-detail-panel/subscription-list/create/oauth-client.tsx": { @@ -3561,11 +3481,6 @@ "count": 7 } }, - "web/app/components/plugins/plugin-detail-panel/tool-selector/components/reasoning-config-form.tsx": { - "no-restricted-imports": { - "count": 2 - } - }, "web/app/components/plugins/plugin-detail-panel/tool-selector/components/schema-modal.tsx": { "no-restricted-imports": { "count": 1 @@ -3609,11 +3524,6 @@ "count": 1 } }, - "web/app/components/plugins/plugin-page/debug-info.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/plugins/plugin-page/empty/index.tsx": { "react/set-state-in-effect": { "count": 2 @@ -3867,9 +3777,6 @@ } }, "web/app/components/share/text-generation/run-once/index.tsx": { - "no-restricted-imports": { - "count": 1 - }, "react/set-state-in-effect": { "count": 1 }, @@ -4289,9 +4196,6 @@ } }, "web/app/components/workflow/nodes/_base/components/before-run-form/form-item.tsx": { - "no-restricted-imports": { - "count": 1 - }, "ts/no-explicit-any": { "count": 11 } @@ -4348,11 +4252,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/error-handle/error-handle-on-panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/error-handle/error-handle-type-selector.tsx": { "no-restricted-imports": { "count": 1 @@ -4371,24 +4270,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/form-input-item.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 4 - } - }, - 
"web/app/components/workflow/nodes/_base/components/form-input-type-switch.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/_base/components/help-link.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/input-support-select-var.tsx": { "no-restricted-imports": { "count": 1 @@ -4435,11 +4316,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/node-handle.tsx": { - "react/set-state-in-effect": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/option-card.tsx": { "no-restricted-imports": { "count": 1 @@ -4476,11 +4352,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/_base/components/variable/constant-field.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/_base/components/variable/match-schema-type.ts": { "ts/no-explicit-any": { "count": 8 @@ -4598,22 +4469,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/agent/components/model-bar.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-empty-object-type": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/agent/components/tool-icon.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "react/unsupported-syntax": { - "count": 1 - } - }, "web/app/components/workflow/nodes/agent/default.ts": { "ts/no-explicit-any": { "count": 3 @@ -4890,11 +4745,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/if-else/components/condition-list/condition-item.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/if-else/components/condition-list/condition-operator.tsx": { "no-restricted-imports": { "count": 1 @@ -4905,11 +4755,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/if-else/components/condition-wrap.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/if-else/default.ts": { "ts/no-explicit-any": { 
"count": 1 @@ -4940,16 +4785,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/iteration/panel.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/iteration/use-config.ts": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/iteration/use-single-run-form-params.ts": { "ts/no-explicit-any": { "count": 6 @@ -4975,11 +4810,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/top-k-and-score-threshold.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/knowledge-base/components/retrieval-setting/type.ts": { "ts/no-explicit-any": { "count": 2 @@ -5052,17 +4882,6 @@ } }, "web/app/components/workflow/nodes/list-operator/components/filter-condition.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 1 - } - }, - "web/app/components/workflow/nodes/list-operator/components/sub-variable-picker.tsx": { - "no-restricted-imports": { - "count": 2 - }, "ts/no-explicit-any": { "count": 1 } @@ -5093,14 +4912,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/code-editor.tsx": { - "no-restricted-imports": { - "count": 1 - }, - "ts/no-explicit-any": { - "count": 4 - } - }, "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/index.tsx": { "no-restricted-imports": { "count": 1 @@ -5136,11 +4947,6 @@ "count": 2 } }, - "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/edit-card/actions.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/llm/components/json-schema-config-modal/visual-editor/edit-card/auto-width-input.tsx": { "react/set-state-in-effect": { "count": 1 @@ -5202,11 +5008,6 @@ "count": 1 } }, - "web/app/components/workflow/nodes/loop/components/condition-list/condition-item.tsx": { - "no-restricted-imports": { - 
"count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/condition-list/condition-operator.tsx": { "no-restricted-imports": { "count": 1 @@ -5217,31 +5018,16 @@ "count": 1 } }, - "web/app/components/workflow/nodes/loop/components/condition-wrap.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/loop-variables/form-item.tsx": { "ts/no-explicit-any": { "count": 3 } }, - "web/app/components/workflow/nodes/loop/components/loop-variables/input-mode-selec.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/components/loop-variables/item.tsx": { "ts/no-explicit-any": { "count": 4 } }, - "web/app/components/workflow/nodes/loop/components/loop-variables/variable-type-select.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/loop/default.ts": { "ts/no-explicit-any": { "count": 1 @@ -5277,7 +5063,7 @@ }, "web/app/components/workflow/nodes/parameter-extractor/components/extract-parameter/update.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 }, "ts/no-explicit-any": { "count": 1 @@ -5382,11 +5168,6 @@ "count": 5 } }, - "web/app/components/workflow/nodes/tool/components/copy-id.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/tool/components/input-var-list.tsx": { "ts/no-explicit-any": { "count": 7 @@ -5494,11 +5275,6 @@ "count": 7 } }, - "web/app/components/workflow/nodes/trigger-schedule/components/frequency-selector.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/trigger-schedule/components/monthly-days-selector.tsx": { "no-restricted-imports": { "count": 1 @@ -5512,11 +5288,6 @@ "count": 10 } }, - "web/app/components/workflow/nodes/trigger-webhook/components/generic-table.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/nodes/trigger-webhook/components/parameter-table.tsx": 
{ "ts/no-non-null-asserted-optional-chain": { "count": 1 @@ -5529,7 +5300,7 @@ }, "web/app/components/workflow/nodes/trigger-webhook/panel.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 } }, "web/app/components/workflow/nodes/utils.ts": { @@ -5562,11 +5333,6 @@ "count": 1 } }, - "web/app/components/workflow/note-node/note-editor/toolbar/command.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/components/workflow/note-node/note-editor/utils.ts": { "regexp/no-useless-quantifier": { "count": 1 @@ -6028,11 +5794,6 @@ "count": 1 } }, - "web/app/signin/invite-settings/page.tsx": { - "no-restricted-imports": { - "count": 1 - } - }, "web/app/signin/layout.tsx": { "ts/no-explicit-any": { "count": 1 @@ -6040,7 +5801,7 @@ }, "web/app/signin/one-more-step.tsx": { "no-restricted-imports": { - "count": 2 + "count": 1 }, "ts/no-explicit-any": { "count": 1 diff --git a/packages/dify-ui/README.md b/packages/dify-ui/README.md index 5e4e439e5f..cd9485c400 100644 --- a/packages/dify-ui/README.md +++ b/packages/dify-ui/README.md @@ -88,7 +88,23 @@ See `[web/docs/overlay-migration.md](../../web/docs/overlay-migration.md)` for t - `pnpm -C packages/dify-ui test` — Vitest unit tests for primitives. - `pnpm -C packages/dify-ui storybook` — Storybook on the default port. Each primitive has `index.stories.tsx`. -- `pnpm -C packages/dify-ui type-check` — `tsc --noEmit` for this package only. +- `pnpm -C packages/dify-ui type-check` — `tsgo --noEmit` for this package only. + +### Disabling Animations In Tests + +Base UI can wait for `element.getAnimations()` to finish before it unmounts overlays, panels, and transition-driven components. Browser-based test runners can make that timing unstable, especially when tests assert final DOM state rather than animation behavior. 
+ +Set the Base UI test flag in a Vitest setup file to skip those waits: + +```ts +( + globalThis as typeof globalThis & { + BASE_UI_ANIMATIONS_DISABLED: boolean + } +).BASE_UI_ANIMATIONS_DISABLED = true +``` + +`packages/dify-ui/vitest.setup.ts` already applies this for primitive tests. See `[AGENTS.md](./AGENTS.md)` for: diff --git a/packages/dify-ui/package.json b/packages/dify-ui/package.json index 408ba2c432..483db46986 100644 --- a/packages/dify-ui/package.json +++ b/packages/dify-ui/package.json @@ -83,7 +83,7 @@ "storybook:build": "storybook build", "test": "vp test", "test:watch": "vp test --watch", - "type-check": "tsc" + "type-check": "tsgo" }, "peerDependencies": { "@base-ui/react": "catalog:", @@ -109,6 +109,7 @@ "@tailwindcss/vite": "catalog:", "@types/react": "catalog:", "@types/react-dom": "catalog:", + "@typescript/native-preview": "catalog:", "@vitejs/plugin-react": "catalog:", "@vitest/coverage-v8": "catalog:", "class-variance-authority": "catalog:", diff --git a/packages/dify-ui/src/toast/__tests__/index.spec.tsx b/packages/dify-ui/src/toast/__tests__/index.spec.tsx index edbdacd203..51fccf70d8 100644 --- a/packages/dify-ui/src/toast/__tests__/index.spec.tsx +++ b/packages/dify-ui/src/toast/__tests__/index.spec.tsx @@ -3,19 +3,20 @@ import { toast, ToastHost } from '../index' const asHTMLElement = (element: HTMLElement | SVGElement) => element as HTMLElement -declare global { - // eslint-disable-next-line vars-on-top - var BASE_UI_ANIMATIONS_DISABLED: boolean | undefined +const dispatchToastMouseOver = (element: HTMLElement | SVGElement) => { + element.dispatchEvent(new MouseEvent('mouseover', { + bubbles: true, + })) +} + +const dispatchToastMouseOut = (element: HTMLElement | SVGElement) => { + element.dispatchEvent(new MouseEvent('mouseout', { + bubbles: true, + relatedTarget: document.body, + })) } describe('@langgenius/dify-ui/toast', () => { - beforeAll(() => { - // Base UI waits for `requestAnimationFrame` + `getAnimations().finished` - // 
before unmounting a toast. Fake timers can't reliably drive that path, - // so short-circuit it to keep auto-dismiss assertions deterministic in CI. - globalThis.BASE_UI_ANIMATIONS_DISABLED = true - }) - beforeEach(() => { vi.clearAllMocks() vi.useFakeTimers() @@ -28,10 +29,6 @@ describe('@langgenius/dify-ui/toast', () => { vi.useRealTimers() }) - afterAll(() => { - globalThis.BASE_UI_ANIMATIONS_DISABLED = undefined - }) - it('should render a success toast when called through the typed shortcut', async () => { const screen = await render() @@ -62,13 +59,13 @@ describe('@langgenius/dify-ui/toast', () => { expect(document.body.querySelectorAll('[role="dialog"]')).toHaveLength(3) expect(document.body.querySelectorAll('button[aria-label="Close notification"][aria-hidden="true"]')).toHaveLength(3) - screen.getByRole('region', { name: 'Notifications' }).element().dispatchEvent(new MouseEvent('mouseover', { - bubbles: true, - })) + const viewport = screen.getByRole('region', { name: 'Notifications' }).element() + dispatchToastMouseOver(viewport) await vi.waitFor(() => { expect(document.body.querySelector('button[aria-label="Close notification"][aria-hidden="true"]')).not.toBeInTheDocument() }) + dispatchToastMouseOut(viewport) }) it('should render a neutral toast when called directly', async () => { @@ -115,11 +112,11 @@ describe('@langgenius/dify-ui/toast', () => { onClose, }) - screen.getByRole('region', { name: 'Notifications' }).element().dispatchEvent(new MouseEvent('mouseover', { - bubbles: true, - })) + const viewport = screen.getByRole('region', { name: 'Notifications' }).element() + dispatchToastMouseOver(viewport) await expect.element(screen.getByRole('button', { name: 'Close notification' })).toBeInTheDocument() + dispatchToastMouseOut(viewport) asHTMLElement(screen.getByRole('button', { name: 'Close notification' }).element()).click() await vi.waitFor(() => { @@ -128,21 +125,6 @@ describe('@langgenius/dify-ui/toast', () => { 
expect(onClose).toHaveBeenCalledTimes(1) }) - it('should auto dismiss toasts with the Base UI default timeout', async () => { - const screen = await render() - - toast('Default timeout') - await expect.element(screen.getByText('Default timeout')).toBeInTheDocument() - - await vi.advanceTimersByTimeAsync(4999) - expect(document.body).toHaveTextContent('Default timeout') - - await vi.advanceTimersByTimeAsync(1) - await vi.waitFor(() => { - expect(document.body).not.toHaveTextContent('Default timeout') - }) - }) - it('should respect the host timeout configuration', async () => { const screen = await render() diff --git a/packages/dify-ui/tsconfig.json b/packages/dify-ui/tsconfig.json index 10cffbcd76..514954c807 100644 --- a/packages/dify-ui/tsconfig.json +++ b/packages/dify-ui/tsconfig.json @@ -2,5 +2,7 @@ "extends": "@dify/tsconfig/react.json", "compilerOptions": { "types": ["vite-plus/test/globals"] - } + }, + "include": ["src/**/*.ts", "src/**/*.tsx", "vite.config.ts", "tailwind.config.ts"], + "exclude": ["node_modules", "dist", "storybook-static", "coverage"] } diff --git a/packages/dify-ui/vite.config.ts b/packages/dify-ui/vite.config.ts index 5f3533c706..f2a2d24e57 100644 --- a/packages/dify-ui/vite.config.ts +++ b/packages/dify-ui/vite.config.ts @@ -11,6 +11,7 @@ export default defineConfig({ }, test: { globals: true, + setupFiles: ['./vitest.setup.ts'], browser: { enabled: true, provider: playwright(), diff --git a/packages/dify-ui/vitest.setup.ts b/packages/dify-ui/vitest.setup.ts new file mode 100644 index 0000000000..285d6e7760 --- /dev/null +++ b/packages/dify-ui/vitest.setup.ts @@ -0,0 +1,5 @@ +( + globalThis as typeof globalThis & { + BASE_UI_ANIMATIONS_DISABLED: boolean + } +).BASE_UI_ANIMATIONS_DISABLED = true diff --git a/packages/migrate-no-unchecked-indexed-access/package.json b/packages/migrate-no-unchecked-indexed-access/package.json index 5da8d4cb50..6a29f40338 100644 --- a/packages/migrate-no-unchecked-indexed-access/package.json +++ 
b/packages/migrate-no-unchecked-indexed-access/package.json @@ -8,9 +8,10 @@ }, "scripts": { "build": "vp pack", - "type-check": "tsc" + "type-check": "tsgo" }, "dependencies": { + "@typescript/native-preview": "catalog:", "typescript": "catalog:" }, "devDependencies": { diff --git a/packages/migrate-no-unchecked-indexed-access/src/no-unchecked-indexed-access/run.ts b/packages/migrate-no-unchecked-indexed-access/src/no-unchecked-indexed-access/run.ts index ad655e4f11..6eea6c2459 100644 --- a/packages/migrate-no-unchecked-indexed-access/src/no-unchecked-indexed-access/run.ts +++ b/packages/migrate-no-unchecked-indexed-access/src/no-unchecked-indexed-access/run.ts @@ -117,17 +117,17 @@ async function runTypeCheck( await fs.mkdir(TYPECHECK_CACHE_DIR, { recursive: true }) - const tscArgs = ['exec', 'tsc', '--noEmit', '--pretty', 'false'] + const tsgoArgs = ['exec', 'tsgo', '--noEmit', '--pretty', 'false'] if (incremental) { - tscArgs.push('--incremental', '--tsBuildInfoFile', buildInfoPath) + tsgoArgs.push('--incremental', '--tsBuildInfoFile', buildInfoPath) } else { - tscArgs.push('--incremental', 'false') + tsgoArgs.push('--incremental', 'false') } - tscArgs.push('--project', projectPath) + tsgoArgs.push('--project', projectPath) try { - const { stdout, stderr } = await execFileAsync('pnpm', tscArgs, { + const { stdout, stderr } = await execFileAsync('pnpm', tsgoArgs, { cwd: projectDirectory, env: { ...process.env, diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index d573664e3a..9408bfb4b3 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -7,11 +7,11 @@ settings: catalogs: default: '@amplitude/analytics-browser': - specifier: 2.39.0 - version: 2.39.0 + specifier: 2.41.0 + version: 2.41.0 '@amplitude/plugin-session-replay-browser': - specifier: 1.27.7 - version: 1.27.7 + specifier: 1.27.10 + version: 1.27.10 '@antfu/eslint-config': specifier: 8.2.0 version: 8.2.0 @@ -22,8 +22,8 @@ catalogs: specifier: 5.1.2 version: 5.1.2 '@cucumber/cucumber': - specifier: 12.8.0 - 
version: 12.8.0 + specifier: 12.8.1 + version: 12.8.1 '@egoist/tailwindcss-icons': specifier: 1.9.2 version: 1.9.2 @@ -88,11 +88,11 @@ catalogs: specifier: 4.7.0 version: 4.7.0 '@next/eslint-plugin-next': - specifier: 16.2.3 - version: 16.2.3 + specifier: 16.2.4 + version: 16.2.4 '@next/mdx': - specifier: 16.2.3 - version: 16.2.3 + specifier: 16.2.4 + version: 16.2.4 '@orpc/client': specifier: 1.13.14 version: 1.13.14 @@ -115,8 +115,8 @@ catalogs: specifier: 4.2.0 version: 4.2.0 '@sentry/react': - specifier: 10.48.0 - version: 10.48.0 + specifier: 10.49.0 + version: 10.49.0 '@storybook/addon-docs': specifier: 10.3.5 version: 10.3.5 @@ -148,35 +148,35 @@ catalogs: specifier: 0.13.11 version: 0.13.11 '@tailwindcss/postcss': - specifier: 4.2.2 - version: 4.2.2 + specifier: 4.2.4 + version: 4.2.4 '@tailwindcss/typography': specifier: 0.5.19 version: 0.5.19 '@tailwindcss/vite': - specifier: 4.2.2 - version: 4.2.2 + specifier: 4.2.4 + version: 4.2.4 '@tanstack/eslint-plugin-query': - specifier: 5.99.0 - version: 5.99.0 + specifier: 5.99.2 + version: 5.99.2 '@tanstack/react-devtools': specifier: 0.10.2 version: 0.10.2 '@tanstack/react-form': - specifier: 1.29.0 - version: 1.29.0 + specifier: 1.29.1 + version: 1.29.1 '@tanstack/react-form-devtools': - specifier: 0.2.21 - version: 0.2.21 + specifier: 0.2.22 + version: 0.2.22 '@tanstack/react-query': - specifier: 5.99.0 - version: 5.99.0 + specifier: 5.99.2 + version: 5.99.2 '@tanstack/react-query-devtools': - specifier: 5.99.0 - version: 5.99.0 + specifier: 5.99.2 + version: 5.99.2 '@tanstack/react-virtual': - specifier: 3.13.23 - version: 3.13.23 + specifier: 3.13.24 + version: 3.13.24 '@testing-library/dom': specifier: 10.4.1 version: 10.4.1 @@ -190,14 +190,14 @@ catalogs: specifier: 14.6.1 version: 14.6.1 '@tsslint/cli': - specifier: 3.0.3 - version: 3.0.3 + specifier: 3.0.4 + version: 3.0.4 '@tsslint/compat-eslint': - specifier: 3.0.3 - version: 3.0.3 + specifier: 3.0.4 + version: 3.0.4 '@tsslint/config': - specifier: 
3.0.3 - version: 3.0.3 + specifier: 3.0.4 + version: 3.0.4 '@types/js-cookie': specifier: 3.0.6 version: 3.0.6 @@ -223,14 +223,14 @@ catalogs: specifier: 1.15.9 version: 1.15.9 '@typescript-eslint/eslint-plugin': - specifier: 8.58.2 - version: 8.58.2 + specifier: 8.59.0 + version: 8.59.0 '@typescript-eslint/parser': - specifier: 8.58.2 - version: 8.58.2 + specifier: 8.59.0 + version: 8.59.0 '@typescript/native-preview': - specifier: 7.0.0-dev.20260413.1 - version: 7.0.0-dev.20260413.1 + specifier: 7.0.0-dev.20260422.1 + version: 7.0.0-dev.20260422.1 '@vitejs/plugin-react': specifier: 6.0.1 version: 6.0.1 @@ -238,8 +238,8 @@ catalogs: specifier: 0.5.24 version: 0.5.24 '@vitest/coverage-v8': - specifier: 4.1.4 - version: 4.1.4 + specifier: 4.1.5 + version: 4.1.5 abcjs: specifier: 6.6.2 version: 6.6.2 @@ -277,8 +277,8 @@ catalogs: specifier: 10.6.0 version: 10.6.0 dompurify: - specifier: 3.4.0 - version: 3.4.0 + specifier: 3.4.1 + version: 3.4.1 echarts: specifier: 6.0.0 version: 6.0.0 @@ -298,11 +298,11 @@ catalogs: specifier: 5.6.0 version: 5.6.0 es-toolkit: - specifier: 1.45.1 - version: 1.45.1 + specifier: 1.46.0 + version: 1.46.0 eslint: - specifier: 10.2.0 - version: 10.2.0 + specifier: 10.2.1 + version: 10.2.1 eslint-markdown: specifier: 0.6.1 version: 0.6.1 @@ -322,8 +322,8 @@ catalogs: specifier: 0.5.2 version: 0.5.2 eslint-plugin-sonarjs: - specifier: 4.0.2 - version: 4.0.2 + specifier: 4.0.3 + version: 4.0.3 eslint-plugin-storybook: specifier: 10.3.5 version: 10.3.5 @@ -346,8 +346,8 @@ catalogs: specifier: 1.11.13 version: 1.11.13 i18next: - specifier: 26.0.4 - version: 26.0.4 + specifier: 26.0.6 + version: 26.0.6 i18next-resources-to-backend: specifier: 1.2.1 version: 1.2.1 @@ -376,11 +376,11 @@ catalogs: specifier: 0.16.45 version: 0.16.45 knip: - specifier: 6.4.1 - version: 6.4.1 + specifier: 6.6.1 + version: 6.6.1 ky: - specifier: 2.0.0 - version: 2.0.0 + specifier: 2.0.2 + version: 2.0.2 lamejs: specifier: 1.2.1 version: 1.2.1 @@ -388,8 +388,8 @@ 
catalogs: specifier: 0.43.0 version: 0.43.0 loro-crdt: - specifier: 1.10.8 - version: 1.10.8 + specifier: 1.11.1 + version: 1.11.1 mermaid: specifier: 11.14.0 version: 11.14.0 @@ -403,8 +403,8 @@ catalogs: specifier: 1.0.0 version: 1.0.0 next: - specifier: 16.2.3 - version: 16.2.3 + specifier: 16.2.4 + version: 16.2.4 next-themes: specifier: 0.4.6 version: 0.4.6 @@ -418,8 +418,8 @@ catalogs: specifier: 1.59.1 version: 1.59.1 postcss: - specifier: 8.5.9 - version: 8.5.9 + specifier: 8.5.10 + version: 8.5.10 qrcode.react: specifier: 4.2.0 version: 4.2.0 @@ -502,8 +502,8 @@ catalogs: specifier: 3.5.0 version: 3.5.0 tailwindcss: - specifier: 4.2.2 - version: 4.2.2 + specifier: 4.2.4 + version: 4.2.4 tldts: specifier: 7.0.28 version: 7.0.28 @@ -511,8 +511,8 @@ catalogs: specifier: 4.21.0 version: 4.21.0 typescript: - specifier: 6.0.2 - version: 6.0.2 + specifier: 6.0.3 + version: 6.0.3 uglify-js: specifier: 3.19.3 version: 3.19.3 @@ -585,31 +585,31 @@ importers: devDependencies: '@antfu/eslint-config': specifier: 'catalog:' - version: 8.2.0(@eslint-react/eslint-plugin@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@next/eslint-plugin-next@16.2.3)(@types/node@25.6.0)(@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.2))(@typescript-eslint/utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(eslint-plugin-react-refresh@0.5.2(eslint@10.2.0(jiti@2.6.1)))(eslint@10.2.0(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 
8.2.0(@eslint-react/eslint-plugin@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@next/eslint-plugin-next@16.2.4)(@types/node@25.6.0)(@typescript-eslint/typescript-estree@8.59.0(typescript@6.0.3))(@typescript-eslint/utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(eslint-plugin-react-refresh@0.5.2(eslint@10.2.1(jiti@2.6.1)))(eslint@10.2.1(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) eslint: specifier: 'catalog:' - version: 10.2.0(jiti@2.6.1) + version: 10.2.1(jiti@2.6.1) eslint-markdown: specifier: 'catalog:' - version: 0.6.1(eslint@10.2.0(jiti@2.6.1)) + version: 0.6.1(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-markdown-preferences: specifier: 'catalog:' - version: 0.41.1(@eslint/markdown@8.0.1)(eslint@10.2.0(jiti@2.6.1)) + version: 0.41.1(@eslint/markdown@8.0.1)(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-no-barrel-files: specifier: 'catalog:' - version: 1.3.1(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 1.3.1(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) vite: specifier: npm:@voidzero-dev/vite-plus-core@0.1.19 - version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plus: specifier: 'catalog:' - version: 
0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) e2e: devDependencies: '@cucumber/cucumber': specifier: 'catalog:' - version: 12.8.0 + version: 12.8.1 '@dify/tsconfig': specifier: workspace:* version: link:../packages/tsconfig @@ -619,18 +619,21 @@ importers: '@types/node': specifier: 'catalog:' version: 25.6.0 + '@typescript/native-preview': + specifier: 'catalog:' + version: 7.0.0-dev.20260422.1 tsx: specifier: 'catalog:' version: 4.21.0 typescript: specifier: 'catalog:' - version: 6.0.2 + version: 6.0.3 vite: specifier: npm:@voidzero-dev/vite-plus-core@0.1.19 - version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plus: specifier: 'catalog:' - version: 
0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) packages/dify-ui: dependencies: @@ -652,13 +655,13 @@ importers: version: link:../tsconfig '@egoist/tailwindcss-icons': specifier: 'catalog:' - version: 1.9.2(tailwindcss@4.2.2) + version: 1.9.2(tailwindcss@4.2.4) '@iconify-json/ri': specifier: 'catalog:' version: 1.2.10 '@storybook/addon-docs': specifier: 'catalog:' - version: 10.3.5(@types/react@19.2.14)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) + version: 10.3.5(@types/react@19.2.14)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) '@storybook/addon-links': specifier: 'catalog:' version: 
10.3.5(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) @@ -667,22 +670,25 @@ importers: version: 10.3.5(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) '@storybook/react-vite': specifier: 'catalog:' - version: 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) + version: 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) '@tailwindcss/vite': specifier: 'catalog:' - version: 4.2.2(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + version: 4.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) '@types/react': specifier: 'catalog:' version: 19.2.14 '@types/react-dom': specifier: 'catalog:' version: 19.2.3(@types/react@19.2.14) + '@typescript/native-preview': + specifier: 'catalog:' + version: 7.0.0-dev.20260422.1 '@vitejs/plugin-react': specifier: 'catalog:' - version: 6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + version: 6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) '@vitest/coverage-v8': specifier: 'catalog:' - version: 
4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) class-variance-authority: specifier: 'catalog:' version: 0.7.1 @@ -700,19 +706,19 @@ importers: version: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) tailwindcss: specifier: 'catalog:' - version: 4.2.2 + version: 4.2.4 typescript: specifier: 'catalog:' - version: 6.0.2 + version: 6.0.3 vite: specifier: npm:@voidzero-dev/vite-plus-core@0.1.19 - version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plus: specifier: 'catalog:' - version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 
0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) vitest-browser-react: specifier: 'catalog:' - version: 2.2.0(@types/node@25.6.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 2.2.0(@types/node@25.6.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) packages/iconify-collections: devDependencies: @@ -725,9 +731,12 @@ importers: packages/migrate-no-unchecked-indexed-access: dependencies: + '@typescript/native-preview': + specifier: 
'catalog:' + version: 7.0.0-dev.20260422.1 typescript: specifier: 'catalog:' - version: 6.0.2 + version: 6.0.3 devDependencies: '@dify/tsconfig': specifier: workspace:* @@ -737,10 +746,10 @@ importers: version: 25.6.0 vite: specifier: npm:@voidzero-dev/vite-plus-core@0.1.19 - version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plus: specifier: 'catalog:' - version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) packages/tsconfig: {} @@ -751,43 +760,46 @@ importers: version: link:../../packages/tsconfig '@eslint/js': specifier: 'catalog:' - version: 10.0.1(eslint@10.2.0(jiti@2.6.1)) + version: 10.0.1(eslint@10.2.1(jiti@2.6.1)) '@types/node': specifier: 'catalog:' version: 25.6.0 '@typescript-eslint/eslint-plugin': specifier: 'catalog:' - version: 
8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 8.59.0(@typescript-eslint/parser@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/parser': specifier: 'catalog:' - version: 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@typescript/native-preview': + specifier: 'catalog:' + version: 7.0.0-dev.20260422.1 '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) eslint: specifier: 'catalog:' - version: 10.2.0(jiti@2.6.1) + version: 10.2.1(jiti@2.6.1) typescript: specifier: 'catalog:' - version: 6.0.2 + version: 6.0.3 vite: specifier: npm:@voidzero-dev/vite-plus-core@0.1.19 - version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plus: specifier: 'catalog:' - version: 
0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) vitest: specifier: npm:@voidzero-dev/vite-plus-test@0.1.19 - version: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: 
'@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' web: dependencies: '@amplitude/analytics-browser': specifier: 'catalog:' - version: 2.39.0 + version: 2.41.0 '@amplitude/plugin-session-replay-browser': specifier: 'catalog:' - version: 1.27.7(@amplitude/rrweb@2.0.0-alpha.37) + version: 1.27.10(@amplitude/rrweb@2.0.0-alpha.37) '@base-ui/react': specifier: 'catalog:' version: 1.4.1(@types/react@19.2.14)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) @@ -841,13 +853,13 @@ importers: version: 1.13.14 '@orpc/tanstack-query': specifier: 'catalog:' - version: 1.13.14(@orpc/client@1.13.14)(@tanstack/query-core@5.99.0) + version: 1.13.14(@orpc/client@1.13.14)(@tanstack/query-core@5.99.2) '@remixicon/react': specifier: 'catalog:' version: 4.9.0(react@19.2.5) '@sentry/react': specifier: 'catalog:' - version: 10.48.0(react@19.2.5) + version: 10.49.0(react@19.2.5) '@streamdown/math': specifier: 'catalog:' version: 1.0.2(react@19.2.5) @@ -856,19 +868,19 @@ importers: version: 3.2.5 '@t3-oss/env-nextjs': specifier: 'catalog:' - version: 0.13.11(typescript@6.0.2)(valibot@1.3.1(typescript@6.0.2))(zod@4.3.6) + version: 0.13.11(typescript@6.0.3)(valibot@1.3.1(typescript@6.0.3))(zod@4.3.6) '@tailwindcss/typography': specifier: 'catalog:' - version: 0.5.19(tailwindcss@4.2.2) + version: 0.5.19(tailwindcss@4.2.4) '@tanstack/react-form': specifier: 'catalog:' - version: 1.29.0(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + version: 1.29.1(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@tanstack/react-query': 
specifier: 'catalog:' - version: 5.99.0(react@19.2.5) + version: 5.99.2(react@19.2.5) '@tanstack/react-virtual': specifier: 'catalog:' - version: 3.13.23(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + version: 3.13.24(react-dom@19.2.5(react@19.2.5))(react@19.2.5) abcjs: specifier: 'catalog:' version: 6.6.2 @@ -898,7 +910,7 @@ importers: version: 10.6.0 dompurify: specifier: 'catalog:' - version: 3.4.0 + version: 3.4.1 echarts: specifier: 'catalog:' version: 6.0.0 @@ -919,7 +931,7 @@ importers: version: 5.6.0 es-toolkit: specifier: 'catalog:' - version: 1.45.1 + version: 1.46.0 fast-deep-equal: specifier: 'catalog:' version: 3.1.3 @@ -934,7 +946,7 @@ importers: version: 1.11.13 i18next: specifier: 'catalog:' - version: 26.0.4(typescript@6.0.2) + version: 26.0.6(typescript@6.0.3) i18next-resources-to-backend: specifier: 'catalog:' version: 1.2.1 @@ -961,7 +973,7 @@ importers: version: 0.16.45 ky: specifier: 'catalog:' - version: 2.0.0 + version: 2.0.2 lamejs: specifier: 'catalog:' version: 1.2.1 @@ -970,7 +982,7 @@ importers: version: 0.43.0 loro-crdt: specifier: 'catalog:' - version: 1.10.8 + version: 1.11.1 mermaid: specifier: 'catalog:' version: 11.14.0 @@ -985,13 +997,13 @@ importers: version: 1.0.0 next: specifier: 'catalog:' - version: 16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + version: 16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) next-themes: specifier: 'catalog:' version: 0.4.6(react-dom@19.2.5(react@19.2.5))(react@19.2.5) nuqs: specifier: 'catalog:' - version: 2.8.9(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5) + version: 2.8.9(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5) pinyin-pro: specifier: 'catalog:' version: 3.28.1 @@ -1018,7 +1030,7 @@ importers: version: 
5.2.4(react-dom@19.2.5(react@19.2.5))(react@19.2.5) react-i18next: specifier: 'catalog:' - version: 16.5.8(i18next@26.0.4(typescript@6.0.2))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(typescript@6.0.2) + version: 16.5.8(i18next@26.0.6(typescript@6.0.3))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(typescript@6.0.3) react-multi-email: specifier: 'catalog:' version: 1.0.25(react-dom@19.2.5(react@19.2.5))(react@19.2.5) @@ -1091,7 +1103,7 @@ importers: devDependencies: '@antfu/eslint-config': specifier: 'catalog:' - version: 8.2.0(@eslint-react/eslint-plugin@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@next/eslint-plugin-next@16.2.3)(@types/node@25.6.0)(@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.2))(@typescript-eslint/utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(eslint-plugin-react-refresh@0.5.2(eslint@10.2.0(jiti@2.6.1)))(eslint@10.2.0(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 
8.2.0(@eslint-react/eslint-plugin@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@next/eslint-plugin-next@16.2.4)(@types/node@25.6.0)(@typescript-eslint/typescript-estree@8.59.0(typescript@6.0.3))(@typescript-eslint/utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(eslint-plugin-react-refresh@0.5.2(eslint@10.2.1(jiti@2.6.1)))(eslint@10.2.1(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) '@chromatic-com/storybook': specifier: 'catalog:' version: 5.1.2(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) @@ -1103,10 +1115,10 @@ importers: version: link:../packages/tsconfig '@egoist/tailwindcss-icons': specifier: 'catalog:' - version: 1.9.2(tailwindcss@4.2.2) + version: 1.9.2(tailwindcss@4.2.4) '@eslint-react/eslint-plugin': specifier: 'catalog:' - version: 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@hono/node-server': specifier: 'catalog:' version: 1.19.14(hono@4.12.14) @@ -1130,16 +1142,16 @@ importers: version: 3.1.1 '@next/eslint-plugin-next': specifier: 'catalog:' - version: 16.2.3 + version: 16.2.4 '@next/mdx': specifier: 'catalog:' - version: 16.2.3(@mdx-js/loader@3.1.1)(@mdx-js/react@3.1.1(@types/react@19.2.14)(react@19.2.5)) + version: 16.2.4(@mdx-js/loader@3.1.1)(@mdx-js/react@3.1.1(@types/react@19.2.14)(react@19.2.5)) '@rgrove/parse-xml': specifier: 'catalog:' version: 4.2.0 '@storybook/addon-docs': specifier: 'catalog:' - version: 
10.3.5(@types/react@19.2.14)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) + version: 10.3.5(@types/react@19.2.14)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) '@storybook/addon-links': specifier: 'catalog:' version: 10.3.5(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) @@ -1151,28 +1163,28 @@ importers: version: 10.3.5(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) '@storybook/nextjs-vite': specifier: 'catalog:' - version: 10.3.5(@babel/core@7.29.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) + version: 10.3.5(@babel/core@7.29.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) '@storybook/react': specifier: 'catalog:' - version: 
10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) + version: 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) '@tailwindcss/postcss': specifier: 'catalog:' - version: 4.2.2 + version: 4.2.4 '@tailwindcss/vite': specifier: 'catalog:' - version: 4.2.2(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + version: 4.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) '@tanstack/eslint-plugin-query': specifier: 'catalog:' - version: 5.99.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 5.99.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@tanstack/react-devtools': specifier: 'catalog:' version: 0.10.2(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(csstype@3.2.3)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@tanstack/react-form-devtools': specifier: 'catalog:' - version: 0.2.21(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11) + version: 0.2.22(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11) '@tanstack/react-query-devtools': specifier: 'catalog:' - version: 5.99.0(@tanstack/react-query@5.99.0(react@19.2.5))(react@19.2.5) + version: 5.99.2(@tanstack/react-query@5.99.2(react@19.2.5))(react@19.2.5) '@testing-library/dom': specifier: 'catalog:' version: 10.4.1 @@ -1187,13 +1199,13 @@ importers: version: 14.6.1(@testing-library/dom@10.4.1) '@tsslint/cli': specifier: 'catalog:' - version: 3.0.3(@tsslint/compat-eslint@3.0.3(jiti@2.6.1)(typescript@6.0.2))(typescript@6.0.2) + version: 3.0.4(@tsslint/compat-eslint@3.0.4(jiti@2.6.1)(typescript@6.0.3))(typescript@6.0.3) '@tsslint/compat-eslint': specifier: 'catalog:' - version: 
3.0.3(jiti@2.6.1)(typescript@6.0.2) + version: 3.0.4(jiti@2.6.1)(typescript@6.0.3) '@tsslint/config': specifier: 'catalog:' - version: 3.0.3(@tsslint/compat-eslint@3.0.3(jiti@2.6.1)(typescript@6.0.2))(typescript@6.0.2) + version: 3.0.4(@tsslint/compat-eslint@3.0.4(jiti@2.6.1)(typescript@6.0.3))(typescript@6.0.3) '@types/js-cookie': specifier: 'catalog:' version: 3.0.6 @@ -1220,19 +1232,19 @@ importers: version: 1.15.9 '@typescript-eslint/parser': specifier: 'catalog:' - version: 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript/native-preview': specifier: 'catalog:' - version: 7.0.0-dev.20260413.1 + version: 7.0.0-dev.20260422.1 '@vitejs/plugin-react': specifier: 'catalog:' - version: 6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + version: 6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) '@vitejs/plugin-rsc': specifier: 'catalog:' - version: 0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5) + version: 0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5) '@vitest/coverage-v8': specifier: 'catalog:' - version: 4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 
4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) agentation: specifier: 'catalog:' version: 3.0.2(react-dom@19.2.5(react@19.2.5))(react@19.2.5) @@ -1241,31 +1253,31 @@ importers: version: 1.5.1 eslint: specifier: 'catalog:' - version: 10.2.0(jiti@2.6.1) + version: 10.2.1(jiti@2.6.1) eslint-markdown: specifier: 'catalog:' - version: 0.6.1(eslint@10.2.0(jiti@2.6.1)) + version: 0.6.1(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-better-tailwindcss: specifier: 'catalog:' - version: 4.4.1(eslint@10.2.0(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tailwindcss@4.2.2)(typescript@6.0.2) + version: 4.4.1(eslint@10.2.1(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tailwindcss@4.2.4)(typescript@6.0.3) eslint-plugin-hyoban: specifier: 'catalog:' - version: 0.14.1(eslint@10.2.0(jiti@2.6.1)) + version: 0.14.1(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-markdown-preferences: specifier: 'catalog:' - version: 0.41.1(@eslint/markdown@8.0.1)(eslint@10.2.0(jiti@2.6.1)) + version: 0.41.1(@eslint/markdown@8.0.1)(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-no-barrel-files: specifier: 'catalog:' - version: 1.3.1(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + version: 1.3.1(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) eslint-plugin-react-refresh: specifier: 'catalog:' - version: 0.5.2(eslint@10.2.0(jiti@2.6.1)) + version: 0.5.2(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-sonarjs: specifier: 'catalog:' - version: 4.0.2(eslint@10.2.0(jiti@2.6.1)) + version: 4.0.3(eslint@10.2.1(jiti@2.6.1)) eslint-plugin-storybook: specifier: 'catalog:' - version: 10.3.5(eslint@10.2.0(jiti@2.6.1))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) + version: 
10.3.5(eslint@10.2.1(jiti@2.6.1))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) happy-dom: specifier: 'catalog:' version: 20.9.0 @@ -1274,10 +1286,10 @@ importers: version: 4.12.14 knip: specifier: 'catalog:' - version: 6.4.1(@emnapi/runtime@1.9.1) + version: 6.6.1(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2) postcss: specifier: 'catalog:' - version: 8.5.9 + version: 8.5.10 react-server-dom-webpack: specifier: 'catalog:' version: 19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5) @@ -1286,34 +1298,34 @@ importers: version: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) tailwindcss: specifier: 'catalog:' - version: 4.2.2 + version: 4.2.4 tsx: specifier: 'catalog:' version: 4.21.0 typescript: specifier: 'catalog:' - version: 6.0.2 + version: 6.0.3 uglify-js: specifier: 'catalog:' version: 3.19.3 vinext: specifier: 'catalog:' - version: 0.0.41(@mdx-js/rollup@3.1.1)(@vitejs/plugin-react@6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)))(@vitejs/plugin-rsc@0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5)(typescript@6.0.2) + version: 
0.0.41(@mdx-js/rollup@3.1.1)(@vitejs/plugin-react@6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)))(@vitejs/plugin-rsc@0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5)(typescript@6.0.3) vite: specifier: npm:@voidzero-dev/vite-plus-core@0.1.19 - version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plugin-inspect: specifier: 'catalog:' - version: 12.0.0-beta.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2)(ws@8.20.0) + version: 12.0.0-beta.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3)(ws@8.20.0) vite-plus: specifier: 'catalog:' - version: 
0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + version: 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) vitest: specifier: npm:@voidzero-dev/vite-plus-test@0.1.19 - version: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + version: 
'@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vitest-canvas-mock: specifier: 'catalog:' - version: 1.1.4(@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + version: 1.1.4(@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) packages: @@ -1324,17 +1336,17 @@ packages: resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} engines: {node: '>=10'} - '@amplitude/analytics-browser@2.39.0': - resolution: {integrity: 
sha512-sTNGGjiubsDs1NqKsTXp0ykCaSIzjaGclMRHlnO7JBatqK0f/Knl0cfn1a7XBFuTVix/M5nrWATsKv6+0dSpMg==} + '@amplitude/analytics-browser@2.41.0': + resolution: {integrity: sha512-zCfsm4mvytJRCvXxc04vfI0gmDkVUsfFXwoPl6l3g6uo9xC6Z22heDWot4NLUpeqKbQGBWJLYSzaD08HigXZNA==} - '@amplitude/analytics-client-common@2.4.43': - resolution: {integrity: sha512-R5n3cfnVNLk32BE2DbCp4xpn39mfmjMUjvOO9kt5dLFdF0cozb9MCawVyZJQVfnJJT6k5NMoswdUBu7Ul0nbRw==} + '@amplitude/analytics-client-common@2.4.45': + resolution: {integrity: sha512-2lQRpLEiZp3hqFXSpGgzsOVeXCaDwW8hCKJZeXWB6GGcLsGn0ssEC7RNxLpUMNWCctCF7Dfr9a4MSVe54jtiPw==} '@amplitude/analytics-connector@1.6.4': resolution: {integrity: sha512-SpIv0IQMNIq6SH3UqFGiaZyGSc7PBZwRdq7lvP0pBxW8i4Ny+8zwI0pV+VMfMHQwWY3wdIbWw5WQphNjpdq1/Q==} - '@amplitude/analytics-core@2.45.0': - resolution: {integrity: sha512-vWRYbXu2Grs1GM+WHo03RPtbaPs5sJm21YQcAow9JASvtoY4xNqItIeRydCJQWtFHhbbxY41n+CVW6mzDP6aBA==} + '@amplitude/analytics-core@2.47.0': + resolution: {integrity: sha512-LLffKoq7nhEtFtXz/QGcimlcS3vYugEW14JdAeZE03k2empShrAhCzigHL3Xiz+ywW9KC3inUalnbxybVhU0YA==} '@amplitude/analytics-types@2.11.1': resolution: {integrity: sha512-wFEgb0t99ly2uJKm5oZ28Lti0Kh5RecR5XBkwfUpDzn84IoCIZ8GJTsMw/nThu8FZFc7xFDA4UAt76zhZKrs9A==} @@ -1342,26 +1354,29 @@ packages: '@amplitude/experiment-core@0.7.2': resolution: {integrity: sha512-Wc2NWvgQ+bLJLeF0A9wBSPIaw0XuqqgkPKsoNFQrmS7r5Djd56um75In05tqmVntPJZRvGKU46pAp8o5tdf4mA==} - '@amplitude/plugin-autocapture-browser@1.25.2': - resolution: {integrity: sha512-AWzIX0uit60Q742rH/96/n88e+3BaVZa4+7Xs+BeuuIOyrljOZlQKzH23Lxzkl0DgbNb5+MMqWds0pov3DV5TA==} + '@amplitude/plugin-autocapture-browser@1.26.0': + resolution: {integrity: sha512-LCLsMr8usQJK6R6VjCjmiJ3ZRICh0QJ6xbDEwAm5XhuLFGRNsB2b9eRHlvalsPrTXR+b4Hjr71/dh3XNYZ9rqw==} - '@amplitude/plugin-custom-enrichment-browser@0.1.4': - resolution: {integrity: sha512-vxuQocn8YGE2wMLZUmotRG8c6RijoaQAsHKDQEO56CNk3WhSecgSGMnlHcUcOYIzwfXKFj4MxRJS386kdDHV+Q==} + 
'@amplitude/plugin-custom-enrichment-browser@0.1.6': + resolution: {integrity: sha512-oAVR5biFh7kMm4XOji7r684TA/VOwK8N1OLMdACQdwBl8MPiBLJDIPWtkVW5iSXyIjwYkOlrjygtnkei1q2S8g==} - '@amplitude/plugin-network-capture-browser@1.9.13': - resolution: {integrity: sha512-8uzTQFbP+dvqJX+S39KqKw+EheJW8JCWT/xlXT55vtTU/ZTFeF074QnHFEKUPewpYXpwKXgJky8PDoMk0b46Qw==} + '@amplitude/plugin-event-property-attribution-browser@0.1.1': + resolution: {integrity: sha512-2YHF/O+WVX0VxTAh3Jh77Ib+LeUl1xbyF1qW2YzGurY8uBUeAd62+7qFaXQSBWk1qMiTguxjKXrbbtxssfWWWg==} - '@amplitude/plugin-page-url-enrichment-browser@0.7.5': - resolution: {integrity: sha512-0Q7P5vsue/s92i3zevVDVJf9AiHkbxGdwkB8iV2oWgkXtglzWugwr//qN+muHmXdi1ZWxRjm93CW+jQJVripgw==} + '@amplitude/plugin-network-capture-browser@1.9.15': + resolution: {integrity: sha512-PkFWjKyOkkzw/9yKKJ2sa19F2Uo9NiSAR0l0NmELcO8h4TVJdfc4HlvM68AnWJ15nkFHh+UoG7SHwb7vp7ZC3Q==} - '@amplitude/plugin-page-view-tracking-browser@2.9.6': - resolution: {integrity: sha512-/4lG2lXIB6qbQNf1VYQ5fDOnvInPEtYuOgvmyLfuZ6PvHVFUu4NZtoOVdAcy0R9x76rNyCpRXxdL78p9Ra1ANA==} + '@amplitude/plugin-page-url-enrichment-browser@0.7.7': + resolution: {integrity: sha512-P67Xmi5/oDFZOO2DfsAvvDS280WdzVsl6JTPvgJc4+WJ1YypbYFA7S87LUIiwtuvgnHXFsgOjNUI36bOEVTW4w==} - '@amplitude/plugin-session-replay-browser@1.27.7': - resolution: {integrity: sha512-KcGMFaBGqZAOm1Gdzio9d95IL3Nmp5J1xOu1PD0NAPYLfW1MyoyA5PFIIlMqqVf1DoCjmgqP7AY4swetU2tpWg==} + '@amplitude/plugin-page-view-tracking-browser@2.10.1': + resolution: {integrity: sha512-XEk0Z7ZfN6gV0h1R2hOZkby/SUTIbGU8SgWR8gt4O+DEx+pxfTQEuCM2ya1YaCV2h1SBrTK4bnIHgPax/4/HoA==} - '@amplitude/plugin-web-vitals-browser@1.1.28': - resolution: {integrity: sha512-gs4Y1eOuVUEDwYEJF82f/GmgQ7iM4Y/eZTkftJKjFsBNbrPro2CuLymfdAcC+QuVfyrp3qAiWcSGnjDXA6ZbQg==} + '@amplitude/plugin-session-replay-browser@1.27.10': + resolution: {integrity: sha512-AWvAtiQ9/T52DCXS3hcjtHQs4GvZxM7rxgs24DgxqFY2uwCTTnI78le4U7nPWhSrj02YK+3b8y7QN3mm23lHyQ==} + + 
'@amplitude/plugin-web-vitals-browser@1.1.30': + resolution: {integrity: sha512-nLZk2dTHG8pLd/fFH0zdIhWnu4u+oPc/DKBYXwZ4zk6YKOkl0V+sbDUNGNnZWlOWRykq+0rkOX/WnUyClvMtaQ==} '@amplitude/rrdom@2.0.0-alpha.37': resolution: {integrity: sha512-u4dSnBtlbJ8oU5P/Ywl2RLqvjqWbkl4ScMUbvQA7in4pWcx+0NRN+VVjLZXQcd8Fn7E/rcxjeUh7e7HfwvdasQ==} @@ -1395,8 +1410,8 @@ packages: '@amplitude/rrweb@2.0.0-alpha.37': resolution: {integrity: sha512-jJkSpPYiVgOZB422pb2jOJJn3pvb5E5f9vKK8CEmUlk2mVAl6kPQzW98mb05M65OJFj5nn9tSe9h5r5+Cl93ag==} - '@amplitude/session-replay-browser@1.36.0': - resolution: {integrity: sha512-HZpNRMRAiLbzGF84DzF+ZH5WztJH4tVe2e/FzYJ2r27Sgf2gftCmzCB9pN8BXXcHKYtQK8/Qol+PTmSIzvyvEw==} + '@amplitude/session-replay-browser@1.37.0': + resolution: {integrity: sha512-65KC35dK2yxHoBTDTZeJC8qPchj4lFqTuNjBbH1jaV3hzYoRrGA/xWXLZgxlFvc/7yvcGBbTUW2TeGMAeW6FUg==} '@amplitude/targeting@0.2.0': resolution: {integrity: sha512-/50ywTrC4hfcfJVBbh5DFbqMPPfaIOivZeb5Gb+OGM03QrA+lsUqdvtnKLNuWtceD4H6QQ2KFzPJ5aAJLyzVDA==} @@ -1634,8 +1649,8 @@ packages: '@cucumber/cucumber-expressions@19.0.0': resolution: {integrity: sha512-4FKoOQh2Uf6F6/Ln+1OxuK8LkTg6PyAqekhf2Ix8zqV2M54sH+m7XNJNLhOFOAW/t9nxzRbw2CcvXbCLjcvHZg==} - '@cucumber/cucumber@12.8.0': - resolution: {integrity: sha512-sRG2QMAgCic4Uq1q+5LRzApEHiNGX5rhQY/GuOJZ9BIySrGPA9pevB0imJsZvdzt9scaWyIM3c7dIf4Dp1YQRA==} + '@cucumber/cucumber@12.8.1': + resolution: {integrity: sha512-hCXxiStjbZsRVZlV+CMywkqBtJ6RZTQeXSBZGPHm1YoIOI6YB8pCo0KlnJMmxfKfoeUKagtQMNPnpJBXwhkUjQ==} engines: {node: 20 || 22 || >=24} hasBin: true @@ -1659,8 +1674,8 @@ packages: peerDependencies: '@cucumber/messages': '>=18' - '@cucumber/junit-xml-formatter@0.13.2': - resolution: {integrity: sha512-worYkxjeOWJV+b7WkgJekWgFHlIhbuocnFK3hP+pMYXqZMmkXsxAorYPjeF8KyLnZXajw5fKHS2bM9rQIUI7Zw==} + '@cucumber/junit-xml-formatter@0.13.3': + resolution: {integrity: sha512-w9ujOxiuKDtU6fLzJz+wp4Sgp5Xu6ba7ls00LHJccVmQU0Ba7zs+AHnv3iIgPjKZAQe1w8x93dr8Gaubh7Vqkg==} peerDependencies: 
'@cucumber/messages': '*' @@ -1678,8 +1693,8 @@ packages: '@cucumber/cucumber': '>=7.0.0' '@cucumber/messages': '*' - '@cucumber/query@14.7.0': - resolution: {integrity: sha512-fiqZ4gMEgYjmbuWproF/YeCdD5y+gD2BqgBIGbpihOsx6UlNsyzoDSfO+Tny0q65DxfK+pHo2UkPyEl7dO7wmQ==} + '@cucumber/query@15.0.1': + resolution: {integrity: sha512-FMfT3orJblRsOxvU2doECBvQmauizYlj+5JsM8atAKKPbnQTj7v2/OrnuykvQpfZNBf19DYbRq1e832vllRP/g==} peerDependencies: '@cucumber/messages': '*' @@ -1702,9 +1717,18 @@ packages: peerDependencies: tailwindcss: '*' + '@emnapi/core@1.9.2': + resolution: {integrity: sha512-UC+ZhH3XtczQYfOlu3lNEkdW/p4dsJ1r/bP7H8+rhao3TTTMO1ATq/4DdIi23XuGoFY+Cz0JmCbdVl0hz9jZcA==} + '@emnapi/runtime@1.9.1': resolution: {integrity: sha512-VYi5+ZVLhpgK4hQ0TAjiQiZ6ol0oe4mBx7mVv7IflsiEp0OWoVsp/+f9Vc1hOhE0TtkORVrI1GvzyreqpgWtkA==} + '@emnapi/runtime@1.9.2': + resolution: {integrity: sha512-3U4+MIWHImeyu1wnmVygh5WlgfYDtyf0k8AbLhMFxOipihf6nrWC4syIm/SwEeec0mNSafiiNnMJwbza/Is6Lw==} + + '@emnapi/wasi-threads@1.2.1': + resolution: {integrity: sha512-uTII7OYF+/Mes/MrcIOYp5yOtSMLBWSIoLPpcgwipoiKbli6k322tcoFsxoIIxPDqW01SQGAgko4EzZi2BNv2w==} + '@emoji-mart/data@1.2.1': resolution: {integrity: sha512-no2pQMWiBy6gpBEiqGeU77/bFejDqUTRY7KX+0+iur13op3bqUsXdnwoZs6Xb1zbv0gAj5VvS1PWoUUckSr5Dw==} @@ -1940,8 +1964,8 @@ packages: resolution: {integrity: sha512-OL0RJzC/CBzli0DrrR31qzj6d6i6Mm3HByuhflhl4LOBiWxN+3i6/t/ZQQNii4tjksXi8r2CRW1wMpWA2ULUEw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/config-array@0.23.4': - resolution: {integrity: sha512-lf19F24LSMfF8weXvW5QEtnLqW70u7kgit5e9PSx0MsHAFclGd1T9ynvWEMDT1w5J4Qt54tomGeAhdoAku1Xow==} + '@eslint/config-array@0.23.5': + resolution: {integrity: sha512-Y3kKLvC1dvTOT+oGlqNQ1XLqK6D1HU2YXPc52NmAlJZbMMWDzGYXMiPRJ8TYD39muD/OTjlZmNJ4ib7dvSrMBA==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} '@eslint/config-helpers@0.2.3': @@ -1952,6 +1976,10 @@ packages: resolution: {integrity: 
sha512-jJhqiY3wPMlWWO3370M86CPJ7pt8GmEwSLglMfQhjXal07RCvhmU0as4IuUEW5SJeunfItiEetHmSxCCe9lDBg==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} + '@eslint/config-helpers@0.5.5': + resolution: {integrity: sha512-eIJYKTCECbP/nsKaaruF6LW967mtbQbsw4JTtSVkUQc9MneSkbrgPJAbKl9nWr0ZeowV8BfsarBmPpBzGelA2w==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + '@eslint/core@0.14.0': resolution: {integrity: sha512-qIbV0/JZr7iSDjqAc60IqbLdsj9GDt16xQtWD+B78d/HAlvysGdZZ6rpJHGAc2T0FQx1X6thsSPdnoiGKdNtdg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -1968,6 +1996,10 @@ packages: resolution: {integrity: sha512-8FTGbNzTvmSlc4cZBaShkC6YvFMG0riksYWRFKXztqVdXaQbcZLXlFbSpC05s70sGEsXAw0qwhx69JiW7hQS7A==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} + '@eslint/core@1.2.1': + resolution: {integrity: sha512-MwcE1P+AZ4C6DWlpin/OmOA54mmIZ/+xZuJiQd4SyB29oAJjN30UW9wkKNptW2ctp4cEsvhlLY/CsQ1uoHDloQ==} + engines: {node: ^20.19.0 || ^22.13.0 || >=24} + '@eslint/css-tree@4.0.1': resolution: {integrity: sha512-2fCSKRwoUHntYq9J1Lm28s2zeoCSNh1Cbk6Tg7k7ViwOnveIfZwPRFGwBglz+dzw2MHe5w5Fo9+VJfqL9nco2w==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} @@ -2001,8 +2033,8 @@ packages: resolution: {integrity: sha512-VtAOaymWVfZcmZbp6E2mympDIHvyjXs/12LqWYjVw6qjrfF+VK+fyG33kChz3nnK+SU5/NeHOqrTEHS8sXO3OA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@eslint/object-schema@3.0.4': - resolution: {integrity: sha512-55lO/7+Yp0ISKRP0PsPtNTeNGapXaO085aELZmWCVc5SH3jfrqpuU6YgOdIxMS99ZHkQN1cXKE+cdIqwww9ptw==} + '@eslint/object-schema@3.0.5': + resolution: {integrity: sha512-vqTaUEgxzm+YDSdElad6PiRoX4t8VGDjCtt05zn4nU810UIx/uNEV7/lZJ6KwFThKZOzOxzXy48da+No7HZaMw==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} '@eslint/plugin-kit@0.3.5': @@ -2017,8 +2049,8 @@ packages: resolution: {integrity: sha512-iH1B076HoAshH1mLpHMgwdGeTs0CYwL0SPMkGuSebZrwBp16v415e9NZXg2jtrqPVQjf6IANe2Vtlr5KswtcZQ==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} - '@eslint/plugin-kit@0.7.0': - resolution: {integrity: 
sha512-ejvBr8MQCbVsWNZnCwDXjUKq40MDmHalq7cJ6e9s/qzTUFIIo/afzt1Vui9T97FM/V/pN4YsFVoed5NIa96RDg==} + '@eslint/plugin-kit@0.7.1': + resolution: {integrity: sha512-rZAP3aVgB9ds9KOeUSL+zZ21hPmo8dh6fnIFwRQj5EAZl9gzR7wxYbYXYysAM8CTqGmUGyp2S4kUdV17MnGuWQ==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} '@floating-ui/core@1.7.5': @@ -2403,20 +2435,26 @@ packages: '@emnapi/core': ^1.7.1 '@emnapi/runtime': ^1.7.1 + '@napi-rs/wasm-runtime@1.1.4': + resolution: {integrity: sha512-3NQNNgA1YSlJb/kMH1ildASP9HW7/7kYnRI2szWJaofaS1hWmbGI4H+d3+22aGzXXN9IJ+n+GiFVcGipJP18ow==} + peerDependencies: + '@emnapi/core': ^1.7.1 + '@emnapi/runtime': ^1.7.1 + '@neoconfetti/react@1.0.0': resolution: {integrity: sha512-klcSooChXXOzIm+SE5IISIAn3bYzYfPjbX7D7HoqZL84oAfgREeSg5vSIaSFH+DaGzzvImTyWe1OyrJ67vik4A==} '@next/env@16.0.0': resolution: {integrity: sha512-s5j2iFGp38QsG1LWRQaE2iUY3h1jc014/melHFfLdrsMJPqxqDQwWNwyQTcNoUSGZlCVZuM7t7JDMmSyRilsnA==} - '@next/env@16.2.3': - resolution: {integrity: sha512-ZWXyj4uNu4GCWQw9cjRxWlbD+33mcDszIo9iQxFnBX3Wmgq9ulaSJcl6VhuWx5pCWqqD+9W6Wfz7N0lM5lYPMA==} + '@next/env@16.2.4': + resolution: {integrity: sha512-dKkkOzOSwFYe5RX6y26fZgkSpVAlIOJKQHIiydQcrWH6y/97+RceSOAdjZ14Qa3zLduVUy0TXcn+EiM6t4rPgw==} - '@next/eslint-plugin-next@16.2.3': - resolution: {integrity: sha512-nE/b9mht28XJxjTwKs/yk7w4XTaU3t40UHVAky6cjiijdP/SEy3hGsnQMPxmXPTpC7W4/97okm6fngKnvCqVaA==} + '@next/eslint-plugin-next@16.2.4': + resolution: {integrity: sha512-tOX826JJ96gYK/go18sPUgMq9FK1tqxBFfUCEufJb5XIkWFFmpgU7mahJANKGkHs7F41ir3tReJ3Lv5La0RvhA==} - '@next/mdx@16.2.3': - resolution: {integrity: sha512-mm7XNfPagSIcN8jFtozB9toeh5ESES0KCLRoo0gu6xydijvnIrV7dRIK3akNL3Tecc8AHX1FNzYZOZTeFU6RCw==} + '@next/mdx@16.2.4': + resolution: {integrity: sha512-e/3bgla+/oF3vDlndI0eFPa0bnP47HPVA0InsAJi7Jr3DwV8WpEGuOcm/3PdI5/93FfNiBhMVeVHZpm1sFlmJw==} peerDependencies: '@mdx-js/loader': '>=0.15.0' '@mdx-js/react': '>=0.15.0' @@ -2426,54 +2464,54 @@ packages: '@mdx-js/react': optional: true - 
'@next/swc-darwin-arm64@16.2.3': - resolution: {integrity: sha512-u37KDKTKQ+OQLvY+z7SNXixwo4Q2/IAJFDzU1fYe66IbCE51aDSAzkNDkWmLN0yjTUh4BKBd+hb69jYn6qqqSg==} + '@next/swc-darwin-arm64@16.2.4': + resolution: {integrity: sha512-OXTFFox5EKN1Ym08vfrz+OXxmCcEjT4SFMbNRsWZE99dMqt2Kcusl5MqPXcW232RYkMLQTy0hqgAMEsfEd/l2A==} engines: {node: '>= 10'} cpu: [arm64] os: [darwin] - '@next/swc-darwin-x64@16.2.3': - resolution: {integrity: sha512-gHjL/qy6Q6CG3176FWbAKyKh9IfntKZTB3RY/YOJdDFpHGsUDXVH38U4mMNpHVGXmeYW4wj22dMp1lTfmu/bTQ==} + '@next/swc-darwin-x64@16.2.4': + resolution: {integrity: sha512-XhpVnUfmYWvD3YrXu55XdcAkQtOnvaI6wtQa8fuF5fGoKoxIUZ0kWPtcOfqJEWngFF/lOS9l3+O9CcownhiQxQ==} engines: {node: '>= 10'} cpu: [x64] os: [darwin] - '@next/swc-linux-arm64-gnu@16.2.3': - resolution: {integrity: sha512-U6vtblPtU/P14Y/b/n9ZY0GOxbbIhTFuaFR7F4/uMBidCi2nSdaOFhA0Go81L61Zd6527+yvuX44T4ksnf8T+Q==} + '@next/swc-linux-arm64-gnu@16.2.4': + resolution: {integrity: sha512-Mx/tjlNA3G8kg14QvuGAJ4xBwPk1tUHq56JxZ8CXnZwz1Etz714soCEzGQQzVMz4bEnGPowzkV6Xrp6wAkEWOQ==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] libc: [glibc] - '@next/swc-linux-arm64-musl@16.2.3': - resolution: {integrity: sha512-/YV0LgjHUmfhQpn9bVoGc4x4nan64pkhWR5wyEV8yCOfwwrH630KpvRg86olQHTwHIn1z59uh6JwKvHq1h4QEw==} + '@next/swc-linux-arm64-musl@16.2.4': + resolution: {integrity: sha512-iVMMp14514u7Nup2umQS03nT/bN9HurK8ufylC3FZNykrwjtx7V1A7+4kvhbDSCeonTVqV3Txnv0Lu+m2oDXNg==} engines: {node: '>= 10'} cpu: [arm64] os: [linux] libc: [musl] - '@next/swc-linux-x64-gnu@16.2.3': - resolution: {integrity: sha512-/HiWEcp+WMZ7VajuiMEFGZ6cg0+aYZPqCJD3YJEfpVWQsKYSjXQG06vJP6F1rdA03COD9Fef4aODs3YxKx+RDQ==} + '@next/swc-linux-x64-gnu@16.2.4': + resolution: {integrity: sha512-EZOvm1aQWgnI/N/xcWOlnS3RQBk0VtVav5Zo7n4p0A7UKyTDx047k8opDbXgBpHl4CulRqRfbw3QrX2w5UOXMQ==} engines: {node: '>= 10'} cpu: [x64] os: [linux] libc: [glibc] - '@next/swc-linux-x64-musl@16.2.3': - resolution: {integrity: 
sha512-Kt44hGJfZSefebhk/7nIdivoDr3Ugp5+oNz9VvF3GUtfxutucUIHfIO0ZYO8QlOPDQloUVQn4NVC/9JvHRk9hw==} + '@next/swc-linux-x64-musl@16.2.4': + resolution: {integrity: sha512-h9FxsngCm9cTBf71AR4fGznDEDx1hS7+kSEiIRjq5kO1oXWm07DxVGZjCvk0SGx7TSjlUqhI8oOyz7NfwAdPoA==} engines: {node: '>= 10'} cpu: [x64] os: [linux] libc: [musl] - '@next/swc-win32-arm64-msvc@16.2.3': - resolution: {integrity: sha512-O2NZ9ie3Tq6xj5Z5CSwBT3+aWAMW2PIZ4egUi9MaWLkwaehgtB7YZjPm+UpcNpKOme0IQuqDcor7BsW6QBiQBw==} + '@next/swc-win32-arm64-msvc@16.2.4': + resolution: {integrity: sha512-3NdJV5OXMSOeJYijX+bjaLge3mJBlh4ybydbT4GFoB/2hAojWHtMhl3CYlYoMrjPuodp0nzFVi4Tj2+WaMg+Ow==} engines: {node: '>= 10'} cpu: [arm64] os: [win32] - '@next/swc-win32-x64-msvc@16.2.3': - resolution: {integrity: sha512-Ibm29/GgB/ab5n7XKqlStkm54qqZE8v2FnijUPBgrd67FWrac45o/RsNlaOWjme/B5UqeWt/8KM4aWBwA1D2Kw==} + '@next/swc-win32-x64-msvc@16.2.4': + resolution: {integrity: sha512-kMVGgsqhO5YTYODD9IPGGhA6iprWidQckK3LmPeW08PIFENRmgfb4MjXHO+p//d+ts2rpjvK5gXWzXSMrPl9cw==} engines: {node: '>= 10'} cpu: [x64] os: [win32] @@ -2538,129 +2576,129 @@ packages: resolution: {integrity: sha512-XRO0zi2NIUKq2lUk3T1ecFSld1fMWRKE6naRFGkgkdeosx7IslyUKNv5Dcb5PJTja9tHJoFu0v/7yEpAkrkrTg==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} - '@oxc-parser/binding-android-arm-eabi@0.121.0': - resolution: {integrity: sha512-n07FQcySwOlzap424/PLMtOkbS7xOu8nsJduKL8P3COGHKgKoDYXwoAHCbChfgFpHnviehrLWIPX0lKGtbEk/A==} + '@oxc-parser/binding-android-arm-eabi@0.126.0': + resolution: {integrity: sha512-svyoHt25J4741QJ5aa4R+h0iiBeSRt63Lr3aAZcxy2c/NeSE1IfDeMnSij6rIg7EjxkdlXzz613wUjeCeilBNA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [android] - '@oxc-parser/binding-android-arm64@0.121.0': - resolution: {integrity: sha512-/Dd1xIXboYAicw+twT2utxPD7bL8qh7d3ej0qvaYIMj3/EgIrGR+tSnjCUkiCT6g6uTC0neSS4JY8LxhdSU/sA==} + '@oxc-parser/binding-android-arm64@0.126.0': + resolution: {integrity: 
sha512-hPEBRKgplp1mG9GkINFsr4JVMDNrGJLOqfDaadTWpAoTnzYR5Rmv8RMvB3hJZpiNvbk1aacopdHUP1pggMQ/cw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [android] - '@oxc-parser/binding-darwin-arm64@0.121.0': - resolution: {integrity: sha512-A0jNEvv7QMtCO1yk205t3DWU9sWUjQ2KNF0hSVO5W9R9r/R1BIvzG01UQAfmtC0dQm7sCrs5puixurKSfr2bRQ==} + '@oxc-parser/binding-darwin-arm64@0.126.0': + resolution: {integrity: sha512-ccRpu9sdYmznePJQG5halhs0FW5tw5a8zRSoZXOzM1OjoeZ4jiRRruFiPclsD59edoVAK1l83dvfjWz1nQi6lg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [darwin] - '@oxc-parser/binding-darwin-x64@0.121.0': - resolution: {integrity: sha512-SsHzipdxTKUs3I9EOAPmnIimEeJOemqRlRDOp9LIj+96wtxZejF51gNibmoGq8KoqbT1ssAI5po/E3J+vEtXGA==} + '@oxc-parser/binding-darwin-x64@0.126.0': + resolution: {integrity: sha512-CHB4zVjNSKqx8Fw9pHowzQQnjjuq04i4Ng0Avj+DixlwhwAoMYqlFbocYIlbg+q3zOLGlm7vEHm83jqEMitnyg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [darwin] - '@oxc-parser/binding-freebsd-x64@0.121.0': - resolution: {integrity: sha512-v1APOTkCp+RWOIDAHRoaeW/UoaHF15a60E8eUL6kUQXh+i4K7PBwq2Wi7jm8p0ymID5/m/oC1w3W31Z/+r7HQw==} + '@oxc-parser/binding-freebsd-x64@0.126.0': + resolution: {integrity: sha512-RQ3nEJdcDKBfBjmLJ3Vl1d0KQERPV1P8eUrnBm7+VTYyoaJSPLVFuPg1mlD1hk3n0/879VLFMfusFkBal4ssWQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [freebsd] - '@oxc-parser/binding-linux-arm-gnueabihf@0.121.0': - resolution: {integrity: sha512-PmqPQuqHZyFVWA4ycr0eu4VnTMmq9laOHZd+8R359w6kzuNZPvmmunmNJ8ybkm769A0nCoVp3TJ6dUz7B3FYIQ==} + '@oxc-parser/binding-linux-arm-gnueabihf@0.126.0': + resolution: {integrity: sha512-onipc2wCDA7Bauzb4KK1mab0GsEDf4ujiIfWECdnmY/2LlzAoX3xdQRLAUyEDB1kn3yilHBrkmXDdHluyHXxiw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxc-parser/binding-linux-arm-musleabihf@0.121.0': - resolution: {integrity: sha512-vF24htj+MOH+Q7y9A8NuC6pUZu8t/C2Fr/kDOi2OcNf28oogr2xadBPXAbml802E8wRAVfbta6YLDQTearz+jw==} + 
'@oxc-parser/binding-linux-arm-musleabihf@0.126.0': + resolution: {integrity: sha512-5BuJJPohrV5NJ8lmcYOMbfRCUGoYH5J9HZHeuqOLwkHXWAuPMN3X1h8bC/2mWjmosdbfTtmyIdX3spS/TkqKNg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm] os: [linux] - '@oxc-parser/binding-linux-arm64-gnu@0.121.0': - resolution: {integrity: sha512-wjH8cIG2Lu/3d64iZpbYr73hREMgKAfu7fqpXjgM2S16y2zhTfDIp8EQjxO8vlDtKP5Rc7waZW72lh8nZtWrpA==} + '@oxc-parser/binding-linux-arm64-gnu@0.126.0': + resolution: {integrity: sha512-r2KApRgm2pOJaduRm6GOT8x0whcr67AyejNkSdzPt34GJ+Y3axcXN2mwlTs+8lfO/SSmpO5ZJGYiHYnxEE0jkw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] libc: [glibc] - '@oxc-parser/binding-linux-arm64-musl@0.121.0': - resolution: {integrity: sha512-qT663J/W8yQFw3dtscbEi9LKJevr20V7uWs2MPGTnvNZ3rm8anhhE16gXGpxDOHeg9raySaSHKhd4IGa3YZvuw==} + '@oxc-parser/binding-linux-arm64-musl@0.126.0': + resolution: {integrity: sha512-FQ+MMh7MT0Dr/u8+RWmWKlfoeWPQyHDbhhxJShJlYtROXXPHsRs9EvmQOZZ3sx4Nn7JU8NX+oyw2YzQ7anBJcA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [linux] libc: [musl] - '@oxc-parser/binding-linux-ppc64-gnu@0.121.0': - resolution: {integrity: sha512-mYNe4NhVvDBbPkAP8JaVS8lC1dsoJZWH5WCjpw5E+sjhk1R08wt3NnXYUzum7tIiWPfgQxbCMcoxgeemFASbRw==} + '@oxc-parser/binding-linux-ppc64-gnu@0.126.0': + resolution: {integrity: sha512-Wv/T8C98hRQhGTlx2XFyLn5raRMp9U1lOQD+YnXNgAr7wHbJJpZ8mDBU7Rw+M3WytGcGTFcr6kqgfyQeHVtLbQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ppc64] os: [linux] libc: [glibc] - '@oxc-parser/binding-linux-riscv64-gnu@0.121.0': - resolution: {integrity: sha512-+QiFoGxhAbaI/amqX567784cDyyuZIpinBrJNxUzb+/L2aBRX67mN6Jv40pqduHf15yYByI+K5gUEygCuv0z9w==} + '@oxc-parser/binding-linux-riscv64-gnu@0.126.0': + resolution: {integrity: sha512-DHx1rT1zauW0ZbLHOiQh5AC9Xs3UkWx2XmfZHs+7nnWYr3sagrufoUQC+/XPwwjMIlCFXiFGM0sFh3TyOCZwqA==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] libc: [glibc] - '@oxc-parser/binding-linux-riscv64-musl@0.121.0': - resolution: 
{integrity: sha512-9ykEgyTa5JD/Uhv2sttbKnCfl2PieUfOjyxJC/oDL2UO0qtXOtjPLl7H8Kaj5G7p3hIvFgu3YWvAxvE0sqY+hQ==} + '@oxc-parser/binding-linux-riscv64-musl@0.126.0': + resolution: {integrity: sha512-umDc2mTShH0U2zcEYf8mIJ163seLJNn54ZUZYeI5jD4qlg9izPwoLrC2aNPKlMJTu6u/ysmQWiEvIiaAG+INkw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [riscv64] os: [linux] libc: [musl] - '@oxc-parser/binding-linux-s390x-gnu@0.121.0': - resolution: {integrity: sha512-DB1EW5VHZdc1lIRjOI3bW/wV6R6y0xlfvdVrqj6kKi7Ayu2U3UqUBdq9KviVkcUGd5Oq+dROqvUEEFRXGAM7EQ==} + '@oxc-parser/binding-linux-s390x-gnu@0.126.0': + resolution: {integrity: sha512-PXXeWayclRtO1pxQEeCpiqIglQdhK2mAI2VX5xnsWdImzSB5GpoQ8TNw7vTCKk2k+GZuxl+q1knncidjCyUP9w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [s390x] os: [linux] libc: [glibc] - '@oxc-parser/binding-linux-x64-gnu@0.121.0': - resolution: {integrity: sha512-s4lfobX9p4kPTclvMiH3gcQUd88VlnkMTF6n2MTMDAyX5FPNRhhRSFZK05Ykhf8Zy5NibV4PbGR6DnK7FGNN6A==} + '@oxc-parser/binding-linux-x64-gnu@0.126.0': + resolution: {integrity: sha512-wzocjxm34TbB3bFlqG65JiLtvf6ZDg2ZxRkLLbgXwDQUNU+0MPjQN8zy/0jBKNA5fnPLk3XeVdZ7Uin+7+CVkg==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] libc: [glibc] - '@oxc-parser/binding-linux-x64-musl@0.121.0': - resolution: {integrity: sha512-P9KlyTpuBuMi3NRGpJO8MicuGZfOoqZVRP1WjOecwx8yk4L/+mrCRNc5egSi0byhuReblBF2oVoDSMgV9Bj4Hw==} + '@oxc-parser/binding-linux-x64-musl@0.126.0': + resolution: {integrity: sha512-e83uftP60jmkPs2+CW6T6A1GYzN2H6IumDAiTntv9WyHR73PI3ImHNBkYqnA3ukeKI3xjcCbhSh9QeJWmufxGQ==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [linux] libc: [musl] - '@oxc-parser/binding-openharmony-arm64@0.121.0': - resolution: {integrity: sha512-R+4jrWOfF2OAPPhj3Eb3U5CaKNAH9/btMveMULIrcNW/hjfysFQlF8wE0GaVBr81dWz8JLgQlsxwctoL78JwXw==} + '@oxc-parser/binding-openharmony-arm64@0.126.0': + resolution: {integrity: sha512-4WiOILHnPrTDY2/L4mE6PZCYwLN1d3ghma6BuTJ452CCgzRMt3uFplCtR+o3r9zdUWJYb370UizpI9CUcWXr1A==} engines: {node: ^20.19.0 || 
>=22.12.0} cpu: [arm64] os: [openharmony] - '@oxc-parser/binding-wasm32-wasi@0.121.0': - resolution: {integrity: sha512-5TFISkPTymKvsmIlKasPVTPuWxzCcrT8pM+p77+mtQbIZDd1UC8zww4CJcRI46kolmgrEX6QpKO8AvWMVZ+ifw==} + '@oxc-parser/binding-wasm32-wasi@0.126.0': + resolution: {integrity: sha512-Y17hhnrQTrxgAxAyAq401vnN9URsAL4s5AjqpG1NDsXSlhe1yBNnns+rC2P6xcMoitgX5nKH2ryYt9oiFRlzLw==} engines: {node: '>=14.0.0'} cpu: [wasm32] - '@oxc-parser/binding-win32-arm64-msvc@0.121.0': - resolution: {integrity: sha512-V0pxh4mql4XTt3aiEtRNUeBAUFOw5jzZNxPABLaOKAWrVzSr9+XUaB095lY7jqMf5t8vkfh8NManGB28zanYKw==} + '@oxc-parser/binding-win32-arm64-msvc@0.126.0': + resolution: {integrity: sha512-Znug1u1iRvT4VC3jANz6nhGBHsFwEFMxuimYpJFwMtsB6H5FcEoZRMmH26tHkSTD03JvDmG+gB65W3ajLjPcSw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [arm64] os: [win32] - '@oxc-parser/binding-win32-ia32-msvc@0.121.0': - resolution: {integrity: sha512-4Ob1qvYMPnlF2N9rdmKdkQFdrq16QVcQwBsO8yiPZXof0fHKFF+LmQV501XFbi7lHyrKm8rlJRfQ/M8bZZPVLw==} + '@oxc-parser/binding-win32-ia32-msvc@0.126.0': + resolution: {integrity: sha512-qrw7mx5hFFTxVSXToOA40hpnjgNB/DJprZchtB4rDKNLKqkD3F26HbzaQeH1nxAKej0efSZfJd5Sw3qdtOLGhw==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [ia32] os: [win32] - '@oxc-parser/binding-win32-x64-msvc@0.121.0': - resolution: {integrity: sha512-BOp1KCzdboB1tPqoCPXgntgFs0jjeSyOXHzgxVFR7B/qfr3F8r4YDacHkTOUNXtDgM8YwKnkf3rE5gwALYX7NA==} + '@oxc-parser/binding-win32-x64-msvc@0.126.0': + resolution: {integrity: sha512-ibB1s+mPUFXvS7MFJO2jpw/aCNs/P6ifnWlRyTYB+WYBpniOiCcHQQskZneJtwcjQMDRol3RGG3ihoYnzXSY4w==} engines: {node: ^20.19.0 || >=22.12.0} cpu: [x64] os: [win32] @@ -2669,9 +2707,6 @@ packages: resolution: {integrity: sha512-oksjxfqDNmIYMGlIgLzYgnz5YjZax27RtQezsPpKEGo9AC5LOaIGHsivCCeaAWdCtPnRyjZXM/7svreCC8kZVQ==} engines: {node: ^20.19.0 || >=22.12.0} - '@oxc-project/types@0.121.0': - resolution: {integrity: sha512-CGtOARQb9tyv7ECgdAlFxi0Fv7lmzvmlm2rpD/RdijOO9rfk/JvB1CjT8EnoD+tjna/IYgKKw3IV7objRb+aYw==} - 
'@oxc-project/types@0.126.0': resolution: {integrity: sha512-oGfVtjAgwQVVpfBrbtk4e1XDyWHRFta6BS3GWVzrF8xYBT2VGQAk39yJS/wFSMrZqoiCU4oghT3Ch0HaHGIHcQ==} @@ -3375,32 +3410,32 @@ packages: rollup: optional: true - '@sentry-internal/browser-utils@10.48.0': - resolution: {integrity: sha512-SCiTLBXzugFKxev6NoKYBIhQoDk0gUh0AVVVepCBqfCJiWBG01Zvv0R5tCVohr4cWRllkQ8mlBdNQd/I7s9tdA==} + '@sentry-internal/browser-utils@10.49.0': + resolution: {integrity: sha512-n0QRx0Ysx6mPfIydTkz7VP0FmwM+/EqMZiRqdsU3aTYsngE9GmEDV0OL1bAy6a8N/C1xf9vntkuAtj6N/8Z51w==} engines: {node: '>=18'} - '@sentry-internal/feedback@10.48.0': - resolution: {integrity: sha512-tGkEyOM1HDS9qebDphUMEnyk3qq/50AnuTBiFmMJyjNzowylVGmRRk0sr3xkmbVHCDXQCiYnDmSVlJ2x4SDMrQ==} + '@sentry-internal/feedback@10.49.0': + resolution: {integrity: sha512-JNsUBGv0faCFE7MeZUH99Y9lU9qq3LBALbLxpE1x7ngNrQnVYRlcFgdqaD/btNBKr8awjYL8gmcSkHBWskGqLQ==} engines: {node: '>=18'} - '@sentry-internal/replay-canvas@10.48.0': - resolution: {integrity: sha512-9nWuN2z4O+iwbTfuYV5ZmngBgJU/ZxfOo47A5RJP3Nu/kl59aJ1lUhILYOKyeNOIC/JyeERmpIcTxnlPXQzZ3Q==} + '@sentry-internal/replay-canvas@10.49.0': + resolution: {integrity: sha512-7D/NrgH1Qwx5trDYaaTSSJmCb1yVQQLqFG4G/S9x2ltzl9876lSGJL8UeW8ReNQgF3CDAcwbmm/9aXaVSBUNZA==} engines: {node: '>=18'} - '@sentry-internal/replay@10.48.0': - resolution: {integrity: sha512-sevRTePfuk4PNuz9KAKpmTZEomAU0aLXyIhOwA0OnUDdxPhkY8kq5lwDbuxTHv6DQUjUX3YgFbY45VH1JEqHKA==} + '@sentry-internal/replay@10.49.0': + resolution: {integrity: sha512-IEy4lwHVMiRE3JAcn+kFKjsTgalDOCSTf20SoFd+nkt6rN/k1RDyr4xpdfF//Kj3UdeTmbuibYjK5H/FLhhnGg==} engines: {node: '>=18'} - '@sentry/browser@10.48.0': - resolution: {integrity: sha512-4jt2zX2ExgFcNe2x+W+/k81fmDUsOrquGtt028CiGuDuma6kEsWBI4JbooT1jhj2T+eeUxe3YGbM23Zhh7Ghhw==} + '@sentry/browser@10.49.0': + resolution: {integrity: sha512-bGCHc+wK2Dx67YoSbmtlt04alqWfQ+dasD/GVipVOq50gvw/BBIDHTEWRJEjACl+LrvszeY54V+24p8z4IgysA==} engines: {node: '>=18'} - '@sentry/core@10.48.0': - resolution: {integrity: 
sha512-h8F+fXVwYC9ro5ZaO8V+v3vqc0awlXHGblEAuVxSGgh4IV/oFX+QVzXeDTTrFOFS6v/Vn5vAyu240eJrJAS6/g==} + '@sentry/core@10.49.0': + resolution: {integrity: sha512-UaFeum3LUM1mB0d67jvKnqId1yWQjyqmaDV6kWngG03x+jqXb08tJdGpSoxjXZe13jFBbiBL/wKDDYIK7rCK4g==} engines: {node: '>=18'} - '@sentry/react@10.48.0': - resolution: {integrity: sha512-uc93vKjmu6gNns+JAX4qquuxWpAMit0uGPA1TYlMjct9NG1uX3TkDPJAr9Pgd1lOXx8mKqCmj5fK33QeExMpPw==} + '@sentry/react@10.49.0': + resolution: {integrity: sha512-WdfJve0orTiumr25Ozgs2p2KaJR9xV82Z5V9IYBi0TadsurSWK6xI6SAFjw84tQht9Fp8q4UCn3QYCnApF4BfA==} engines: {node: '>=18'} peerDependencies: react: ^16.14.0 || 17.x || 18.x || 19.x @@ -3634,69 +3669,69 @@ packages: zod: optional: true - '@tailwindcss/node@4.2.2': - resolution: {integrity: sha512-pXS+wJ2gZpVXqFaUEjojq7jzMpTGf8rU6ipJz5ovJV6PUGmlJ+jvIwGrzdHdQ80Sg+wmQxUFuoW1UAAwHNEdFA==} + '@tailwindcss/node@4.2.4': + resolution: {integrity: sha512-Ai7+yQPxz3ddrDQzFfBKdHEVBg0w3Zl83jnjuwxnZOsnH9pGn93QHQtpU0p/8rYWxvbFZHneni6p1BSLK4DkGA==} - '@tailwindcss/oxide-android-arm64@4.2.2': - resolution: {integrity: sha512-dXGR1n+P3B6748jZO/SvHZq7qBOqqzQ+yFrXpoOWWALWndF9MoSKAT3Q0fYgAzYzGhxNYOoysRvYlpixRBBoDg==} + '@tailwindcss/oxide-android-arm64@4.2.4': + resolution: {integrity: sha512-e7MOr1SAn9U8KlZzPi1ZXGZHeC5anY36qjNwmZv9pOJ8E4Q6jmD1vyEHkQFmNOIN7twGPEMXRHmitN4zCMN03g==} engines: {node: '>= 20'} cpu: [arm64] os: [android] - '@tailwindcss/oxide-darwin-arm64@4.2.2': - resolution: {integrity: sha512-iq9Qjr6knfMpZHj55/37ouZeykwbDqF21gPFtfnhCCKGDcPI/21FKC9XdMO/XyBM7qKORx6UIhGgg6jLl7BZlg==} + '@tailwindcss/oxide-darwin-arm64@4.2.4': + resolution: {integrity: sha512-tSC/Kbqpz/5/o/C2sG7QvOxAKqyd10bq+ypZNf+9Fi2TvbVbv1zNpcEptcsU7DPROaSbVgUXmrzKhurFvo5eDg==} engines: {node: '>= 20'} cpu: [arm64] os: [darwin] - '@tailwindcss/oxide-darwin-x64@4.2.2': - resolution: {integrity: sha512-BlR+2c3nzc8f2G639LpL89YY4bdcIdUmiOOkv2GQv4/4M0vJlpXEa0JXNHhCHU7VWOKWT/CjqHdTP8aUuDJkuw==} + '@tailwindcss/oxide-darwin-x64@4.2.4': + resolution: 
{integrity: sha512-yPyUXn3yO/ufR6+Kzv0t4fCg2qNr90jxXc5QqBpjlPNd0NqyDXcmQb/6weunH/MEDXW5dhyEi+agTDiqa3WsGg==} engines: {node: '>= 20'} cpu: [x64] os: [darwin] - '@tailwindcss/oxide-freebsd-x64@4.2.2': - resolution: {integrity: sha512-YUqUgrGMSu2CDO82hzlQ5qSb5xmx3RUrke/QgnoEx7KvmRJHQuZHZmZTLSuuHwFf0DJPybFMXMYf+WJdxHy/nQ==} + '@tailwindcss/oxide-freebsd-x64@4.2.4': + resolution: {integrity: sha512-BoMIB4vMQtZsXdGLVc2z+P9DbETkiopogfWZKbWwM8b/1Vinbs4YcUwo+kM/KeLkX3Ygrf4/PsRndKaYhS8Eiw==} engines: {node: '>= 20'} cpu: [x64] os: [freebsd] - '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2': - resolution: {integrity: sha512-FPdhvsW6g06T9BWT0qTwiVZYE2WIFo2dY5aCSpjG/S/u1tby+wXoslXS0kl3/KXnULlLr1E3NPRRw0g7t2kgaQ==} + '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.4': + resolution: {integrity: sha512-7pIHBLTHYRAlS7V22JNuTh33yLH4VElwKtB3bwchK/UaKUPpQ0lPQiOWcbm4V3WP2I6fNIJ23vABIvoy2izdwA==} engines: {node: '>= 20'} cpu: [arm] os: [linux] - '@tailwindcss/oxide-linux-arm64-gnu@4.2.2': - resolution: {integrity: sha512-4og1V+ftEPXGttOO7eCmW7VICmzzJWgMx+QXAJRAhjrSjumCwWqMfkDrNu1LXEQzNAwz28NCUpucgQPrR4S2yw==} + '@tailwindcss/oxide-linux-arm64-gnu@4.2.4': + resolution: {integrity: sha512-+E4wxJ0ZGOzSH325reXTWB48l42i93kQqMvDyz5gqfRzRZ7faNhnmvlV4EPGJU3QJM/3Ab5jhJ5pCRUsKn6OQw==} engines: {node: '>= 20'} cpu: [arm64] os: [linux] libc: [glibc] - '@tailwindcss/oxide-linux-arm64-musl@4.2.2': - resolution: {integrity: sha512-oCfG/mS+/+XRlwNjnsNLVwnMWYH7tn/kYPsNPh+JSOMlnt93mYNCKHYzylRhI51X+TbR+ufNhhKKzm6QkqX8ag==} + '@tailwindcss/oxide-linux-arm64-musl@4.2.4': + resolution: {integrity: sha512-bBADEGAbo4ASnppIziaQJelekCxdMaxisrk+fB7Thit72IBnALp9K6ffA2G4ruj90G9XRS2VQ6q2bCKbfFV82g==} engines: {node: '>= 20'} cpu: [arm64] os: [linux] libc: [musl] - '@tailwindcss/oxide-linux-x64-gnu@4.2.2': - resolution: {integrity: sha512-rTAGAkDgqbXHNp/xW0iugLVmX62wOp2PoE39BTCGKjv3Iocf6AFbRP/wZT/kuCxC9QBh9Pu8XPkv/zCZB2mcMg==} + '@tailwindcss/oxide-linux-x64-gnu@4.2.4': + resolution: {integrity: 
sha512-7Mx25E4WTfnht0TVRTyC00j3i0M+EeFe7wguMDTlX4mRxafznw0CA8WJkFjWYH5BlgELd1kSjuU2JiPnNZbJDA==} engines: {node: '>= 20'} cpu: [x64] os: [linux] libc: [glibc] - '@tailwindcss/oxide-linux-x64-musl@4.2.2': - resolution: {integrity: sha512-XW3t3qwbIwiSyRCggeO2zxe3KWaEbM0/kW9e8+0XpBgyKU4ATYzcVSMKteZJ1iukJ3HgHBjbg9P5YPRCVUxlnQ==} + '@tailwindcss/oxide-linux-x64-musl@4.2.4': + resolution: {integrity: sha512-2wwJRF7nyhOR0hhHoChc04xngV3iS+akccHTGtz965FwF0up4b2lOdo6kI1EbDaEXKgvcrFBYcYQQ/rrnWFVfA==} engines: {node: '>= 20'} cpu: [x64] os: [linux] libc: [musl] - '@tailwindcss/oxide-wasm32-wasi@4.2.2': - resolution: {integrity: sha512-eKSztKsmEsn1O5lJ4ZAfyn41NfG7vzCg496YiGtMDV86jz1q/irhms5O0VrY6ZwTUkFy/EKG3RfWgxSI3VbZ8Q==} + '@tailwindcss/oxide-wasm32-wasi@4.2.4': + resolution: {integrity: sha512-FQsqApeor8Fo6gUEklzmaa9994orJZZDBAlQpK2Mq+DslRKFJeD6AjHpBQ0kZFQohVr8o85PPh8eOy86VlSCmw==} engines: {node: '>=14.0.0'} cpu: [wasm32] bundledDependencies: @@ -3707,32 +3742,32 @@ packages: - '@emnapi/wasi-threads' - tslib - '@tailwindcss/oxide-win32-arm64-msvc@4.2.2': - resolution: {integrity: sha512-qPmaQM4iKu5mxpsrWZMOZRgZv1tOZpUm+zdhhQP0VhJfyGGO3aUKdbh3gDZc/dPLQwW4eSqWGrrcWNBZWUWaXQ==} + '@tailwindcss/oxide-win32-arm64-msvc@4.2.4': + resolution: {integrity: sha512-L9BXqxC4ToVgwMFqj3pmZRqyHEztulpUJzCxUtLjobMCzTPsGt1Fa9enKbOpY2iIyVtaHNeNvAK8ERP/64sqGQ==} engines: {node: '>= 20'} cpu: [arm64] os: [win32] - '@tailwindcss/oxide-win32-x64-msvc@4.2.2': - resolution: {integrity: sha512-1T/37VvI7WyH66b+vqHj/cLwnCxt7Qt3WFu5Q8hk65aOvlwAhs7rAp1VkulBJw/N4tMirXjVnylTR72uI0HGcA==} + '@tailwindcss/oxide-win32-x64-msvc@4.2.4': + resolution: {integrity: sha512-ESlKG0EpVJQwRjXDDa9rLvhEAh0mhP1sF7sap9dNZT0yyl9SAG6T7gdP09EH0vIv0UNTlo6jPWyujD6559fZvw==} engines: {node: '>= 20'} cpu: [x64] os: [win32] - '@tailwindcss/oxide@4.2.2': - resolution: {integrity: sha512-qEUA07+E5kehxYp9BVMpq9E8vnJuBHfJEC0vPC5e7iL/hw7HR61aDKoVoKzrG+QKp56vhNZe4qwkRmMC0zDLvg==} + '@tailwindcss/oxide@4.2.4': + resolution: {integrity: 
sha512-9El/iI069DKDSXwTvB9J4BwdO5JhRrOweGaK25taBAvBXyXqJAX+Jqdvs8r8gKpsI/1m0LeJLyQYTf/WLrBT1Q==} engines: {node: '>= 20'} - '@tailwindcss/postcss@4.2.2': - resolution: {integrity: sha512-n4goKQbW8RVXIbNKRB/45LzyUqN451deQK0nzIeauVEqjlI49slUlgKYJM2QyUzap/PcpnS7kzSUmPb1sCRvYQ==} + '@tailwindcss/postcss@4.2.4': + resolution: {integrity: sha512-wgAVj6nUWAolAu8YFvzT2cTBIElWHkjZwFYovF+xsqKsW2ADxM/X2opxj5NsF/qVccAOjRNe8X2IdPzMsWyHTg==} '@tailwindcss/typography@0.5.19': resolution: {integrity: sha512-w31dd8HOx3k9vPtcQh5QHP9GwKcgbMp87j58qi6xgiBnFFtKEAgCWnDw4qUT8aHwkCp8bKvb/KGKWWHedP0AAg==} peerDependencies: tailwindcss: '>=3.0.0 || insiders || >=4.0.0-alpha.20 || >=4.0.0-beta.1' - '@tailwindcss/vite@4.2.2': - resolution: {integrity: sha512-mEiF5HO1QqCLXoNEfXVA1Tzo+cYsrqV7w9Juj2wdUFyW07JRenqMG225MvPwr3ZD9N1bFQj46X7r33iHxLUW0w==} + '@tailwindcss/vite@4.2.4': + resolution: {integrity: sha512-pCvohwOCspk3ZFn6eJzrrX3g4n2JY73H6MmYC87XfGPyTty4YsCjYTMArRZm/zOI8dIt3+EcrLHAFPe5A4bgtw==} peerDependencies: vite: ^5.2.0 || ^6 || ^7 || ^8 @@ -3780,8 +3815,8 @@ packages: engines: {node: '>=18'} hasBin: true - '@tanstack/eslint-plugin-query@5.99.0': - resolution: {integrity: sha512-jVp1AEL7S7BeuQvH5SN1F5UdrNW/AbryKDeWUUMeAKNzh9C+Ik/bRSa/HeuJLlmaN+WOUkdDFbtCK0go7BxnUQ==} + '@tanstack/eslint-plugin-query@5.99.2': + resolution: {integrity: sha512-xiazL4CWOHJRDDgs5ZkfW98qlEAisakFDKh1Djc3BIk84tsvt3ow52AC2EiWSMY1q13IB4UI4jSo7yXlC3NL6g==} peerDependencies: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: ^5.4.0 || ^6.0.0 @@ -3789,11 +3824,11 @@ packages: typescript: optional: true - '@tanstack/form-core@1.29.0': - resolution: {integrity: sha512-uyeKEdJBfbj0bkBSwvSYVRtWLOaXvfNX3CeVw1HqGOXVLxpBBGAqWdYLc+UoX/9xcoFwFXrjR9QqMPzvwm2yyQ==} + '@tanstack/form-core@1.29.1': + resolution: {integrity: sha512-NIYPO36eEu7nSWvMpbFDQaBWyVtnH/C8fsZ3/XpJUT4uOWgmxsiUvHGbTbDNIQTXAKIkhwEl0sUrqBNn2SfUnw==} - '@tanstack/form-devtools@0.2.21': - resolution: {integrity: 
sha512-8mxR1/QDw37mNVSFsr4ZN8+bdamH9LU1/iQ3I7/sfTzFmMsNzUOysX3OZf053eaS4Gaw44PT0pH7U0FWD98QKw==} + '@tanstack/form-devtools@0.2.22': + resolution: {integrity: sha512-hMrKwu+73O2LeHj78vi48oaAH4jZi/U92hrHmkvxDy3E72c+PbxDJBbM9rXUK4h0GPbOzfaZ235SruJ0lfuOYA==} peerDependencies: solid-js: 1.9.11 @@ -3801,11 +3836,11 @@ packages: resolution: {integrity: sha512-y/xtNPNt/YeyoVxE/JCx+T7yjEzpezmbb+toK8DDD1P4m7Kzs5YR956+7OKexG3f8aXgC3rLZl7b1V+yNUSy5w==} engines: {node: '>=18'} - '@tanstack/query-core@5.99.0': - resolution: {integrity: sha512-3Jv3WQG0BCcH7G+7lf/bP8QyBfJOXeY+T08Rin3GZ1bshvwlbPt7NrDHMEzGdKIOmOzvIQmxjk28YEQX60k7pQ==} + '@tanstack/query-core@5.99.2': + resolution: {integrity: sha512-1HunU0bXVsR1ZJMZbcOPE6VtaBJxsW809RE9xPe4Gz7MlB0GWwQvuTPhMoEmQ/hIzFKJ/DWAuttIe7BOaWx0tA==} - '@tanstack/query-devtools@5.99.0': - resolution: {integrity: sha512-m4ufXaJ8FjWXw7xDtyzE/6fkZAyQFg9WrbMrUpt8ZecRJx58jiFOZ2lxZMphZdIpAnIeto/S8stbwLKLusyckQ==} + '@tanstack/query-devtools@5.99.2': + resolution: {integrity: sha512-TEF1d+RYO9l8oeCwgzmOHIgKwAzXQmw2s/ny2bW8qeg2OMkkLjALfVEivgCMR3OL/jVdMmeTPX56WrV+uvYJFg==} '@tanstack/react-devtools@0.10.2': resolution: {integrity: sha512-1BmZyxOrI5SqmRJ5MgkYZNNdnlLsJxQRI2YgorrAvcF2MxK6x5RcuStvD8+YlXoMw3JtNukPxoITirKAnKYDQA==} @@ -3816,13 +3851,13 @@ packages: react: '>=16.8' react-dom: '>=16.8' - '@tanstack/react-form-devtools@0.2.21': - resolution: {integrity: sha512-WBQ7NOcb3FM9UA4juZVyWUyJkyl62vHFbEBybZuvBFw3wq/v9pDGS01Ye8kepGXDg1+LQsOOxyDR65AKsdqSYQ==} + '@tanstack/react-form-devtools@0.2.22': + resolution: {integrity: sha512-CXa+U6QrF8QOGL+sCIIcwzHb1K+hfNjBA5PwSmxm32Oxpu8fK/60M3SbE9UM9439MR/GQiIoeBW2FFyKh73apw==} peerDependencies: react: ^17.0.0 || ^18.0.0 || ^19.0.0 - '@tanstack/react-form@1.29.0': - resolution: {integrity: sha512-jj425NNX0QKqbUzqSNiYI3HCPHSk2df47acXCJyXczWOTmG81ECZGkgofgqamFsSU9kMiH6Di5RLUnftrlhWSw==} + '@tanstack/react-form@1.29.1': + resolution: {integrity: 
sha512-hVHk4g0phd0HxRsv2ry6Xt8BqmalT55Q3cokhJBCC1St0hcGZhgwJJbohm9atao45BPG9e55DGvtbwExqZe35g==} peerDependencies: '@tanstack/react-start': '*' react: ^17.0.0 || ^18.0.0 || ^19.0.0 @@ -3830,14 +3865,14 @@ packages: '@tanstack/react-start': optional: true - '@tanstack/react-query-devtools@5.99.0': - resolution: {integrity: sha512-CqqX7LCU9yOfCY/vBURSx2YSD83ryfX+QkfkaKionTfg1s2Hdm572Ro99gW3QPoJjzvsj1HM4pnN4nbDy3MXKA==} + '@tanstack/react-query-devtools@5.99.2': + resolution: {integrity: sha512-8txkK9A9XBNTB8RoxVgfp6W3qwBr25tNP10L4yu3KuyhAdEvccECfIRzesSwMVk/wpVVioAr+hbMtUkMMF+WVw==} peerDependencies: - '@tanstack/react-query': ^5.99.0 + '@tanstack/react-query': ^5.99.2 react: ^18 || ^19 - '@tanstack/react-query@5.99.0': - resolution: {integrity: sha512-OY2bCqPemT1LlqJ8Y2CUau4KELnIhhG9Ol3ZndPbdnB095pRbPo1cHuXTndg8iIwtoHTgwZjyaDnQ0xD0mYwAw==} + '@tanstack/react-query@5.99.2': + resolution: {integrity: sha512-vM91UEe45QUS9ED6OklsVL15i8qKcRqNwpWzPTVWvRPRSEgDudDgHpvyTjcdlwHcrKNa80T+xXYcchT2noPnZA==} peerDependencies: react: ^18 || ^19 @@ -3847,8 +3882,8 @@ packages: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 - '@tanstack/react-virtual@3.13.23': - resolution: {integrity: sha512-XnMRnHQ23piOVj2bzJqHrRrLg4r+F86fuBcwteKfbIjJrtGxb4z7tIvPVAe4B+4UVwo9G4Giuz5fmapcrnZ0OQ==} + '@tanstack/react-virtual@3.13.24': + resolution: {integrity: sha512-aIJvz5OSkhNIhZIpYivrxrPTKYsjW9Uzy+sP/mx0S3sev2HyvPb7xmjbYvokzEpfgYHy/HjzJ2zFAETuUfgCpg==} peerDependencies: react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 @@ -3856,8 +3891,8 @@ packages: '@tanstack/store@0.9.3': resolution: {integrity: sha512-8reSzl/qGWGGVKhBoxXPMWzATSbZLZFWhwBAFO9NAyp0TxzfBP0mIrGb8CP8KrQTmvzXlR/vFPPUrHTLBGyFyw==} - '@tanstack/virtual-core@3.13.23': - resolution: {integrity: sha512-zSz2Z2HNyLjCplANTDyl3BcdQJc2k1+yyFoKhNRmCr7V7dY8o8q5m8uFTI1/Pg1kL+Hgrz6u3Xo6eFUB7l66cg==} + '@tanstack/virtual-core@3.14.0': + resolution: 
{integrity: sha512-JLANqGy/D6k4Ujmh8Tr25lGimuOXNiaVyXaCAZS0W+1390sADdGnyUdSWNIfd49gebtIxGMij4IktRVzrdr12Q==} '@teppeis/multimaps@3.0.0': resolution: {integrity: sha512-ID7fosbc50TbT0MK0EG12O+gAP3W3Aa/Pz4DaTtQtEvlc9Odaqi0de+xuZ7Li2GtK4HzEX7IuRWS/JmZLksR3Q==} @@ -3892,22 +3927,22 @@ packages: peerDependencies: '@testing-library/dom': '>=7.21.4' - '@tsslint/cli@3.0.3': - resolution: {integrity: sha512-Pt1AuEZoh+dK4QYt95oCjBdBp2h2iYY9pSerf9BTLgfsjeyEsNk7Juhn51sFlAuEnWDNvI8mLULzsIkayd0nUQ==} + '@tsslint/cli@3.0.4': + resolution: {integrity: sha512-jvSYZEJKhDp02CyvLe7thGYp/uMW860kC8hDIMnZAGp3JMDkM2dU1kl550li4qiYXFkS8v5AU1nR2RyIn3khvw==} engines: {node: '>=22.6.0'} hasBin: true peerDependencies: typescript: '*' - '@tsslint/compat-eslint@3.0.3': - resolution: {integrity: sha512-UGWrE4fu8fUCLkc+zMQNsEfuEkGHjndpa5oSQmzhmo9BQJYAqqH1s2kGIiDsAYwaQTUts4SjclXaITq3pZhkrA==} + '@tsslint/compat-eslint@3.0.4': + resolution: {integrity: sha512-zWurlYWaSfK62uf5n7GMa0C7pcYOXbYjMeBfd3w0RmCZzk5gBhNSJdSNXNmbDXUuM/3RH03PpqHuUIktCGB52g==} - '@tsslint/config@3.0.3': - resolution: {integrity: sha512-3yFyM4Sj+0LxwmcokwNPuS9pWUBMIhO8vwHiG4vGuquTvF4cgZqDPyQ3GN4hDb5qAZ56iqYtMoBEiSZXlJDYPQ==} + '@tsslint/config@3.0.4': + resolution: {integrity: sha512-2VfGdG35wrcosUxxsoUD46LOI1lEJWhQFpDROhos2JOwwVPIQqp66hl9MOYjkBpt8zYVWvdcDWIOIT9QIpDL3A==} engines: {node: '>=22.6.0'} hasBin: true peerDependencies: - '@tsslint/compat-eslint': 3.0.0-alpha.0 + '@tsslint/compat-eslint': ^3.0.0 tsl: ^1.0.28 peerDependenciesMeta: '@tsslint/compat-eslint': @@ -3915,12 +3950,12 @@ packages: tsl: optional: true - '@tsslint/core@3.0.3': - resolution: {integrity: sha512-EpCKw34f2XyypH5xlxKCwnTgPGpZxbPXfvpwddT3DCxsIzUDJY4SpVJULAZFPAjJd49vopG0kNhXn0C/b+kHcg==} + '@tsslint/core@3.0.4': + resolution: {integrity: sha512-hzvO/8zZfds9k7ZREyE5h2pnKkukZsAD81F7rq/k9AOv//Wmi2OxXyxmhmv98/ZoieOK5nSrrzh8+mh7GtkrEw==} engines: {node: '>=22.6.0'} - '@tsslint/types@3.0.3': - resolution: {integrity: 
sha512-3Jlb5UTPrzqu1D1qOrzjwy0QW2n41A1+ILKvzgViFrtiTwurM5Tav6V7Y4AFxO0xatCA0VHAzzifK0r5znaKbw==} + '@tsslint/types@3.0.4': + resolution: {integrity: sha512-z/LXFUSGCxrh/WfkVmlyRwCVjAr2H1/v6EDvVTuXX/3ZEO+Ss9UqgEGgnTnQn3TLSLJa2pEaIY3Hsz0Y9TsuyA==} '@tybys/wasm-util@0.10.1': resolution: {integrity: sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==} @@ -4142,6 +4177,14 @@ packages: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/eslint-plugin@8.59.0': + resolution: {integrity: sha512-HyAZtpdkgZwpq8Sz3FSUvCR4c+ScbuWa9AksK2Jweub7w4M3yTz4O11AqVJzLYjy/B9ZWPyc81I+mOdJU/bDQw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + '@typescript-eslint/parser': ^8.59.0 + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/parser@8.58.2': resolution: {integrity: sha512-/Zb/xaIDfxeJnvishjGdcR4jmr7S+bda8PKNhRGdljDM+elXhlvN0FyPSsMnLmJUrVG9aPO6dof80wjMawsASg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -4149,22 +4192,45 @@ packages: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/parser@8.59.0': + resolution: {integrity: sha512-TI1XGwKbDpo9tRW8UDIXCOeLk55qe9ZFGs8MTKU6/M08HWTw52DD/IYhfQtOEhEdPhLMT26Ka/x7p70nd3dzDg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/project-service@8.58.2': resolution: {integrity: sha512-Cq6UfpZZk15+r87BkIh5rDpi38W4b+Sjnb8wQCPPDDweS/LRCFjCyViEbzHk5Ck3f2QDfgmlxqSa7S7clDtlfg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/project-service@8.59.0': + resolution: {integrity: sha512-Lw5ITrR5s5TbC19YSvlr63ZfLaJoU6vtKTHyB0GQOpX0W7d5/Ir6vUahWi/8Sps/nOukZQ0IB3SmlxZnjaKVnw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.1.0' + 
'@typescript-eslint/scope-manager@8.58.2': resolution: {integrity: sha512-SgmyvDPexWETQek+qzZnrG6844IaO02UVyOLhI4wpo82dpZJY9+6YZCKAMFzXb7qhx37mFK1QcPQ18tud+vo6Q==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/scope-manager@8.59.0': + resolution: {integrity: sha512-UzR16Ut8IpA3Mc4DbgAShlPPkVm8xXMWafXxB0BocaVRHs8ZGakAxGRskF7FId3sdk9lgGD73GSFaWmWFDE4dg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/tsconfig-utils@8.58.2': resolution: {integrity: sha512-3SR+RukipDvkkKp/d0jP0dyzuls3DbGmwDpVEc5wqk5f38KFThakqAAO0XMirWAE+kT00oTauTbzMFGPoAzB0A==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/tsconfig-utils@8.59.0': + resolution: {integrity: sha512-91Sbl3s4Kb3SybliIY6muFBmHVv+pYXfybC4Oolp3dvk8BvIE3wOPc+403CWIT7mJNkfQRGtdqghzs2+Z91Tqg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/type-utils@8.58.2': resolution: {integrity: sha512-Z7EloNR/B389FvabdGeTo2XMs4W9TjtPiO9DAsmT0yom0bwlPyRjkJ1uCdW1DvrrrYP50AJZ9Xc3sByZA9+dcg==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -4172,16 +4238,33 @@ packages: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/type-utils@8.59.0': + resolution: {integrity: sha512-3TRiZaQSltGqGeNrJzzr1+8YcEobKH9rHnqIp/1psfKFmhRQDNMGP5hBufanYTGznwShzVLs3Mz+gDN7HkWfXg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/types@8.58.2': resolution: {integrity: sha512-9TukXyATBQf/Jq9AMQXfvurk+G5R2MwfqQGDR2GzGz28HvY/lXNKGhkY+6IOubwcquikWk5cjlgPvD2uAA7htQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + '@typescript-eslint/types@8.59.0': + resolution: {integrity: sha512-nLzdsT1gdOgFxxxwrlNVUBzSNBEEHJ86bblmk4QAS6stfig7rcJzWKqCyxFy3YRRHXDWEkb2NralA1nOYkkm/A==} + engines: {node: ^18.18.0 || ^20.9.0 || 
>=21.1.0} + '@typescript-eslint/typescript-estree@8.58.2': resolution: {integrity: sha512-ELGuoofuhhoCvNbQjFFiobFcGgcDCEm0ThWdmO4Z0UzLqPXS3KFvnEZ+SHewwOYHjM09tkzOWXNTv9u6Gqtyuw==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/typescript-estree@8.59.0': + resolution: {integrity: sha512-O9Re9P1BmBLFJyikRbQpLku/QA3/AueZNO9WePLBwQrvkixTmDe8u76B6CYUAITRl/rHawggEqUGn5QIkVRLMw==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/utils@8.58.2': resolution: {integrity: sha512-QZfjHNEzPY8+l0+fIXMvuQ2sJlplB4zgDZvA+NmvZsZv3EQwOcc1DuIU1VJUTWZ/RKouBMhDyNaBMx4sWvrzRA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} @@ -4189,47 +4272,58 @@ packages: eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/utils@8.59.0': + resolution: {integrity: sha512-I1R/K7V07XsMJ12Oaxg/O9GfrysGTmCRhvZJBv0RE0NcULMzjqVpR5kRRQjHsz3J/bElU7HwCO7zkqL+MSUz+g==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + eslint: ^8.57.0 || ^9.0.0 || ^10.0.0 + typescript: '>=4.8.4 <6.1.0' + '@typescript-eslint/visitor-keys@8.58.2': resolution: {integrity: sha512-f1WO2Lx8a9t8DARmcWAUPJbu0G20bJlj8L4z72K00TMeJAoyLr/tHhI/pzYBLrR4dXWkcxO1cWYZEOX8DKHTqA==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-CDgxIPvAWRCfOiQKvSk4wUkAoRW4Cy6vfAUBPNHSeLalIt43ToF0LOAsa5uLyRGsftjfMYY0A4qFOmgDvBhgzQ==} + '@typescript-eslint/visitor-keys@8.59.0': + resolution: {integrity: sha512-/uejZt4dSere1bx12WLlPfv8GktzcaDtuJ7s42/HEZ5zGj9oxRaD4bj7qwSunXkf+pbAhFt2zjpHYUiT5lHf0Q==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-W/lGgoEfbdI/QWYqcNP0fSa4DHQKKEMLzDPsE6fA64zmfCNsTO9M7ttK0acKiLsGB16pr0lubuMDRNN5kXyQ8w==} cpu: [arm64] os: [darwin] - 
'@typescript/native-preview-darwin-x64@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-oiMmUtNMaqBh+eUogX53ichcEf7d+7upC0qa7xS9zWl85XEPKlrZCZpZ79yixw1PkdpjqJJigI11bmCi/JVv+g==} + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-6tZ2yAcKLBIghwKyC74vDqb/7rB99fTpERv9f64iA1tMh6l+WHIuQb6z3mIFVOYBIl2pN9CYasURLroKYtUz1w==} cpu: [x64] os: [darwin] - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-hPKanfs9c+7953gIYw13CNxN0HqFAOfJjnWk4SHqSBe3Pj9pxoeJvvRWlofp5C833eOZK6gZB7ll0/uNb0djtA==} + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-7HL4E7kP0ociYB8R4+QuIbzfT3pjdesNY+ax/q6fP3IMd3/QNAL/qsm/NaokjXke+I7uYxKqQ8Qo/t5MSv/r+A==} cpu: [arm64] os: [linux] - '@typescript/native-preview-linux-arm@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-0lSXBzBVsxIGrFv/PxoswzMptsnU6BgSk7GMAUt/o1dVw36R2XrSs538vwKnujaJwt4iIdMS0uGdpUC5s9jkzQ==} + '@typescript/native-preview-linux-arm@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-EWP1Jq2I8MMSkoF9D6ztXgRmnUy2KcaZfL9FYcdm3Am6ZYuI6/SCR3HVIVYbaixAJXe/qUh5MN3LzJbl/4hefQ==} cpu: [arm] os: [linux] - '@typescript/native-preview-linux-x64@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-8Cr477HRmHZ5YyLfikNvw7qp3/WmnRjzIzJhUDrAx5173OBe8BdyV9jPemFHKDPqwI1AUMTijvptOFoQE7429w==} + '@typescript/native-preview-linux-x64@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-fDqkLf2Hv7X1Cy1B5OMcljPt/+8GpnTxFM9rDCFrYAPgOolIQJ9qwkb+xGfvAtxkkE5sZIvGPcqjP9PWQHt2qw==} cpu: [x64] os: [linux] - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-ulJD9ZbIQyTBIDx8zzAzQLtbvQDGHSWrNRgkgBU5Os2NTYADQRco4pU747R9wZPMLopy3IeNck6m8vwPoYMk1g==} + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-l1tDnyNQSqxFkKz683dD8EORQtcQqZyWkTDnRtHmaPg2mTRxhxSekL/HcsHx/1/DoGTfl310O+CmXzd2mTq3pQ==} cpu: [arm64] os: [win32] - 
'@typescript/native-preview-win32-x64@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-x7DsSXnLQBf5XBBR8luHf1Nc/T1eByUmrOSEThW6825UB7lHoPlqKdhIoUNnTnS4nXQMxLwcusD4P1EP23GPJw==} + '@typescript/native-preview-win32-x64@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-VQbDQlp1bjV5nnHagQLXQAhid3S48l1OToIBjvqlw18s0V0YSgoyNL6E/rE7FBdkGrTLf/rtKjo42IZnt3tvqA==} cpu: [x64] os: [win32] - '@typescript/native-preview@7.0.0-dev.20260413.1': - resolution: {integrity: sha512-twzr3V4QLEbXaESuI2DqdzutOVFGpkY3VZDR9sF8YlLsAXkwyQvZo58cKM77mZcsHoCR4lCYcdTatWTTa/+8tw==} + '@typescript/native-preview@7.0.0-dev.20260422.1': + resolution: {integrity: sha512-8CR8zHFlLpSL5OXY4Wbz2DmiDOoat1JBMkydZUHwQIS4cpoTN7SHjk2BN8i51XHUy0jMF5airL0TlY3GOfZmKg==} hasBin: true '@ungap/structured-clone@1.3.0': @@ -4297,10 +4391,10 @@ packages: react-server-dom-webpack: optional: true - '@vitest/coverage-v8@4.1.4': - resolution: {integrity: sha512-x7FptB5oDruxNPDNY2+S8tCh0pcq7ymCe1gTHcsp733jYjrJl8V1gMUlVysuCD9Kz46Xz9t1akkv08dPcYDs1w==} + '@vitest/coverage-v8@4.1.5': + resolution: {integrity: sha512-38C0/Ddb7HcRG0Z4/DUem8x57d2p9jYgp18mkaYswEOQBGsI1CG4f/hjm0ZCeaJfWhSZ4k7jgs29V1Zom7Ki9A==} peerDependencies: - '@vitest/browser': 4.1.4 + '@vitest/browser': 4.1.5 peerDependenciesMeta: '@vitest/browser': optional: true @@ -4324,8 +4418,8 @@ packages: '@vitest/pretty-format@3.2.4': resolution: {integrity: sha512-IVNZik8IVRJRTr9fxlitMKeJeXFFFN0JaB9PHPGQ8NKQbGpfjlTx9zO4RefN8gp7eqjNy8nyK3NZmBzOPeIxtA==} - '@vitest/pretty-format@4.1.4': - resolution: {integrity: sha512-ddmDHU0gjEUyEVLxtZa7xamrpIefdEETu3nZjWtHeZX4QxqJ7tRxSteHVXJOcr8jhiLoGAhkK4WJ3WqBpjx42A==} + '@vitest/pretty-format@4.1.5': + resolution: {integrity: sha512-7I3q6l5qr03dVfMX2wCo9FxwSJbPdwKjy2uu/YPpU3wfHvIL4QHwVRp57OfGrDFeUJ8/8QdfBKIV12FTtLn00g==} '@vitest/spy@3.2.4': resolution: {integrity: sha512-vAfasCOe6AIK70iP5UD11Ac4siNUNJ9i/9PZ3NKx07sG6sUxeag1LWdNrMWeKKYBLlzuK+Gn65Yd5nyL6ds+nw==} @@ -4333,8 +4427,8 @@ packages: '@vitest/utils@3.2.4': 
resolution: {integrity: sha512-fB2V0JFrQSMsCo9HiSq3Ezpdv4iYaXRG1Sx8edX3MwxfyNn83mKiGzOcH+Fkxt4MHxr3y42fQi1oeAInqgX2QA==} - '@vitest/utils@4.1.4': - resolution: {integrity: sha512-13QMT+eysM5uVGa1rG4kegGYNp6cnQcsTc67ELFbhNLQO+vgsygtYJx2khvdt4gVQqSSpC/KT5FZZxUpP3Oatw==} + '@vitest/utils@4.1.5': + resolution: {integrity: sha512-76wdkrmfXfqGjueGgnb45ITPyUi1ycZ4IHgC2bhPDUfWHklY/q3MdLOAB+TF1e6xfl8NxNY0ZYaPCFNWSsw3Ug==} '@voidzero-dev/vite-plus-core@0.1.19': resolution: {integrity: sha512-BTmz50juSDolIN4Vtu5iVaPONV1XSrMB5V+9IoBhhxdogfvp7PBhaHuAcPjTN2RTVowhLZXoo8mn+aHjq//bkw==} @@ -4702,8 +4796,8 @@ packages: caniuse-lite@1.0.30001781: resolution: {integrity: sha512-RdwNCyMsNBftLjW6w01z8bKEvT6e/5tpPVEgtn22TiLGlstHOVecsX2KHFkD5e/vRnIE4EGzpuIODb3mtswtkw==} - canvas@3.2.2: - resolution: {integrity: sha512-duEt4h1HHu9sJZyVKfLRXR6tsKPY7cEELzxSRJkwddOXYvQT3P/+es98SV384JA0zMOZ5s+9gatnGfM6sL4Drg==} + canvas@3.2.3: + resolution: {integrity: sha512-PzE5nJZPz72YUAfo8oTp0u3fqqY7IzlTubneAihqDYAUcBk7ryeCmBbdJBEdaH0bptSOe2VT2Zwcb3UaFyaSWw==} engines: {node: ^18.12.0 || >= 20.9.0} capital-case@1.0.4: @@ -5193,8 +5287,8 @@ packages: resolution: {integrity: sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==} engines: {node: '>=20'} - dompurify@3.4.0: - resolution: {integrity: sha512-nolgK9JcaUXMSmW+j1yaSvaEaoXYHwWyGJlkoCTghc97KgGDDSnpoU/PlEnw63Ah+TGKFOyY+X5LnxaWbCSfXg==} + dompurify@3.4.1: + resolution: {integrity: sha512-JahakDAIg1gyOm7dlgWSDjV4n7Ip2PKR55NIT6jrMfIgLFgWo81vdr1/QGqWtFNRqXP9UV71oVePtjqS2ebnPw==} domutils@3.2.2: resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} @@ -5288,8 +5382,8 @@ packages: es-module-lexer@2.0.0: resolution: {integrity: sha512-5POEcUuZybH7IdmGsD8wlf0AI55wMecM9rVBTI/qEAy2c1kTOm3DjFYjrBdI2K3BaJjJYfYFeRtM0t9ssnRuxw==} - es-toolkit@1.45.1: - resolution: {integrity: 
sha512-/jhoOj/Fx+A+IIyDNOvO3TItGmlMKhtX8ISAHKE90c4b/k1tqaqEZ+uUqfpU8DMnW5cgNJv606zS55jGvza0Xw==} + es-toolkit@1.46.0: + resolution: {integrity: sha512-IToJ6ct9OLl5zz6WsC/1vZEwfSZ7Myil+ygl5Tf30Xjn9AEkzNB4kqp2G7VUJKF1DtTx/ra5M5KLlXvzOg51BA==} esast-util-from-estree@2.0.0: resolution: {integrity: sha512-4CyanoAudUSBAn5K13H4JhsMH6L9ZP7XbLVe/dKybkxMO7eDyLsT8UHl9TRNrU2Gr9nz+FovfSIjuXWJ81uVwQ==} @@ -5499,8 +5593,8 @@ packages: peerDependencies: eslint: '>=9.38.0' - eslint-plugin-sonarjs@4.0.2: - resolution: {integrity: sha512-BTcT1zr1iTbmJtVlcesISwnXzh+9uhf9LEOr+RRNf4kR8xA0HQTPft4oiyOCzCOGKkpSJxjR8ZYF6H7VPyplyw==} + eslint-plugin-sonarjs@4.0.3: + resolution: {integrity: sha512-5drkJKLC9qQddIiaATV0e8+ygbUc7b0Ti6VB7M2d3jmKNh3X0RaiIJYTs3dr9xnlhlrxo+/s1FoO3Jgv6O/c7g==} peerDependencies: eslint: ^8.0.0 || ^9.0.0 || ^10.0.0 @@ -5577,8 +5671,8 @@ packages: resolution: {integrity: sha512-tD40eHxA35h0PEIZNeIjkHoDR4YjjJp34biM0mDvplBe//mB+IHCqHDGV7pxF+7MklTvighcCPPZC7ynWyjdTA==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} - eslint@10.2.0: - resolution: {integrity: sha512-+L0vBFYGIpSNIt/KWTpFonPrqYvgKw1eUI5Vn7mEogrQcWtWYtNQ7dNqC+px/J0idT3BAkiWrhfS7k+Tum8TUA==} + eslint@10.2.1: + resolution: {integrity: sha512-wiyGaKsDgqXvF40P8mDwiUp/KQjE1FdrIEJsM8PZ3XCiniTMXS3OHWWUe5FI5agoCnr8x4xPrTDZuxsBlNHl+Q==} engines: {node: ^20.19.0 || ^22.13.0 || >=24} hasBin: true peerDependencies: @@ -5790,6 +5884,9 @@ packages: get-tsconfig@4.13.7: resolution: {integrity: sha512-7tN6rFgBlMgpBML5j8typ92BKFi2sFQvIdpAqLA2beia5avZDrMs0FLZiM5etShWq5irVyGcGMEA1jcDaK7A/Q==} + get-tsconfig@4.14.0: + resolution: {integrity: sha512-yTb+8DXzDREzgvYmh6s9vHsSVCHeC0G3PI5bEXNBHtmshPnO+S5O7qgLEOn0I5QvMy6kpZN8K1NKGyilLb93wA==} + github-from-package@0.0.0: resolution: {integrity: sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==} @@ -5931,8 +6028,8 @@ packages: i18next-resources-to-backend@1.2.1: resolution: {integrity: 
sha512-okHbVA+HZ7n1/76MsfhPqDou0fptl2dAlhRDu2ideXloRRduzHsqDOznJBef+R3DFZnbvWoBW+KxJ7fnFjd6Yw==} - i18next@26.0.4: - resolution: {integrity: sha512-gXF7U9bfioXPLv7mw8Qt2nfO7vij5MyINvPgVv99pX3fL1Y01pw2mKBFrlYpRxRCl2wz3ISenj6VsMJT2isfuA==} + i18next@26.0.6: + resolution: {integrity: sha512-A4U6eCXodIbrhf8EarRurB9/4ebyaurH4+fu4gig9bqxmpSt+fCAFm/GpRQDcN1Xzu/LdFCx4nYHsnM1edIIbg==} peerDependencies: typescript: ^5 || ^6 peerDependenciesMeta: @@ -6188,8 +6285,8 @@ packages: khroma@2.1.0: resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} - knip@6.4.1: - resolution: {integrity: sha512-Ry+ywmDFSZvKp/jx7LxMgsZWRTs931alV84e60lh0Stf6kSRYqSIUTkviyyDFRcSO3yY1Kpbi83OirN+4lA2Xw==} + knip@6.6.1: + resolution: {integrity: sha512-SOmqh25vuAfdynGoDr/kMCxIuD5+PkMIfMSGQeMqfrxwuPTANvJKcVttLgGZjjkATALqukSe/hhDVqcwNkf92g==} engines: {node: ^20.19.0 || >=22.12.0} hasBin: true @@ -6199,8 +6296,8 @@ packages: kolorist@1.8.0: resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==} - ky@2.0.0: - resolution: {integrity: sha512-KzI4Vz5AbZFAUFYGx28PCSfFWUo6/qj9Br/P6KRwDieE1xfdz0tIONepJcLw/1xLocN13GgvfJGasa+pfSkbHg==} + ky@2.0.2: + resolution: {integrity: sha512-/GmXpo9F9W+f8n4Ivr2iH+7h7wL7jLbLKWkMlpflcCRb6kGjBfTlASEXaZ9qUgNTn4VgS0P2pwxxzQ4EM6Ulgg==} engines: {node: '>=22'} lamejs@1.2.1: @@ -6341,8 +6438,8 @@ packages: resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} hasBin: true - loro-crdt@1.10.8: - resolution: {integrity: sha512-GvH8fSJST1VDHRGzlQml80pBYoFbIP4ULeV1S8fD4ffmA8m+icoPORyVUW2AkJBY3dxKIcMMn0WqaJmpCmnbkQ==} + loro-crdt@1.11.1: + resolution: {integrity: sha512-R+Ksyy2FPYoOfJAkVY6BqGk11AtlgWZ1B91V/G7TaQxitxuvUvMd1URhO33LYfFUIT2CSn0Nikl+bbRZ2RGuZg==} loupe@3.2.1: resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} @@ 
-6608,6 +6705,10 @@ packages: resolution: {integrity: sha512-oRjTw/97aTBN0RHbYCdtF1MQfvusSIBQM0IZEgzl6426+8jSC0nF1a/GmnVLpfB9yyr6g6FTqWqiZVbxrtaCIg==} engines: {node: 18 || 20 || >=22} + minimatch@10.2.5: + resolution: {integrity: sha512-MULkVLfKGYDFYejP07QOurDLLQpcjk7Fw+7jXS2R2czRQzR56yHRveU5NDJEOviH+hETZKSkIk5c+T23GjFUMg==} + engines: {node: 18 || 20 || >=22} + minimatch@3.1.5: resolution: {integrity: sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==} @@ -6683,8 +6784,8 @@ packages: react: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc react-dom: ^16.8 || ^17 || ^18 || ^19 || ^19.0.0-rc - next@16.2.3: - resolution: {integrity: sha512-9V3zV4oZFza3PVev5/poB9g0dEafVcgNyQ8eTRop8GvxZjV2G15FC5ARuG1eFD42QgeYkzJBJzHghNP8Ad9xtA==} + next@16.2.4: + resolution: {integrity: sha512-kPvz56wF5frc+FxlHI5qnklCzbq53HTwORaWBGdT0vNoKh1Aya9XC8aPauH4NJxqtzbWsS5mAbctm4cr+EkQ2Q==} engines: {node: '>=20.9.0'} hasBin: true peerDependencies: @@ -6785,8 +6886,8 @@ packages: resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} engines: {node: '>= 0.8.0'} - oxc-parser@0.121.0: - resolution: {integrity: sha512-ek9o58+SCv6AV7nchiAcUJy1DNE2CC5WRdBcO0mF+W4oRjNQfPO7b3pLjTHSFECpHkKGOZSQxx3hk8viIL5YCg==} + oxc-parser@0.126.0: + resolution: {integrity: sha512-FktCvLby/mOHyuijZt22+nOt10dS24gGUZE3XwIbUg7Kf4+rer3/5T7RgwzazlNuVsCjPloZ3p8E+4ONT3A8Kw==} engines: {node: ^20.19.0 || >=22.12.0} oxc-resolver@11.19.1: @@ -6984,6 +7085,10 @@ packages: resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} engines: {node: ^10 || ^12 || >=14} + postcss@8.5.10: + resolution: {integrity: sha512-pMMHxBOZKFU6HgAZ4eyGnwXF/EvPGGqUr0MnZ5+99485wwW41kW91A4LOGxSHhgugZmSChL5AlElNdwlNgcnLQ==} + engines: {node: ^10 || ^12 || >=14} + postcss@8.5.9: resolution: {integrity: 
sha512-7a70Nsot+EMX9fFU3064K/kdHWZqGVY+BADLyXc8Dfv+mTLLVl6JzJpPaCZ2kQL9gIJvKXSLMHhqdRRjwQeFtw==} engines: {node: ^10 || ^12 || >=14} @@ -7638,8 +7743,8 @@ packages: tailwind-merge@3.5.0: resolution: {integrity: sha512-I8K9wewnVDkL1NTGoqWmVEIlUcB9gFriAEkXkfCjX5ib8ezGxtR3xD7iZIxrfArjEsH7F1CHD4RFUtxefdqV/A==} - tailwindcss@4.2.2: - resolution: {integrity: sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==} + tailwindcss@4.2.4: + resolution: {integrity: sha512-HhKppgO81FQof5m6TEnuBWCZGgfRAWbaeOaGT00KOy/Pf/j6oUihdvBpA7ltCeAvZpFhW3j0PTclkxsd4IXYDA==} tapable@2.3.2: resolution: {integrity: sha512-1MOpMXuhGzGL5TTCZFItxCc0AARf1EZFQkGqMm7ERKj8+Hgr5oLvJOVFcC+lRmR8hCe2S3jC4T5D7Vg/d7/fhA==} @@ -7816,8 +7921,8 @@ packages: resolution: {integrity: sha512-PlBfpQwiUvGViBNX84Yxwjsdhd1TUlXr6zjX7eoirtCPIr08NAmxwa+fcYBTeRQxHo9YC9wwF3m9i700sHma8g==} engines: {node: '>=20'} - typescript@6.0.2: - resolution: {integrity: sha512-bGdAIrZ0wiGDo5l8c++HWtbaNCWTS4UTv7RaTH/ThVIgjkveJt83m74bBHMJkuCbslY8ixgLBVZJIOiQlQTjfQ==} + typescript@6.0.3: + resolution: {integrity: sha512-y2TvuxSZPDyQakkFRPZHKFm+KKVqIisdg9/CZwm9ftvKXLP8NRWj38/ODjNbr43SsoXqNuAisEf1GdCxqWcdBw==} engines: {node: '>=14.17'} hasBin: true @@ -8289,27 +8394,28 @@ snapshots: '@alloc/quick-lru@5.2.0': {} - '@amplitude/analytics-browser@2.39.0': + '@amplitude/analytics-browser@2.41.0': dependencies: - '@amplitude/analytics-core': 2.45.0 - '@amplitude/plugin-autocapture-browser': 1.25.2 - '@amplitude/plugin-custom-enrichment-browser': 0.1.4 - '@amplitude/plugin-network-capture-browser': 1.9.13 - '@amplitude/plugin-page-url-enrichment-browser': 0.7.5 - '@amplitude/plugin-page-view-tracking-browser': 2.9.6 - '@amplitude/plugin-web-vitals-browser': 1.1.28 + '@amplitude/analytics-core': 2.47.0 + '@amplitude/plugin-autocapture-browser': 1.26.0 + '@amplitude/plugin-custom-enrichment-browser': 0.1.6 + '@amplitude/plugin-event-property-attribution-browser': 0.1.1 + 
'@amplitude/plugin-network-capture-browser': 1.9.15 + '@amplitude/plugin-page-url-enrichment-browser': 0.7.7 + '@amplitude/plugin-page-view-tracking-browser': 2.10.1 + '@amplitude/plugin-web-vitals-browser': 1.1.30 tslib: 2.8.1 - '@amplitude/analytics-client-common@2.4.43': + '@amplitude/analytics-client-common@2.4.45': dependencies: '@amplitude/analytics-connector': 1.6.4 - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 '@amplitude/analytics-types': 2.11.1 tslib: 2.8.1 '@amplitude/analytics-connector@1.6.4': {} - '@amplitude/analytics-core@2.45.0': + '@amplitude/analytics-core@2.47.0': dependencies: '@amplitude/analytics-connector': 1.6.4 '@types/zen-observable': 0.8.3 @@ -8323,48 +8429,53 @@ snapshots: dependencies: js-base64: 3.7.8 - '@amplitude/plugin-autocapture-browser@1.25.2': + '@amplitude/plugin-autocapture-browser@1.26.0': dependencies: - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 tslib: 2.8.1 - '@amplitude/plugin-custom-enrichment-browser@0.1.4': + '@amplitude/plugin-custom-enrichment-browser@0.1.6': dependencies: - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 tslib: 2.8.1 - '@amplitude/plugin-network-capture-browser@1.9.13': + '@amplitude/plugin-event-property-attribution-browser@0.1.1': dependencies: - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 tslib: 2.8.1 - '@amplitude/plugin-page-url-enrichment-browser@0.7.5': + '@amplitude/plugin-network-capture-browser@1.9.15': dependencies: - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 tslib: 2.8.1 - '@amplitude/plugin-page-view-tracking-browser@2.9.6': + '@amplitude/plugin-page-url-enrichment-browser@0.7.7': dependencies: - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 tslib: 2.8.1 - '@amplitude/plugin-session-replay-browser@1.27.7(@amplitude/rrweb@2.0.0-alpha.37)': + '@amplitude/plugin-page-view-tracking-browser@2.10.1': dependencies: - 
'@amplitude/analytics-client-common': 2.4.43 - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 + tslib: 2.8.1 + + '@amplitude/plugin-session-replay-browser@1.27.10(@amplitude/rrweb@2.0.0-alpha.37)': + dependencies: + '@amplitude/analytics-client-common': 2.4.45 + '@amplitude/analytics-core': 2.47.0 '@amplitude/analytics-types': 2.11.1 '@amplitude/rrweb-plugin-console-record': 2.0.0-alpha.36(@amplitude/rrweb@2.0.0-alpha.37) '@amplitude/rrweb-record': 2.0.0-alpha.36 - '@amplitude/session-replay-browser': 1.36.0(@amplitude/rrweb@2.0.0-alpha.37) + '@amplitude/session-replay-browser': 1.37.0(@amplitude/rrweb@2.0.0-alpha.37) idb-keyval: 6.2.2 tslib: 2.8.1 transitivePeerDependencies: - '@amplitude/rrweb' - rollup - '@amplitude/plugin-web-vitals-browser@1.1.28': + '@amplitude/plugin-web-vitals-browser@1.1.30': dependencies: - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-core': 2.47.0 tslib: 2.8.1 web-vitals: 5.1.0 @@ -8388,7 +8499,7 @@ snapshots: '@amplitude/rrweb-snapshot@2.0.0-alpha.37': dependencies: - postcss: 8.5.9 + postcss: 8.5.10 '@amplitude/rrweb-types@2.0.0-alpha.36': {} @@ -8409,10 +8520,10 @@ snapshots: base64-arraybuffer: 1.0.2 mitt: 3.0.1 - '@amplitude/session-replay-browser@1.36.0(@amplitude/rrweb@2.0.0-alpha.37)': + '@amplitude/session-replay-browser@1.37.0(@amplitude/rrweb@2.0.0-alpha.37)': dependencies: - '@amplitude/analytics-client-common': 2.4.43 - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-client-common': 2.4.45 + '@amplitude/analytics-core': 2.47.0 '@amplitude/analytics-types': 2.11.1 '@amplitude/experiment-core': 0.7.2 '@amplitude/rrweb-packer': 2.0.0-alpha.36 @@ -8430,56 +8541,56 @@ snapshots: '@amplitude/targeting@0.2.0': dependencies: - '@amplitude/analytics-client-common': 2.4.43 - '@amplitude/analytics-core': 2.45.0 + '@amplitude/analytics-client-common': 2.4.45 + '@amplitude/analytics-core': 2.47.0 '@amplitude/analytics-types': 2.11.1 '@amplitude/experiment-core': 0.7.2 idb: 8.0.0 
tslib: 2.8.1 - '@antfu/eslint-config@8.2.0(@eslint-react/eslint-plugin@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@next/eslint-plugin-next@16.2.3)(@types/node@25.6.0)(@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.2))(@typescript-eslint/utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(eslint-plugin-react-refresh@0.5.2(eslint@10.2.0(jiti@2.6.1)))(eslint@10.2.0(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)': + '@antfu/eslint-config@8.2.0(@eslint-react/eslint-plugin@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@next/eslint-plugin-next@16.2.4)(@types/node@25.6.0)(@typescript-eslint/typescript-estree@8.59.0(typescript@6.0.3))(@typescript-eslint/utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(eslint-plugin-react-refresh@0.5.2(eslint@10.2.1(jiti@2.6.1)))(eslint@10.2.1(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)': dependencies: '@antfu/install-pkg': 1.1.0 '@clack/prompts': 1.2.0 - '@e18e/eslint-plugin': 0.3.0(eslint@10.2.0(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1)) - 
'@eslint-community/eslint-plugin-eslint-comments': 4.7.1(eslint@10.2.0(jiti@2.6.1)) + '@e18e/eslint-plugin': 0.3.0(eslint@10.2.1(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1)) + '@eslint-community/eslint-plugin-eslint-comments': 4.7.1(eslint@10.2.1(jiti@2.6.1)) '@eslint/markdown': 8.0.1 - '@stylistic/eslint-plugin': 5.10.0(eslint@10.2.0(jiti@2.6.1)) - '@typescript-eslint/eslint-plugin': 8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@typescript-eslint/parser': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@vitest/eslint-plugin': 1.6.15(@types/node@25.6.0)(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(eslint@10.2.0(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + '@stylistic/eslint-plugin': 5.10.0(eslint@10.2.1(jiti@2.6.1)) + '@typescript-eslint/eslint-plugin': 8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@typescript-eslint/parser': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@vitest/eslint-plugin': 
1.6.15(@types/node@25.6.0)(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(eslint@10.2.1(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) ansis: 4.2.0 cac: 7.0.0 - eslint: 10.2.0(jiti@2.6.1) - eslint-config-flat-gitignore: 2.3.0(eslint@10.2.0(jiti@2.6.1)) + eslint: 10.2.1(jiti@2.6.1) + eslint-config-flat-gitignore: 2.3.0(eslint@10.2.1(jiti@2.6.1)) eslint-flat-config-utils: 3.1.0 - eslint-merge-processors: 2.0.0(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-antfu: 3.2.2(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-command: 3.5.2(@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.2))(@typescript-eslint/utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-import-lite: 0.6.0(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-jsdoc: 62.9.0(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-jsonc: 3.1.2(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-n: 17.24.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + eslint-merge-processors: 2.0.0(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-antfu: 3.2.2(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-command: 3.5.2(@typescript-eslint/typescript-estree@8.59.0(typescript@6.0.3))(@typescript-eslint/utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-import-lite: 0.6.0(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-jsdoc: 62.9.0(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-jsonc: 3.1.2(eslint@10.2.1(jiti@2.6.1)) + 
eslint-plugin-n: 17.24.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) eslint-plugin-no-only-tests: 3.3.0 - eslint-plugin-perfectionist: 5.8.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint-plugin-pnpm: 1.6.0(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-regexp: 3.1.0(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-toml: 1.3.1(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-unicorn: 64.0.0(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-unused-imports: 4.4.1(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1)) - eslint-plugin-vue: 10.8.0(@stylistic/eslint-plugin@5.10.0(eslint@10.2.0(jiti@2.6.1)))(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(vue-eslint-parser@10.4.0(eslint@10.2.0(jiti@2.6.1))) - eslint-plugin-yml: 3.3.1(eslint@10.2.0(jiti@2.6.1)) - eslint-processor-vue-blocks: 2.0.0(eslint@10.2.0(jiti@2.6.1)) + eslint-plugin-perfectionist: 5.8.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint-plugin-pnpm: 1.6.0(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-regexp: 3.1.0(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-toml: 1.3.1(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-unicorn: 64.0.0(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-unused-imports: 4.4.1(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1)) + eslint-plugin-vue: 10.8.0(@stylistic/eslint-plugin@5.10.0(eslint@10.2.1(jiti@2.6.1)))(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(vue-eslint-parser@10.4.0(eslint@10.2.1(jiti@2.6.1))) + eslint-plugin-yml: 3.3.1(eslint@10.2.1(jiti@2.6.1)) + eslint-processor-vue-blocks: 2.0.0(eslint@10.2.1(jiti@2.6.1)) globals: 17.5.0 local-pkg: 1.1.2 parse-gitignore: 2.0.0 toml-eslint-parser: 1.0.3 - 
vue-eslint-parser: 10.4.0(eslint@10.2.0(jiti@2.6.1)) + vue-eslint-parser: 10.4.0(eslint@10.2.1(jiti@2.6.1)) yaml-eslint-parser: 2.0.0 optionalDependencies: - '@eslint-react/eslint-plugin': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@next/eslint-plugin-next': 16.2.3 - eslint-plugin-react-refresh: 0.5.2(eslint@10.2.0(jiti@2.6.1)) + '@eslint-react/eslint-plugin': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@next/eslint-plugin-next': 16.2.4 + eslint-plugin-react-refresh: 0.5.2(eslint@10.2.1(jiti@2.6.1)) transitivePeerDependencies: - '@arethetypeswrong/core' - '@edge-runtime/vm' @@ -8756,7 +8867,7 @@ snapshots: dependencies: regexp-match-indices: 1.0.2 - '@cucumber/cucumber@12.8.0': + '@cucumber/cucumber@12.8.1': dependencies: '@cucumber/ci-environment': 13.0.0 '@cucumber/cucumber-expressions': 19.0.0 @@ -8764,10 +8875,10 @@ snapshots: '@cucumber/gherkin-streams': 6.0.0(@cucumber/gherkin@38.0.0)(@cucumber/message-streams@4.1.1(@cucumber/messages@32.2.0))(@cucumber/messages@32.2.0) '@cucumber/gherkin-utils': 11.0.0 '@cucumber/html-formatter': 23.0.0(@cucumber/messages@32.2.0) - '@cucumber/junit-xml-formatter': 0.13.2(@cucumber/messages@32.2.0) + '@cucumber/junit-xml-formatter': 0.13.3(@cucumber/messages@32.2.0) '@cucumber/message-streams': 4.1.1(@cucumber/messages@32.2.0) '@cucumber/messages': 32.2.0 - '@cucumber/pretty-formatter': 1.0.1(@cucumber/cucumber@12.8.0)(@cucumber/messages@32.2.0) + '@cucumber/pretty-formatter': 1.0.1(@cucumber/cucumber@12.8.1)(@cucumber/messages@32.2.0) '@cucumber/tag-expressions': 9.1.0 assertion-error-formatter: 3.0.0 capital-case: 1.0.4 @@ -8822,10 +8933,10 @@ snapshots: dependencies: '@cucumber/messages': 32.2.0 - '@cucumber/junit-xml-formatter@0.13.2(@cucumber/messages@32.2.0)': + '@cucumber/junit-xml-formatter@0.13.3(@cucumber/messages@32.2.0)': dependencies: '@cucumber/messages': 32.2.0 - '@cucumber/query': 14.7.0(@cucumber/messages@32.2.0) + '@cucumber/query': 15.0.1(@cucumber/messages@32.2.0) '@teppeis/multimaps': 
3.0.0 luxon: 3.7.2 xmlbuilder: 15.1.1 @@ -8840,16 +8951,16 @@ snapshots: class-transformer: 0.5.1 reflect-metadata: 0.2.2 - '@cucumber/pretty-formatter@1.0.1(@cucumber/cucumber@12.8.0)(@cucumber/messages@32.2.0)': + '@cucumber/pretty-formatter@1.0.1(@cucumber/cucumber@12.8.1)(@cucumber/messages@32.2.0)': dependencies: - '@cucumber/cucumber': 12.8.0 + '@cucumber/cucumber': 12.8.1 '@cucumber/messages': 32.2.0 ansi-styles: 5.2.0 cli-table3: 0.6.5 figures: 3.2.0 ts-dedent: 2.2.0 - '@cucumber/query@14.7.0(@cucumber/messages@32.2.0)': + '@cucumber/query@15.0.1(@cucumber/messages@32.2.0)': dependencies: '@cucumber/messages': 32.2.0 '@teppeis/multimaps': 3.0.0 @@ -8857,29 +8968,45 @@ snapshots: '@cucumber/tag-expressions@9.1.0': {} - '@e18e/eslint-plugin@0.3.0(eslint@10.2.0(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1))': + '@e18e/eslint-plugin@0.3.0(eslint@10.2.1(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1))': dependencies: - eslint-plugin-depend: 1.5.0(eslint@10.2.0(jiti@2.6.1)) + eslint-plugin-depend: 1.5.0(eslint@10.2.1(jiti@2.6.1)) optionalDependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) oxlint: 1.60.0(oxlint-tsgolint@0.21.1) - '@egoist/tailwindcss-icons@1.9.2(tailwindcss@4.2.2)': + '@egoist/tailwindcss-icons@1.9.2(tailwindcss@4.2.4)': dependencies: '@iconify/utils': 3.1.0 - tailwindcss: 4.2.2 + tailwindcss: 4.2.4 + + '@emnapi/core@1.9.2': + dependencies: + '@emnapi/wasi-threads': 1.2.1 + tslib: 2.8.1 + optional: true '@emnapi/runtime@1.9.1': dependencies: tslib: 2.8.1 optional: true + '@emnapi/runtime@1.9.2': + dependencies: + tslib: 2.8.1 + optional: true + + '@emnapi/wasi-threads@1.2.1': + dependencies: + tslib: 2.8.1 + optional: true + '@emoji-mart/data@1.2.1': {} '@es-joy/jsdoccomment@0.84.0': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.58.2 + '@typescript-eslint/types': 8.59.0 comment-parser: 1.4.5 esquery: 1.7.0 jsdoc-type-pratt-parser: 7.1.1 @@ -8887,7 +9014,7 @@ snapshots: 
'@es-joy/jsdoccomment@0.86.0': dependencies: '@types/estree': 1.0.8 - '@typescript-eslint/types': 8.58.2 + '@typescript-eslint/types': 8.59.0 comment-parser: 1.4.6 esquery: 1.7.0 jsdoc-type-pratt-parser: 7.2.0 @@ -8972,15 +9099,15 @@ snapshots: '@esbuild/win32-x64@0.27.2': optional: true - '@eslint-community/eslint-plugin-eslint-comments@4.7.1(eslint@10.2.0(jiti@2.6.1))': + '@eslint-community/eslint-plugin-eslint-comments@4.7.1(eslint@10.2.1(jiti@2.6.1))': dependencies: escape-string-regexp: 4.0.0 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) ignore: 7.0.5 - '@eslint-community/eslint-utils@4.9.1(eslint@10.2.0(jiti@2.6.1))': + '@eslint-community/eslint-utils@4.9.1(eslint@10.2.1(jiti@2.6.1))': dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) eslint-visitor-keys: 3.4.3 '@eslint-community/eslint-utils@4.9.1(eslint@9.27.0(jiti@2.6.1))': @@ -8990,77 +9117,77 @@ snapshots: '@eslint-community/regexpp@4.12.2': {} - '@eslint-react/ast@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@eslint-react/ast@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.2) - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.3) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) string-ts: 2.3.1 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@eslint-react/core@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@eslint-react/core@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/var': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + 
'@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/var': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@eslint-react/eslint-plugin@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@eslint-react/eslint-plugin@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) - eslint-plugin-react-dom: 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint-plugin-react-naming-convention: 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint-plugin-react-rsc: 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint-plugin-react-web-api: 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint-plugin-react-x: 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - ts-api-utils: 2.5.0(typescript@6.0.2) - typescript: 6.0.2 + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) + eslint-plugin-react-dom: 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + 
eslint-plugin-react-naming-convention: 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint-plugin-react-rsc: 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint-plugin-react-web-api: 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint-plugin-react-x: 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@eslint-react/shared@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@eslint-react/shared@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 zod: 4.3.6 transitivePeerDependencies: - supports-color - '@eslint-react/var@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@eslint-react/var@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@eslint/compat@2.0.3(eslint@10.2.0(jiti@2.6.1))': + '@eslint/compat@2.0.3(eslint@10.2.1(jiti@2.6.1))': dependencies: '@eslint/core': 1.2.0 optionalDependencies: - eslint: 10.2.0(jiti@2.6.1) + 
eslint: 10.2.1(jiti@2.6.1) '@eslint/config-array@0.20.1': dependencies: @@ -9070,9 +9197,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@eslint/config-array@0.23.4': + '@eslint/config-array@0.23.5': dependencies: - '@eslint/object-schema': 3.0.4 + '@eslint/object-schema': 3.0.5 debug: 4.4.3(supports-color@8.1.1) minimatch: 10.2.4 transitivePeerDependencies: @@ -9084,6 +9211,10 @@ snapshots: dependencies: '@eslint/core': 1.2.0 + '@eslint/config-helpers@0.5.5': + dependencies: + '@eslint/core': 1.2.1 + '@eslint/core@0.14.0': dependencies: '@types/json-schema': 7.0.15 @@ -9100,6 +9231,10 @@ snapshots: dependencies: '@types/json-schema': 7.0.15 + '@eslint/core@1.2.1': + dependencies: + '@types/json-schema': 7.0.15 + '@eslint/css-tree@4.0.1': dependencies: mdn-data: 2.27.1 @@ -9119,9 +9254,9 @@ snapshots: transitivePeerDependencies: - supports-color - '@eslint/js@10.0.1(eslint@10.2.0(jiti@2.6.1))': + '@eslint/js@10.0.1(eslint@10.2.1(jiti@2.6.1))': optionalDependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) '@eslint/js@9.27.0': {} @@ -9157,7 +9292,7 @@ snapshots: '@eslint/object-schema@2.1.7': {} - '@eslint/object-schema@3.0.4': {} + '@eslint/object-schema@3.0.5': {} '@eslint/plugin-kit@0.3.5': dependencies: @@ -9174,9 +9309,9 @@ snapshots: '@eslint/core': 1.2.0 levn: 0.4.1 - '@eslint/plugin-kit@0.7.0': + '@eslint/plugin-kit@0.7.1': dependencies: - '@eslint/core': 1.2.0 + '@eslint/core': 1.2.1 levn: 0.4.1 '@floating-ui/core@1.7.5': @@ -9223,7 +9358,7 @@ snapshots: '@floating-ui/react': 0.26.28(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@react-aria/focus': 3.21.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@react-aria/interactions': 3.27.1(react-dom@19.2.5(react@19.2.5))(react@19.2.5) - '@tanstack/react-virtual': 3.13.23(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + '@tanstack/react-virtual': 3.13.24(react-dom@19.2.5(react@19.2.5))(react@19.2.5) react: 19.2.5 react-dom: 19.2.5(react@19.2.5) use-sync-external-store: 
1.6.0(react@19.2.5) @@ -9390,13 +9525,13 @@ snapshots: dependencies: minipass: 7.1.3 - '@joshwooding/vite-plugin-react-docgen-typescript@0.7.0(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2)': + '@joshwooding/vite-plugin-react-docgen-typescript@0.7.0(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3)': dependencies: glob: 13.0.6 - react-docgen-typescript: 2.4.0(typescript@6.0.2) - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + react-docgen-typescript: 2.4.0(typescript@6.0.3) + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 '@jridgewell/gen-mapping@0.3.13': dependencies: @@ -9639,9 +9774,17 @@ snapshots: react: 19.2.5 react-dom: 19.2.5(react@19.2.5) - '@napi-rs/wasm-runtime@1.1.2(@emnapi/runtime@1.9.1)': + '@napi-rs/wasm-runtime@1.1.2(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2)': dependencies: - '@emnapi/runtime': 1.9.1 + '@emnapi/core': 1.9.2 + '@emnapi/runtime': 1.9.2 + '@tybys/wasm-util': 0.10.1 + optional: true + + '@napi-rs/wasm-runtime@1.1.4(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2)': + dependencies: + '@emnapi/core': 1.9.2 + '@emnapi/runtime': 1.9.2 '@tybys/wasm-util': 0.10.1 optional: true @@ -9649,41 +9792,41 @@ snapshots: '@next/env@16.0.0': {} - '@next/env@16.2.3': {} + '@next/env@16.2.4': {} - '@next/eslint-plugin-next@16.2.3': + '@next/eslint-plugin-next@16.2.4': dependencies: fast-glob: 3.3.1 - '@next/mdx@16.2.3(@mdx-js/loader@3.1.1)(@mdx-js/react@3.1.1(@types/react@19.2.14)(react@19.2.5))': + '@next/mdx@16.2.4(@mdx-js/loader@3.1.1)(@mdx-js/react@3.1.1(@types/react@19.2.14)(react@19.2.5))': dependencies: source-map: 0.7.6 
optionalDependencies: '@mdx-js/loader': 3.1.1 '@mdx-js/react': 3.1.1(@types/react@19.2.14)(react@19.2.5) - '@next/swc-darwin-arm64@16.2.3': + '@next/swc-darwin-arm64@16.2.4': optional: true - '@next/swc-darwin-x64@16.2.3': + '@next/swc-darwin-x64@16.2.4': optional: true - '@next/swc-linux-arm64-gnu@16.2.3': + '@next/swc-linux-arm64-gnu@16.2.4': optional: true - '@next/swc-linux-arm64-musl@16.2.3': + '@next/swc-linux-arm64-musl@16.2.4': optional: true - '@next/swc-linux-x64-gnu@16.2.3': + '@next/swc-linux-x64-gnu@16.2.4': optional: true - '@next/swc-linux-x64-musl@16.2.3': + '@next/swc-linux-x64-musl@16.2.4': optional: true - '@next/swc-win32-arm64-msvc@16.2.3': + '@next/swc-win32-arm64-msvc@16.2.4': optional: true - '@next/swc-win32-x64-msvc@16.2.3': + '@next/swc-win32-x64-msvc@16.2.4': optional: true '@nodelib/fs.scandir@2.1.5': @@ -9756,85 +9899,82 @@ snapshots: transitivePeerDependencies: - '@opentelemetry/api' - '@orpc/tanstack-query@1.13.14(@orpc/client@1.13.14)(@tanstack/query-core@5.99.0)': + '@orpc/tanstack-query@1.13.14(@orpc/client@1.13.14)(@tanstack/query-core@5.99.2)': dependencies: '@orpc/client': 1.13.14 '@orpc/shared': 1.13.14 - '@tanstack/query-core': 5.99.0 + '@tanstack/query-core': 5.99.2 transitivePeerDependencies: - '@opentelemetry/api' '@ota-meshi/ast-token-store@0.3.0': {} - '@oxc-parser/binding-android-arm-eabi@0.121.0': + '@oxc-parser/binding-android-arm-eabi@0.126.0': optional: true - '@oxc-parser/binding-android-arm64@0.121.0': + '@oxc-parser/binding-android-arm64@0.126.0': optional: true - '@oxc-parser/binding-darwin-arm64@0.121.0': + '@oxc-parser/binding-darwin-arm64@0.126.0': optional: true - '@oxc-parser/binding-darwin-x64@0.121.0': + '@oxc-parser/binding-darwin-x64@0.126.0': optional: true - '@oxc-parser/binding-freebsd-x64@0.121.0': + '@oxc-parser/binding-freebsd-x64@0.126.0': optional: true - '@oxc-parser/binding-linux-arm-gnueabihf@0.121.0': + '@oxc-parser/binding-linux-arm-gnueabihf@0.126.0': optional: true - 
'@oxc-parser/binding-linux-arm-musleabihf@0.121.0': + '@oxc-parser/binding-linux-arm-musleabihf@0.126.0': optional: true - '@oxc-parser/binding-linux-arm64-gnu@0.121.0': + '@oxc-parser/binding-linux-arm64-gnu@0.126.0': optional: true - '@oxc-parser/binding-linux-arm64-musl@0.121.0': + '@oxc-parser/binding-linux-arm64-musl@0.126.0': optional: true - '@oxc-parser/binding-linux-ppc64-gnu@0.121.0': + '@oxc-parser/binding-linux-ppc64-gnu@0.126.0': optional: true - '@oxc-parser/binding-linux-riscv64-gnu@0.121.0': + '@oxc-parser/binding-linux-riscv64-gnu@0.126.0': optional: true - '@oxc-parser/binding-linux-riscv64-musl@0.121.0': + '@oxc-parser/binding-linux-riscv64-musl@0.126.0': optional: true - '@oxc-parser/binding-linux-s390x-gnu@0.121.0': + '@oxc-parser/binding-linux-s390x-gnu@0.126.0': optional: true - '@oxc-parser/binding-linux-x64-gnu@0.121.0': + '@oxc-parser/binding-linux-x64-gnu@0.126.0': optional: true - '@oxc-parser/binding-linux-x64-musl@0.121.0': + '@oxc-parser/binding-linux-x64-musl@0.126.0': optional: true - '@oxc-parser/binding-openharmony-arm64@0.121.0': + '@oxc-parser/binding-openharmony-arm64@0.126.0': optional: true - '@oxc-parser/binding-wasm32-wasi@0.121.0(@emnapi/runtime@1.9.1)': + '@oxc-parser/binding-wasm32-wasi@0.126.0': dependencies: - '@napi-rs/wasm-runtime': 1.1.2(@emnapi/runtime@1.9.1) - transitivePeerDependencies: - - '@emnapi/core' - - '@emnapi/runtime' + '@emnapi/core': 1.9.2 + '@emnapi/runtime': 1.9.2 + '@napi-rs/wasm-runtime': 1.1.4(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2) optional: true - '@oxc-parser/binding-win32-arm64-msvc@0.121.0': + '@oxc-parser/binding-win32-arm64-msvc@0.126.0': optional: true - '@oxc-parser/binding-win32-ia32-msvc@0.121.0': + '@oxc-parser/binding-win32-ia32-msvc@0.126.0': optional: true - '@oxc-parser/binding-win32-x64-msvc@0.121.0': + '@oxc-parser/binding-win32-x64-msvc@0.126.0': optional: true '@oxc-project/runtime@0.126.0': {} - '@oxc-project/types@0.121.0': {} - '@oxc-project/types@0.126.0': {} 
'@oxc-resolver/binding-android-arm-eabi@11.19.1': @@ -9885,9 +10025,9 @@ snapshots: '@oxc-resolver/binding-openharmony-arm64@11.19.1': optional: true - '@oxc-resolver/binding-wasm32-wasi@11.19.1(@emnapi/runtime@1.9.1)': + '@oxc-resolver/binding-wasm32-wasi@11.19.1(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2)': dependencies: - '@napi-rs/wasm-runtime': 1.1.2(@emnapi/runtime@1.9.1) + '@napi-rs/wasm-runtime': 1.1.2(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2) transitivePeerDependencies: - '@emnapi/core' - '@emnapi/runtime' @@ -10353,38 +10493,38 @@ snapshots: estree-walker: 2.0.2 picomatch: 4.0.4 - '@sentry-internal/browser-utils@10.48.0': + '@sentry-internal/browser-utils@10.49.0': dependencies: - '@sentry/core': 10.48.0 + '@sentry/core': 10.49.0 - '@sentry-internal/feedback@10.48.0': + '@sentry-internal/feedback@10.49.0': dependencies: - '@sentry/core': 10.48.0 + '@sentry/core': 10.49.0 - '@sentry-internal/replay-canvas@10.48.0': + '@sentry-internal/replay-canvas@10.49.0': dependencies: - '@sentry-internal/replay': 10.48.0 - '@sentry/core': 10.48.0 + '@sentry-internal/replay': 10.49.0 + '@sentry/core': 10.49.0 - '@sentry-internal/replay@10.48.0': + '@sentry-internal/replay@10.49.0': dependencies: - '@sentry-internal/browser-utils': 10.48.0 - '@sentry/core': 10.48.0 + '@sentry-internal/browser-utils': 10.49.0 + '@sentry/core': 10.49.0 - '@sentry/browser@10.48.0': + '@sentry/browser@10.49.0': dependencies: - '@sentry-internal/browser-utils': 10.48.0 - '@sentry-internal/feedback': 10.48.0 - '@sentry-internal/replay': 10.48.0 - '@sentry-internal/replay-canvas': 10.48.0 - '@sentry/core': 10.48.0 + '@sentry-internal/browser-utils': 10.49.0 + '@sentry-internal/feedback': 10.49.0 + '@sentry-internal/replay': 10.49.0 + '@sentry-internal/replay-canvas': 10.49.0 + '@sentry/core': 10.49.0 - '@sentry/core@10.48.0': {} + '@sentry/core@10.49.0': {} - '@sentry/react@10.48.0(react@19.2.5)': + '@sentry/react@10.49.0(react@19.2.5)': dependencies: - '@sentry/browser': 10.48.0 - 
'@sentry/core': 10.48.0 + '@sentry/browser': 10.49.0 + '@sentry/core': 10.49.0 react: 19.2.5 '@shikijs/core@4.0.2': @@ -10474,10 +10614,10 @@ snapshots: '@standard-schema/spec@1.1.0': {} - '@storybook/addon-docs@10.3.5(@types/react@19.2.14)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))': + '@storybook/addon-docs@10.3.5(@types/react@19.2.14)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))': dependencies: '@mdx-js/react': 3.1.1(@types/react@19.2.14)(react@19.2.5) - '@storybook/csf-plugin': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) + '@storybook/csf-plugin': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) '@storybook/icons': 2.0.1(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@storybook/react-dom-shim': 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) react: 19.2.5 @@ -10507,24 +10647,24 @@ snapshots: storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) ts-dedent: 2.2.0 - 
'@storybook/builder-vite@10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))': + '@storybook/builder-vite@10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))': dependencies: - '@storybook/csf-plugin': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) + '@storybook/csf-plugin': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) ts-dedent: 2.2.0 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - esbuild - rollup - webpack - '@storybook/csf-plugin@10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))': + 
'@storybook/csf-plugin@10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))': dependencies: storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) unplugin: 2.3.11 optionalDependencies: esbuild: 0.27.2 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' '@storybook/global@5.0.0': {} @@ -10533,20 +10673,20 @@ snapshots: react: 19.2.5 react-dom: 19.2.5(react@19.2.5) - '@storybook/nextjs-vite@10.3.5(@babel/core@7.29.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2)': + '@storybook/nextjs-vite@10.3.5(@babel/core@7.29.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3)': dependencies: - '@storybook/builder-vite': 
10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) - '@storybook/react': 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) - '@storybook/react-vite': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) - next: 16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + '@storybook/builder-vite': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) + '@storybook/react': 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) + '@storybook/react-vite': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) + next: 16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) react: 19.2.5 react-dom: 19.2.5(react@19.2.5) storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) styled-jsx: 5.1.6(@babel/core@7.29.0)(react@19.2.5) - vite: 
'@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' - vite-plugin-storybook-nextjs: 3.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' + vite-plugin-storybook-nextjs: 3.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - '@babel/core' - babel-plugin-macros @@ -10561,12 +10701,12 @@ snapshots: react-dom: 19.2.5(react@19.2.5) storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) - '@storybook/react-vite@10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2)': + '@storybook/react-vite@10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3)': dependencies: - 
'@joshwooding/vite-plugin-react-docgen-typescript': 0.7.0(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2) + '@joshwooding/vite-plugin-react-docgen-typescript': 0.7.0(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3) '@rollup/pluginutils': 5.3.0 - '@storybook/builder-vite': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) - '@storybook/react': 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2) + '@storybook/builder-vite': 10.3.5(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) + '@storybook/react': 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3) empathic: 2.0.0 magic-string: 0.30.21 react: 19.2.5 @@ -10575,7 +10715,7 @@ snapshots: resolve: 1.22.11 storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) tsconfig-paths: 4.2.0 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - esbuild - rollup @@ -10583,17 +10723,17 @@ snapshots: - typescript - webpack - 
'@storybook/react@10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2)': + '@storybook/react@10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3)': dependencies: '@storybook/global': 5.0.0 '@storybook/react-dom-shim': 10.3.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)) react: 19.2.5 react-docgen: 8.0.3 - react-docgen-typescript: 2.4.0(typescript@6.0.2) + react-docgen-typescript: 2.4.0(typescript@6.0.3) react-dom: 19.2.5(react@19.2.5) storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color @@ -10606,11 +10746,11 @@ snapshots: transitivePeerDependencies: - supports-color - '@stylistic/eslint-plugin@5.10.0(eslint@10.2.0(jiti@2.6.1))': + '@stylistic/eslint-plugin@5.10.0(eslint@10.2.1(jiti@2.6.1))': dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) - '@typescript-eslint/types': 8.58.2 - eslint: 10.2.0(jiti@2.6.1) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) + '@typescript-eslint/types': 8.59.0 + eslint: 10.2.1(jiti@2.6.1) eslint-visitor-keys: 4.2.1 espree: 10.4.0 estraverse: 5.3.0 @@ -10626,21 +10766,21 @@ snapshots: dependencies: tslib: 2.8.1 - '@t3-oss/env-core@0.13.11(typescript@6.0.2)(valibot@1.3.1(typescript@6.0.2))(zod@4.3.6)': + '@t3-oss/env-core@0.13.11(typescript@6.0.3)(valibot@1.3.1(typescript@6.0.3))(zod@4.3.6)': optionalDependencies: - typescript: 6.0.2 - valibot: 1.3.1(typescript@6.0.2) + typescript: 6.0.3 + valibot: 1.3.1(typescript@6.0.3) zod: 4.3.6 - '@t3-oss/env-nextjs@0.13.11(typescript@6.0.2)(valibot@1.3.1(typescript@6.0.2))(zod@4.3.6)': + 
'@t3-oss/env-nextjs@0.13.11(typescript@6.0.3)(valibot@1.3.1(typescript@6.0.3))(zod@4.3.6)': dependencies: - '@t3-oss/env-core': 0.13.11(typescript@6.0.2)(valibot@1.3.1(typescript@6.0.2))(zod@4.3.6) + '@t3-oss/env-core': 0.13.11(typescript@6.0.3)(valibot@1.3.1(typescript@6.0.3))(zod@4.3.6) optionalDependencies: - typescript: 6.0.2 - valibot: 1.3.1(typescript@6.0.2) + typescript: 6.0.3 + valibot: 1.3.1(typescript@6.0.3) zod: 4.3.6 - '@tailwindcss/node@4.2.2': + '@tailwindcss/node@4.2.4': dependencies: '@jridgewell/remapping': 2.3.5 enhanced-resolve: 5.20.1 @@ -10648,78 +10788,78 @@ snapshots: lightningcss: 1.32.0 magic-string: 0.30.21 source-map-js: 1.2.1 - tailwindcss: 4.2.2 + tailwindcss: 4.2.4 - '@tailwindcss/oxide-android-arm64@4.2.2': + '@tailwindcss/oxide-android-arm64@4.2.4': optional: true - '@tailwindcss/oxide-darwin-arm64@4.2.2': + '@tailwindcss/oxide-darwin-arm64@4.2.4': optional: true - '@tailwindcss/oxide-darwin-x64@4.2.2': + '@tailwindcss/oxide-darwin-x64@4.2.4': optional: true - '@tailwindcss/oxide-freebsd-x64@4.2.2': + '@tailwindcss/oxide-freebsd-x64@4.2.4': optional: true - '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.2': + '@tailwindcss/oxide-linux-arm-gnueabihf@4.2.4': optional: true - '@tailwindcss/oxide-linux-arm64-gnu@4.2.2': + '@tailwindcss/oxide-linux-arm64-gnu@4.2.4': optional: true - '@tailwindcss/oxide-linux-arm64-musl@4.2.2': + '@tailwindcss/oxide-linux-arm64-musl@4.2.4': optional: true - '@tailwindcss/oxide-linux-x64-gnu@4.2.2': + '@tailwindcss/oxide-linux-x64-gnu@4.2.4': optional: true - '@tailwindcss/oxide-linux-x64-musl@4.2.2': + '@tailwindcss/oxide-linux-x64-musl@4.2.4': optional: true - '@tailwindcss/oxide-wasm32-wasi@4.2.2': + '@tailwindcss/oxide-wasm32-wasi@4.2.4': optional: true - '@tailwindcss/oxide-win32-arm64-msvc@4.2.2': + '@tailwindcss/oxide-win32-arm64-msvc@4.2.4': optional: true - '@tailwindcss/oxide-win32-x64-msvc@4.2.2': + '@tailwindcss/oxide-win32-x64-msvc@4.2.4': optional: true - '@tailwindcss/oxide@4.2.2': + 
'@tailwindcss/oxide@4.2.4': optionalDependencies: - '@tailwindcss/oxide-android-arm64': 4.2.2 - '@tailwindcss/oxide-darwin-arm64': 4.2.2 - '@tailwindcss/oxide-darwin-x64': 4.2.2 - '@tailwindcss/oxide-freebsd-x64': 4.2.2 - '@tailwindcss/oxide-linux-arm-gnueabihf': 4.2.2 - '@tailwindcss/oxide-linux-arm64-gnu': 4.2.2 - '@tailwindcss/oxide-linux-arm64-musl': 4.2.2 - '@tailwindcss/oxide-linux-x64-gnu': 4.2.2 - '@tailwindcss/oxide-linux-x64-musl': 4.2.2 - '@tailwindcss/oxide-wasm32-wasi': 4.2.2 - '@tailwindcss/oxide-win32-arm64-msvc': 4.2.2 - '@tailwindcss/oxide-win32-x64-msvc': 4.2.2 + '@tailwindcss/oxide-android-arm64': 4.2.4 + '@tailwindcss/oxide-darwin-arm64': 4.2.4 + '@tailwindcss/oxide-darwin-x64': 4.2.4 + '@tailwindcss/oxide-freebsd-x64': 4.2.4 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.2.4 + '@tailwindcss/oxide-linux-arm64-gnu': 4.2.4 + '@tailwindcss/oxide-linux-arm64-musl': 4.2.4 + '@tailwindcss/oxide-linux-x64-gnu': 4.2.4 + '@tailwindcss/oxide-linux-x64-musl': 4.2.4 + '@tailwindcss/oxide-wasm32-wasi': 4.2.4 + '@tailwindcss/oxide-win32-arm64-msvc': 4.2.4 + '@tailwindcss/oxide-win32-x64-msvc': 4.2.4 - '@tailwindcss/postcss@4.2.2': + '@tailwindcss/postcss@4.2.4': dependencies: '@alloc/quick-lru': 5.2.0 - '@tailwindcss/node': 4.2.2 - '@tailwindcss/oxide': 4.2.2 - postcss: 8.5.9 - tailwindcss: 4.2.2 + '@tailwindcss/node': 4.2.4 + '@tailwindcss/oxide': 4.2.4 + postcss: 8.5.10 + tailwindcss: 4.2.4 - '@tailwindcss/typography@0.5.19(tailwindcss@4.2.2)': + '@tailwindcss/typography@0.5.19(tailwindcss@4.2.4)': dependencies: postcss-selector-parser: 6.0.10 - tailwindcss: 4.2.2 + tailwindcss: 4.2.4 - '@tailwindcss/vite@4.2.2(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))': + '@tailwindcss/vite@4.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))': dependencies: - '@tailwindcss/node': 4.2.2 - '@tailwindcss/oxide': 
4.2.2 - tailwindcss: 4.2.2 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + '@tailwindcss/node': 4.2.4 + '@tailwindcss/oxide': 4.2.4 + tailwindcss: 4.2.4 + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' '@tanstack/devtools-client@0.0.6': dependencies: @@ -10765,26 +10905,26 @@ snapshots: - csstype - utf-8-validate - '@tanstack/eslint-plugin-query@5.99.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@tanstack/eslint-plugin-query@5.99.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@tanstack/form-core@1.29.0': + '@tanstack/form-core@1.29.1': dependencies: '@tanstack/devtools-event-client': 0.4.3 '@tanstack/pacer-lite': 0.1.1 '@tanstack/store': 0.9.3 - '@tanstack/form-devtools@0.2.21(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11)': + '@tanstack/form-devtools@0.2.22(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11)': dependencies: '@tanstack/devtools-ui': 0.5.1(csstype@3.2.3) '@tanstack/devtools-utils': 0.4.0(@types/react@19.2.14)(react@19.2.5)(solid-js@1.9.11) - '@tanstack/form-core': 1.29.0 + '@tanstack/form-core': 1.29.1 clsx: 2.1.1 dayjs: 1.11.20 goober: 2.1.18(csstype@3.2.3) @@ -10798,9 +10938,9 @@ snapshots: '@tanstack/pacer-lite@0.1.1': {} - '@tanstack/query-core@5.99.0': {} + '@tanstack/query-core@5.99.2': {} - '@tanstack/query-devtools@5.99.0': {} + '@tanstack/query-devtools@5.99.2': {} 
'@tanstack/react-devtools@0.10.2(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(csstype@3.2.3)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': dependencies: @@ -10814,10 +10954,10 @@ snapshots: - csstype - utf-8-validate - '@tanstack/react-form-devtools@0.2.21(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11)': + '@tanstack/react-form-devtools@0.2.22(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11)': dependencies: '@tanstack/devtools-utils': 0.4.0(@types/react@19.2.14)(react@19.2.5)(solid-js@1.9.11) - '@tanstack/form-devtools': 0.2.21(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11) + '@tanstack/form-devtools': 0.2.22(@types/react@19.2.14)(csstype@3.2.3)(react@19.2.5)(solid-js@1.9.11) react: 19.2.5 transitivePeerDependencies: - '@types/react' @@ -10826,23 +10966,23 @@ snapshots: - solid-js - vue - '@tanstack/react-form@1.29.0(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': + '@tanstack/react-form@1.29.1(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': dependencies: - '@tanstack/form-core': 1.29.0 + '@tanstack/form-core': 1.29.1 '@tanstack/react-store': 0.9.3(react-dom@19.2.5(react@19.2.5))(react@19.2.5) react: 19.2.5 transitivePeerDependencies: - react-dom - '@tanstack/react-query-devtools@5.99.0(@tanstack/react-query@5.99.0(react@19.2.5))(react@19.2.5)': + '@tanstack/react-query-devtools@5.99.2(@tanstack/react-query@5.99.2(react@19.2.5))(react@19.2.5)': dependencies: - '@tanstack/query-devtools': 5.99.0 - '@tanstack/react-query': 5.99.0(react@19.2.5) + '@tanstack/query-devtools': 5.99.2 + '@tanstack/react-query': 5.99.2(react@19.2.5) react: 19.2.5 - '@tanstack/react-query@5.99.0(react@19.2.5)': + '@tanstack/react-query@5.99.2(react@19.2.5)': dependencies: - '@tanstack/query-core': 5.99.0 + '@tanstack/query-core': 5.99.2 react: 19.2.5 '@tanstack/react-store@0.9.3(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': @@ -10852,15 +10992,15 @@ snapshots: react-dom: 19.2.5(react@19.2.5) 
use-sync-external-store: 1.6.0(react@19.2.5) - '@tanstack/react-virtual@3.13.23(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': + '@tanstack/react-virtual@3.13.24(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': dependencies: - '@tanstack/virtual-core': 3.13.23 + '@tanstack/virtual-core': 3.14.0 react: 19.2.5 react-dom: 19.2.5(react@19.2.5) '@tanstack/store@0.9.3': {} - '@tanstack/virtual-core@3.13.23': {} + '@tanstack/virtual-core@3.14.0': {} '@teppeis/multimaps@3.0.0': {} @@ -10898,46 +11038,46 @@ snapshots: dependencies: '@testing-library/dom': 10.4.1 - '@tsslint/cli@3.0.3(@tsslint/compat-eslint@3.0.3(jiti@2.6.1)(typescript@6.0.2))(typescript@6.0.2)': + '@tsslint/cli@3.0.4(@tsslint/compat-eslint@3.0.4(jiti@2.6.1)(typescript@6.0.3))(typescript@6.0.3)': dependencies: '@clack/prompts': 0.8.2 - '@tsslint/config': 3.0.3(@tsslint/compat-eslint@3.0.3(jiti@2.6.1)(typescript@6.0.2))(typescript@6.0.2) - '@tsslint/core': 3.0.3 + '@tsslint/config': 3.0.4(@tsslint/compat-eslint@3.0.4(jiti@2.6.1)(typescript@6.0.3))(typescript@6.0.3) + '@tsslint/core': 3.0.4 '@volar/language-core': 2.4.28 '@volar/language-hub': 0.0.1 '@volar/typescript': 2.4.28 minimatch: 10.2.4 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - '@tsslint/compat-eslint' - tsl - '@tsslint/compat-eslint@3.0.3(jiti@2.6.1)(typescript@6.0.2)': + '@tsslint/compat-eslint@3.0.4(jiti@2.6.1)(typescript@6.0.3)': dependencies: - '@tsslint/types': 3.0.3 - '@typescript-eslint/parser': 8.58.2(eslint@9.27.0(jiti@2.6.1))(typescript@6.0.2) + '@tsslint/types': 3.0.4 + '@typescript-eslint/parser': 8.59.0(eslint@9.27.0(jiti@2.6.1))(typescript@6.0.3) eslint: 9.27.0(jiti@2.6.1) transitivePeerDependencies: - jiti - supports-color - typescript - '@tsslint/config@3.0.3(@tsslint/compat-eslint@3.0.3(jiti@2.6.1)(typescript@6.0.2))(typescript@6.0.2)': + '@tsslint/config@3.0.4(@tsslint/compat-eslint@3.0.4(jiti@2.6.1)(typescript@6.0.3))(typescript@6.0.3)': dependencies: - '@tsslint/types': 3.0.3 + '@tsslint/types': 3.0.4 
minimatch: 10.2.4 - ts-api-utils: 2.5.0(typescript@6.0.2) + ts-api-utils: 2.5.0(typescript@6.0.3) optionalDependencies: - '@tsslint/compat-eslint': 3.0.3(jiti@2.6.1)(typescript@6.0.2) + '@tsslint/compat-eslint': 3.0.4(jiti@2.6.1)(typescript@6.0.3) transitivePeerDependencies: - typescript - '@tsslint/core@3.0.3': + '@tsslint/core@3.0.4': dependencies: - '@tsslint/types': 3.0.3 + '@tsslint/types': 3.0.4 minimatch: 10.2.4 - '@tsslint/types@3.0.3': {} + '@tsslint/types@3.0.4': {} '@tybys/wasm-util@0.10.1': dependencies: @@ -11175,52 +11315,89 @@ snapshots: '@types/zen-observable@0.8.3': {} - '@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: '@eslint-community/regexpp': 4.12.2 - '@typescript-eslint/parser': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/parser': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/visitor-keys': 8.58.2 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) ignore: 7.0.5 natural-compare: 1.4.0 - ts-api-utils: 2.5.0(typescript@6.0.2) - typescript: 6.0.2 + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + 
'@typescript-eslint/eslint-plugin@8.59.0(@typescript-eslint/parser@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': + dependencies: + '@eslint-community/regexpp': 4.12.2 + '@typescript-eslint/parser': 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@typescript-eslint/scope-manager': 8.59.0 + '@typescript-eslint/type-utils': 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@typescript-eslint/utils': 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@typescript-eslint/visitor-keys': 8.59.0 + eslint: 10.2.1(jiti@2.6.1) + ignore: 7.0.5 + natural-compare: 1.4.0 + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: '@typescript-eslint/scope-manager': 8.58.2 '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.2) + '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.3) '@typescript-eslint/visitor-keys': 8.58.2 debug: 4.4.3(supports-color@8.1.1) - eslint: 10.2.0(jiti@2.6.1) - typescript: 6.0.2 + eslint: 10.2.1(jiti@2.6.1) + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/parser@8.58.2(eslint@9.27.0(jiti@2.6.1))(typescript@6.0.2)': + '@typescript-eslint/parser@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: - '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.2) - '@typescript-eslint/visitor-keys': 8.58.2 + '@typescript-eslint/scope-manager': 8.59.0 + '@typescript-eslint/types': 8.59.0 + '@typescript-eslint/typescript-estree': 8.59.0(typescript@6.0.3) + '@typescript-eslint/visitor-keys': 8.59.0 + debug: 4.4.3(supports-color@8.1.1) + eslint: 10.2.1(jiti@2.6.1) + typescript: 6.0.3 + transitivePeerDependencies: + - supports-color + + 
'@typescript-eslint/parser@8.59.0(eslint@9.27.0(jiti@2.6.1))(typescript@6.0.3)': + dependencies: + '@typescript-eslint/scope-manager': 8.59.0 + '@typescript-eslint/types': 8.59.0 + '@typescript-eslint/typescript-estree': 8.59.0(typescript@6.0.3) + '@typescript-eslint/visitor-keys': 8.59.0 debug: 4.4.3(supports-color@8.1.1) eslint: 9.27.0(jiti@2.6.1) - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/project-service@8.58.2(typescript@6.0.2)': + '@typescript-eslint/project-service@8.58.2(typescript@6.0.3)': dependencies: - '@typescript-eslint/tsconfig-utils': 8.58.2(typescript@6.0.2) + '@typescript-eslint/tsconfig-utils': 8.58.2(typescript@6.0.3) '@typescript-eslint/types': 8.58.2 debug: 4.4.3(supports-color@8.1.1) - typescript: 6.0.2 + typescript: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/project-service@8.59.0(typescript@6.0.3)': + dependencies: + '@typescript-eslint/tsconfig-utils': 8.59.0(typescript@6.0.3) + '@typescript-eslint/types': 8.59.0 + debug: 4.4.3(supports-color@8.1.1) + typescript: 6.0.3 transitivePeerDependencies: - supports-color @@ -11229,47 +11406,96 @@ snapshots: '@typescript-eslint/types': 8.58.2 '@typescript-eslint/visitor-keys': 8.58.2 - '@typescript-eslint/tsconfig-utils@8.58.2(typescript@6.0.2)': + '@typescript-eslint/scope-manager@8.59.0': dependencies: - typescript: 6.0.2 + '@typescript-eslint/types': 8.59.0 + '@typescript-eslint/visitor-keys': 8.59.0 - '@typescript-eslint/type-utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@typescript-eslint/tsconfig-utils@8.58.2(typescript@6.0.3)': + dependencies: + typescript: 6.0.3 + + '@typescript-eslint/tsconfig-utils@8.59.0(typescript@6.0.3)': + dependencies: + typescript: 6.0.3 + + '@typescript-eslint/type-utils@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': dependencies: '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.2) - 
'@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.3) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) debug: 4.4.3(supports-color@8.1.1) - eslint: 10.2.0(jiti@2.6.1) - ts-api-utils: 2.5.0(typescript@6.0.2) - typescript: 6.0.2 + eslint: 10.2.1(jiti@2.6.1) + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/type-utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': + dependencies: + '@typescript-eslint/types': 8.59.0 + '@typescript-eslint/typescript-estree': 8.59.0(typescript@6.0.3) + '@typescript-eslint/utils': 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + debug: 4.4.3(supports-color@8.1.1) + eslint: 10.2.1(jiti@2.6.1) + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 transitivePeerDependencies: - supports-color '@typescript-eslint/types@8.58.2': {} - '@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.2)': + '@typescript-eslint/types@8.59.0': {} + + '@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.3)': dependencies: - '@typescript-eslint/project-service': 8.58.2(typescript@6.0.2) - '@typescript-eslint/tsconfig-utils': 8.58.2(typescript@6.0.2) + '@typescript-eslint/project-service': 8.58.2(typescript@6.0.3) + '@typescript-eslint/tsconfig-utils': 8.58.2(typescript@6.0.3) '@typescript-eslint/types': 8.58.2 '@typescript-eslint/visitor-keys': 8.58.2 debug: 4.4.3(supports-color@8.1.1) minimatch: 10.2.4 semver: 7.7.4 tinyglobby: 0.2.15 - ts-api-utils: 2.5.0(typescript@6.0.2) - typescript: 6.0.2 + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 transitivePeerDependencies: - supports-color - '@typescript-eslint/utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2)': + '@typescript-eslint/typescript-estree@8.59.0(typescript@6.0.3)': dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) 
+ '@typescript-eslint/project-service': 8.59.0(typescript@6.0.3) + '@typescript-eslint/tsconfig-utils': 8.59.0(typescript@6.0.3) + '@typescript-eslint/types': 8.59.0 + '@typescript-eslint/visitor-keys': 8.59.0 + debug: 4.4.3(supports-color@8.1.1) + minimatch: 10.2.4 + semver: 7.7.4 + tinyglobby: 0.2.16 + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) '@typescript-eslint/scope-manager': 8.58.2 '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) - typescript: 6.0.2 + '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) + typescript: 6.0.3 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3)': + dependencies: + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) + '@typescript-eslint/scope-manager': 8.59.0 + '@typescript-eslint/types': 8.59.0 + '@typescript-eslint/typescript-estree': 8.59.0(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) + typescript: 6.0.3 transitivePeerDependencies: - supports-color @@ -11278,36 +11504,41 @@ snapshots: '@typescript-eslint/types': 8.58.2 eslint-visitor-keys: 5.0.1 - '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260413.1': + '@typescript-eslint/visitor-keys@8.59.0': + dependencies: + '@typescript-eslint/types': 8.59.0 + eslint-visitor-keys: 5.0.1 + + '@typescript/native-preview-darwin-arm64@7.0.0-dev.20260422.1': optional: true - '@typescript/native-preview-darwin-x64@7.0.0-dev.20260413.1': + '@typescript/native-preview-darwin-x64@7.0.0-dev.20260422.1': optional: true - '@typescript/native-preview-linux-arm64@7.0.0-dev.20260413.1': + '@typescript/native-preview-linux-arm64@7.0.0-dev.20260422.1': 
optional: true - '@typescript/native-preview-linux-arm@7.0.0-dev.20260413.1': + '@typescript/native-preview-linux-arm@7.0.0-dev.20260422.1': optional: true - '@typescript/native-preview-linux-x64@7.0.0-dev.20260413.1': + '@typescript/native-preview-linux-x64@7.0.0-dev.20260422.1': optional: true - '@typescript/native-preview-win32-arm64@7.0.0-dev.20260413.1': + '@typescript/native-preview-win32-arm64@7.0.0-dev.20260422.1': optional: true - '@typescript/native-preview-win32-x64@7.0.0-dev.20260413.1': + '@typescript/native-preview-win32-x64@7.0.0-dev.20260422.1': optional: true - '@typescript/native-preview@7.0.0-dev.20260413.1': + '@typescript/native-preview@7.0.0-dev.20260422.1': optionalDependencies: - '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260413.1 - '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260413.1 - '@typescript/native-preview-linux-arm': 7.0.0-dev.20260413.1 - '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260413.1 - '@typescript/native-preview-linux-x64': 7.0.0-dev.20260413.1 - '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260413.1 - '@typescript/native-preview-win32-x64': 7.0.0-dev.20260413.1 + '@typescript/native-preview-darwin-arm64': 7.0.0-dev.20260422.1 + '@typescript/native-preview-darwin-x64': 7.0.0-dev.20260422.1 + '@typescript/native-preview-linux-arm': 7.0.0-dev.20260422.1 + '@typescript/native-preview-linux-arm64': 7.0.0-dev.20260422.1 + '@typescript/native-preview-linux-x64': 7.0.0-dev.20260422.1 + '@typescript/native-preview-win32-arm64': 7.0.0-dev.20260422.1 + '@typescript/native-preview-win32-x64': 7.0.0-dev.20260422.1 '@ungap/structured-clone@1.3.0': {} @@ -11315,56 +11546,56 @@ snapshots: dependencies: unpic: 4.2.2 - '@unpic/react@1.0.2(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': + 
'@unpic/react@1.0.2(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)': dependencies: '@unpic/core': 1.0.3 react: 19.2.5 react-dom: 19.2.5(react@19.2.5) optionalDependencies: - next: 16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + next: 16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@upsetjs/venn.js@2.0.0': optionalDependencies: d3-selection: 3.0.0 d3-transition: 3.0.1(d3-selection@3.0.0) - '@valibot/to-json-schema@1.6.0(valibot@1.3.1(typescript@6.0.2))': + '@valibot/to-json-schema@1.6.0(valibot@1.3.1(typescript@6.0.3))': dependencies: - valibot: 1.3.1(typescript@6.0.2) + valibot: 1.3.1(typescript@6.0.3) '@vercel/og@0.8.6': dependencies: '@resvg/resvg-wasm': 2.4.0 satori: 0.16.0 - '@vitejs/devtools-kit@0.1.11(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2)(ws@8.20.0)': + '@vitejs/devtools-kit@0.1.11(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3)(ws@8.20.0)': dependencies: - '@vitejs/devtools-rpc': 0.1.11(typescript@6.0.2)(ws@8.20.0) + '@vitejs/devtools-rpc': 0.1.11(typescript@6.0.3)(ws@8.20.0) birpc: 4.0.0 ohash: 2.0.11 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - typescript - ws - '@vitejs/devtools-rpc@0.1.11(typescript@6.0.2)(ws@8.20.0)': + '@vitejs/devtools-rpc@0.1.11(typescript@6.0.3)(ws@8.20.0)': dependencies: birpc: 4.0.0 ohash: 2.0.11 p-limit: 7.3.0 structured-clone-es: 2.0.0 - valibot: 1.3.1(typescript@6.0.2) + 
valibot: 1.3.1(typescript@6.0.3) optionalDependencies: ws: 8.20.0 transitivePeerDependencies: - typescript - '@vitejs/plugin-react@6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))': + '@vitejs/plugin-react@6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))': dependencies: '@rolldown/pluginutils': 1.0.0-rc.7 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' - '@vitejs/plugin-rsc@0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5)': + '@vitejs/plugin-rsc@0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5)': dependencies: '@rolldown/pluginutils': 1.0.0-rc.15 es-module-lexer: 2.0.0 @@ -11375,15 +11606,15 @@ snapshots: srvx: 0.11.15 strip-literal: 3.1.0 turbo-stream: 3.2.0 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' - vitefu: 1.1.3(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' + vitefu: 
1.1.3(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) optionalDependencies: react-server-dom-webpack: 19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5) - '@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)': + '@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)': dependencies: '@bcoe/v8-coverage': 1.0.2 - '@vitest/utils': 4.1.4 + '@vitest/utils': 4.1.5 ast-v8-to-istanbul: 1.0.0 istanbul-lib-coverage: 3.2.2 istanbul-lib-report: 3.0.1 @@ -11392,7 +11623,7 @@ snapshots: obug: 2.1.1 std-env: 4.0.0 tinyrainbow: 3.1.0 - vitest: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vitest: 
'@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - '@arethetypeswrong/core' - '@edge-runtime/vm' @@ -11422,15 +11653,15 @@ snapshots: - vite - yaml - '@vitest/eslint-plugin@1.6.15(@types/node@25.6.0)(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(eslint@10.2.0(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)': + 
'@vitest/eslint-plugin@1.6.15(@types/node@25.6.0)(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(eslint@10.2.1(jiti@2.6.1))(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)': dependencies: - '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) - vitest: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + '@typescript-eslint/scope-manager': 8.59.0 + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) + vitest: 
'@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' optionalDependencies: - '@typescript-eslint/eslint-plugin': 8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - typescript: 6.0.2 + '@typescript-eslint/eslint-plugin': 8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + typescript: 6.0.3 transitivePeerDependencies: - '@arethetypeswrong/core' - '@edge-runtime/vm' @@ -11473,7 +11704,7 @@ snapshots: dependencies: tinyrainbow: 2.0.0 - '@vitest/pretty-format@4.1.4': + '@vitest/pretty-format@4.1.5': dependencies: tinyrainbow: 3.1.0 @@ -11487,13 +11718,13 @@ snapshots: loupe: 3.2.1 tinyrainbow: 2.0.0 - '@vitest/utils@4.1.4': + '@vitest/utils@4.1.5': dependencies: - '@vitest/pretty-format': 4.1.4 + '@vitest/pretty-format': 4.1.5 convert-source-map: 2.0.0 tinyrainbow: 3.1.0 - '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)': + '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)': dependencies: '@oxc-project/runtime': 0.126.0 '@oxc-project/types': 0.126.0 @@ -11505,7 +11736,7 @@ snapshots: fsevents: 2.3.3 jiti: 2.6.1 tsx: 4.21.0 - typescript: 6.0.2 + typescript: 6.0.3 yaml: 2.8.3 '@voidzero-dev/vite-plus-darwin-arm64@0.1.19': @@ -11526,11 +11757,11 @@ snapshots: 
'@voidzero-dev/vite-plus-linux-x64-musl@0.1.19': optional: true - '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)': + '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)': dependencies: '@standard-schema/spec': 1.1.0 '@types/chai': 5.2.3 - '@voidzero-dev/vite-plus-core': 0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + '@voidzero-dev/vite-plus-core': 0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) es-module-lexer: 1.7.0 obug: 2.1.1 pixelmatch: 7.1.0 @@ -11540,11 +11771,11 @@ snapshots: tinybench: 2.9.0 tinyexec: 1.0.4 tinyglobby: 0.2.16 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' ws: 8.20.0 optionalDependencies: '@types/node': 25.6.0 - '@vitest/coverage-v8': 
4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + '@vitest/coverage-v8': 4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) happy-dom: 20.9.0 transitivePeerDependencies: - '@arethetypeswrong/core' @@ -11774,7 +12005,7 @@ snapshots: caniuse-lite@1.0.30001781: {} - canvas@3.2.2: + canvas@3.2.3: dependencies: node-addon-api: 7.1.1 prebuild-install: 7.1.3 @@ -12279,7 +12510,7 @@ snapshots: optionalDependencies: '@types/trusted-types': 2.0.7 - dompurify@3.4.0: + dompurify@3.4.1: optionalDependencies: '@types/trusted-types': 2.0.7 @@ -12373,7 +12604,7 @@ snapshots: es-module-lexer@2.0.0: {} - es-toolkit@1.45.1: {} + es-toolkit@1.46.0: {} esast-util-from-estree@2.0.0: dependencies: @@ -12428,93 +12659,93 @@ snapshots: escape-string-regexp@5.0.0: {} - eslint-compat-utils@0.5.1(eslint@10.2.0(jiti@2.6.1)): + eslint-compat-utils@0.5.1(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) semver: 7.7.4 - eslint-config-flat-gitignore@2.3.0(eslint@10.2.0(jiti@2.6.1)): + eslint-config-flat-gitignore@2.3.0(eslint@10.2.1(jiti@2.6.1)): dependencies: - '@eslint/compat': 2.0.3(eslint@10.2.0(jiti@2.6.1)) - eslint: 10.2.0(jiti@2.6.1) + '@eslint/compat': 2.0.3(eslint@10.2.1(jiti@2.6.1)) + eslint: 10.2.1(jiti@2.6.1) eslint-flat-config-utils@3.1.0: dependencies: '@eslint/config-helpers': 0.5.4 pathe: 2.0.3 - eslint-json-compat-utils@0.2.3(eslint@10.2.0(jiti@2.6.1))(jsonc-eslint-parser@3.1.0): + eslint-json-compat-utils@0.2.3(eslint@10.2.1(jiti@2.6.1))(jsonc-eslint-parser@3.1.0): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) esquery: 1.7.0 
jsonc-eslint-parser: 3.1.0 - eslint-markdown@0.6.1(eslint@10.2.0(jiti@2.6.1)): + eslint-markdown@0.6.1(eslint@10.2.1(jiti@2.6.1)): dependencies: '@eslint/markdown': 7.5.1 micromark-util-normalize-identifier: 2.0.1 parse5: 8.0.0 optionalDependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) transitivePeerDependencies: - supports-color - eslint-merge-processors@2.0.0(eslint@10.2.0(jiti@2.6.1)): + eslint-merge-processors@2.0.0(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) - eslint-plugin-antfu@3.2.2(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-antfu@3.2.2(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) - eslint-plugin-better-tailwindcss@4.4.1(eslint@10.2.0(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tailwindcss@4.2.2)(typescript@6.0.2): + eslint-plugin-better-tailwindcss@4.4.1(eslint@10.2.1(jiti@2.6.1))(oxlint@1.60.0(oxlint-tsgolint@0.21.1))(tailwindcss@4.2.4)(typescript@6.0.3): dependencies: '@eslint/css-tree': 4.0.1 - '@valibot/to-json-schema': 1.6.0(valibot@1.3.1(typescript@6.0.2)) + '@valibot/to-json-schema': 1.6.0(valibot@1.3.1(typescript@6.0.3)) enhanced-resolve: 5.20.1 jiti: 2.6.1 synckit: 0.11.12 tailwind-csstree: 0.3.1 - tailwindcss: 4.2.2 + tailwindcss: 4.2.4 tsconfig-paths-webpack-plugin: 4.2.0 - valibot: 1.3.1(typescript@6.0.2) + valibot: 1.3.1(typescript@6.0.3) optionalDependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) oxlint: 1.60.0(oxlint-tsgolint@0.21.1) transitivePeerDependencies: - '@eslint/css' - typescript - eslint-plugin-command@3.5.2(@typescript-eslint/typescript-estree@8.58.2(typescript@6.0.2))(@typescript-eslint/utils@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-command@3.5.2(@typescript-eslint/typescript-estree@8.59.0(typescript@6.0.3))(@typescript-eslint/utils@8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1)): 
dependencies: '@es-joy/jsdoccomment': 0.84.0 - '@typescript-eslint/typescript-estree': 8.58.2(typescript@6.0.2) - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/typescript-estree': 8.59.0(typescript@6.0.3) + '@typescript-eslint/utils': 8.59.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) - eslint-plugin-depend@1.5.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-depend@1.5.0(eslint@10.2.1(jiti@2.6.1)): dependencies: empathic: 2.0.0 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) module-replacements: 2.11.0 semver: 7.7.4 - eslint-plugin-es-x@7.8.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-es-x@7.8.0(eslint@10.2.1(jiti@2.6.1)): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.2 - eslint: 10.2.0(jiti@2.6.1) - eslint-compat-utils: 0.5.1(eslint@10.2.0(jiti@2.6.1)) + eslint: 10.2.1(jiti@2.6.1) + eslint-compat-utils: 0.5.1(eslint@10.2.1(jiti@2.6.1)) - eslint-plugin-hyoban@0.14.1(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-hyoban@0.14.1(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) - eslint-plugin-import-lite@0.6.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-import-lite@0.6.0(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) - eslint-plugin-jsdoc@62.9.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-jsdoc@62.9.0(eslint@10.2.1(jiti@2.6.1)): dependencies: '@es-joy/jsdoccomment': 0.86.0 '@es-joy/resolve.exports': 1.2.0 @@ -12522,7 +12753,7 @@ snapshots: comment-parser: 1.4.6 debug: 4.4.3(supports-color@8.1.1) escape-string-regexp: 4.0.0 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) espree: 11.2.0 esquery: 1.7.0 html-entities: 2.6.0 @@ -12534,27 +12765,27 @@ snapshots: transitivePeerDependencies: - supports-color 
- eslint-plugin-jsonc@3.1.2(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-jsonc@3.1.2(eslint@10.2.1(jiti@2.6.1)): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) '@eslint/core': 1.2.0 '@eslint/plugin-kit': 0.6.1 '@ota-meshi/ast-token-store': 0.3.0 diff-sequences: 29.6.3 - eslint: 10.2.0(jiti@2.6.1) - eslint-json-compat-utils: 0.2.3(eslint@10.2.0(jiti@2.6.1))(jsonc-eslint-parser@3.1.0) + eslint: 10.2.1(jiti@2.6.1) + eslint-json-compat-utils: 0.2.3(eslint@10.2.1(jiti@2.6.1))(jsonc-eslint-parser@3.1.0) jsonc-eslint-parser: 3.1.0 natural-compare: 1.4.0 synckit: 0.11.12 transitivePeerDependencies: - '@eslint/json' - eslint-plugin-markdown-preferences@0.41.1(@eslint/markdown@8.0.1)(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-markdown-preferences@0.41.1(@eslint/markdown@8.0.1)(eslint@10.2.1(jiti@2.6.1)): dependencies: '@eslint/markdown': 8.0.1 diff-sequences: 29.6.3 emoji-regex-xs: 2.0.1 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) mdast-util-from-markdown: 2.0.3 mdast-util-frontmatter: 2.0.1 mdast-util-gfm: 3.1.0 @@ -12569,44 +12800,44 @@ snapshots: transitivePeerDependencies: - supports-color - eslint-plugin-n@17.24.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-n@17.24.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) enhanced-resolve: 5.20.1 - eslint: 10.2.0(jiti@2.6.1) - eslint-plugin-es-x: 7.8.0(eslint@10.2.0(jiti@2.6.1)) + eslint: 10.2.1(jiti@2.6.1) + eslint-plugin-es-x: 7.8.0(eslint@10.2.1(jiti@2.6.1)) get-tsconfig: 4.13.7 globals: 15.15.0 globrex: 0.1.2 ignore: 5.3.2 semver: 7.7.4 - ts-declaration-location: 1.0.7(typescript@6.0.2) + ts-declaration-location: 1.0.7(typescript@6.0.3) transitivePeerDependencies: - typescript - 
eslint-plugin-no-barrel-files@1.3.1(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-no-barrel-files@1.3.1(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) transitivePeerDependencies: - supports-color - typescript eslint-plugin-no-only-tests@3.3.0: {} - eslint-plugin-perfectionist@5.8.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-perfectionist@5.8.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) natural-orderby: 5.0.0 transitivePeerDependencies: - supports-color - typescript - eslint-plugin-pnpm@1.6.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-pnpm@1.6.0(eslint@10.2.1(jiti@2.6.1)): dependencies: empathic: 2.0.0 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) jsonc-eslint-parser: 3.1.0 pathe: 2.0.3 pnpm-workspace-yaml: 1.6.0 @@ -12614,150 +12845,150 @@ snapshots: yaml: 2.8.3 yaml-eslint-parser: 2.0.0 - eslint-plugin-react-dom@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-react-dom@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/core': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/var': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/core': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 
3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/var': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) compare-versions: 6.1.1 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-naming-convention@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-react-naming-convention@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/core': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/var': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/core': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/var': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) compare-versions: 6.1.1 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) string-ts: 2.3.1 ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - 
eslint-plugin-react-refresh@0.5.2(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-react-refresh@0.5.2(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) - eslint-plugin-react-rsc@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-react-rsc@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/var': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/var': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-web-api@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-react-web-api@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/core': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/var': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/core': 
3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/var': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) birecord: 0.1.1 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) ts-pattern: 5.9.0 - typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - eslint-plugin-react-x@3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2): + eslint-plugin-react-x@3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3): dependencies: - '@eslint-react/ast': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/core': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/shared': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - '@eslint-react/var': 3.0.0(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@eslint-react/ast': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/core': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/shared': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + '@eslint-react/var': 3.0.0(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/scope-manager': 8.58.2 - '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/type-utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) '@typescript-eslint/types': 8.58.2 - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) compare-versions: 6.1.1 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) string-ts: 2.3.1 - ts-api-utils: 2.5.0(typescript@6.0.2) + ts-api-utils: 2.5.0(typescript@6.0.3) ts-pattern: 5.9.0 
- typescript: 6.0.2 + typescript: 6.0.3 transitivePeerDependencies: - supports-color - eslint-plugin-regexp@3.1.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-regexp@3.1.0(eslint@10.2.1(jiti@2.6.1)): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.2 comment-parser: 1.4.6 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) jsdoc-type-pratt-parser: 7.2.0 refa: 0.12.1 regexp-ast-analysis: 0.7.1 scslre: 0.3.0 - eslint-plugin-sonarjs@4.0.2(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-sonarjs@4.0.3(eslint@10.2.1(jiti@2.6.1)): dependencies: '@eslint-community/regexpp': 4.12.2 builtin-modules: 3.3.0 bytes: 3.1.2 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) functional-red-black-tree: 1.0.1 globals: 17.5.0 jsx-ast-utils-x: 0.1.0 lodash.merge: 4.6.2 - minimatch: 10.2.4 + minimatch: 10.2.5 scslre: 0.3.0 semver: 7.7.4 - ts-api-utils: 2.5.0(typescript@6.0.2) - typescript: 6.0.2 + ts-api-utils: 2.5.0(typescript@6.0.3) + typescript: 6.0.3 - eslint-plugin-storybook@10.3.5(eslint@10.2.0(jiti@2.6.1))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2): + eslint-plugin-storybook@10.3.5(eslint@10.2.1(jiti@2.6.1))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3): dependencies: - '@typescript-eslint/utils': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) - eslint: 10.2.0(jiti@2.6.1) + '@typescript-eslint/utils': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) + eslint: 10.2.1(jiti@2.6.1) storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) transitivePeerDependencies: - supports-color - typescript - eslint-plugin-toml@1.3.1(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-toml@1.3.1(eslint@10.2.1(jiti@2.6.1)): dependencies: '@eslint/core': 1.2.0 '@eslint/plugin-kit': 0.6.1 
'@ota-meshi/ast-token-store': 0.3.0 debug: 4.4.3(supports-color@8.1.1) - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) toml-eslint-parser: 1.0.3 transitivePeerDependencies: - supports-color - eslint-plugin-unicorn@64.0.0(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-unicorn@64.0.0(eslint@10.2.1(jiti@2.6.1)): dependencies: '@babel/helper-validator-identifier': 7.28.5 - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) change-case: 5.4.4 ci-info: 4.4.0 clean-regexp: 1.0.0 core-js-compat: 3.49.0 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) find-up-simple: 1.0.1 globals: 17.5.0 indent-string: 5.0.0 @@ -12769,27 +13000,27 @@ snapshots: semver: 7.7.4 strip-indent: 4.1.1 - eslint-plugin-unused-imports@4.4.1(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-unused-imports@4.4.1(@typescript-eslint/eslint-plugin@8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) optionalDependencies: - '@typescript-eslint/eslint-plugin': 8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@typescript-eslint/eslint-plugin': 8.58.2(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) - eslint-plugin-vue@10.8.0(@stylistic/eslint-plugin@5.10.0(eslint@10.2.0(jiti@2.6.1)))(@typescript-eslint/parser@8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2))(eslint@10.2.0(jiti@2.6.1))(vue-eslint-parser@10.4.0(eslint@10.2.0(jiti@2.6.1))): + 
eslint-plugin-vue@10.8.0(@stylistic/eslint-plugin@5.10.0(eslint@10.2.1(jiti@2.6.1)))(@typescript-eslint/parser@8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3))(eslint@10.2.1(jiti@2.6.1))(vue-eslint-parser@10.4.0(eslint@10.2.1(jiti@2.6.1))): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) - eslint: 10.2.0(jiti@2.6.1) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) + eslint: 10.2.1(jiti@2.6.1) natural-compare: 1.4.0 nth-check: 2.1.1 postcss-selector-parser: 7.1.1 semver: 7.7.4 - vue-eslint-parser: 10.4.0(eslint@10.2.0(jiti@2.6.1)) + vue-eslint-parser: 10.4.0(eslint@10.2.1(jiti@2.6.1)) xml-name-validator: 4.0.0 optionalDependencies: - '@stylistic/eslint-plugin': 5.10.0(eslint@10.2.0(jiti@2.6.1)) - '@typescript-eslint/parser': 8.58.2(eslint@10.2.0(jiti@2.6.1))(typescript@6.0.2) + '@stylistic/eslint-plugin': 5.10.0(eslint@10.2.1(jiti@2.6.1)) + '@typescript-eslint/parser': 8.58.2(eslint@10.2.1(jiti@2.6.1))(typescript@6.0.3) - eslint-plugin-yml@3.3.1(eslint@10.2.0(jiti@2.6.1)): + eslint-plugin-yml@3.3.1(eslint@10.2.1(jiti@2.6.1)): dependencies: '@eslint/core': 1.2.0 '@eslint/plugin-kit': 0.6.1 @@ -12797,15 +13028,15 @@ snapshots: debug: 4.4.3(supports-color@8.1.1) diff-sequences: 29.6.3 escape-string-regexp: 5.0.0 - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) natural-compare: 1.4.0 yaml-eslint-parser: 2.0.0 transitivePeerDependencies: - supports-color - eslint-processor-vue-blocks@2.0.0(eslint@10.2.0(jiti@2.6.1)): + eslint-processor-vue-blocks@2.0.0(eslint@10.2.1(jiti@2.6.1)): dependencies: - eslint: 10.2.0(jiti@2.6.1) + eslint: 10.2.1(jiti@2.6.1) eslint-scope@8.4.0: dependencies: @@ -12825,14 +13056,14 @@ snapshots: eslint-visitor-keys@5.0.1: {} - eslint@10.2.0(jiti@2.6.1): + eslint@10.2.1(jiti@2.6.1): dependencies: - '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.0(jiti@2.6.1)) + '@eslint-community/eslint-utils': 4.9.1(eslint@10.2.1(jiti@2.6.1)) '@eslint-community/regexpp': 4.12.2 - 
'@eslint/config-array': 0.23.4 - '@eslint/config-helpers': 0.5.4 - '@eslint/core': 1.2.0 - '@eslint/plugin-kit': 0.7.0 + '@eslint/config-array': 0.23.5 + '@eslint/config-helpers': 0.5.5 + '@eslint/core': 1.2.1 + '@eslint/plugin-kit': 0.7.1 '@humanfs/node': 0.16.7 '@humanwhocodes/module-importer': 1.0.1 '@humanwhocodes/retry': 0.4.3 @@ -13093,6 +13324,10 @@ snapshots: dependencies: resolve-pkg-maps: 1.0.0 + get-tsconfig@4.14.0: + dependencies: + resolve-pkg-maps: 1.0.0 + github-from-package@0.0.0: optional: true @@ -13330,11 +13565,11 @@ snapshots: dependencies: '@babel/runtime': 7.29.2 - i18next@26.0.4(typescript@6.0.2): + i18next@26.0.6(typescript@6.0.3): dependencies: '@babel/runtime': 7.29.2 optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 iconify-import-svg@0.2.0: dependencies: @@ -13518,20 +13753,19 @@ snapshots: khroma@2.1.0: {} - knip@6.4.1(@emnapi/runtime@1.9.1): + knip@6.6.1(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2): dependencies: - '@nodelib/fs.walk': 1.2.8 - fast-glob: 3.3.3 + fdir: 6.5.0(picomatch@4.0.4) formatly: 0.3.0 - get-tsconfig: 4.13.7 + get-tsconfig: 4.14.0 jiti: 2.6.1 minimist: 1.2.8 - oxc-parser: 0.121.0(@emnapi/runtime@1.9.1) - oxc-resolver: 11.19.1(@emnapi/runtime@1.9.1) - picocolors: 1.1.1 + oxc-parser: 0.126.0 + oxc-resolver: 11.19.1(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2) picomatch: 4.0.4 smol-toml: 1.6.1 strip-json-comments: 5.0.3 + tinyglobby: 0.2.16 unbash: 2.2.0 yaml: 2.8.3 zod: 4.3.6 @@ -13545,7 +13779,7 @@ snapshots: kolorist@1.8.0: {} - ky@2.0.0: {} + ky@2.0.2: {} lamejs@1.2.1: dependencies: @@ -13660,7 +13894,7 @@ snapshots: dependencies: js-tokens: 4.0.0 - loro-crdt@1.10.8: {} + loro-crdt@1.11.1: {} loupe@3.2.1: {} @@ -14246,6 +14480,10 @@ snapshots: dependencies: brace-expansion: 5.0.5 + minimatch@10.2.5: + dependencies: + brace-expansion: 5.0.5 + minimatch@3.1.5: dependencies: brace-expansion: 1.1.13 @@ -14308,9 +14546,9 @@ snapshots: react: 19.2.5 react-dom: 19.2.5(react@19.2.5) - 
next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5): + next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5): dependencies: - '@next/env': 16.2.3 + '@next/env': 16.2.4 '@swc/helpers': 0.5.15 baseline-browser-mapping: 2.10.12 caniuse-lite: 1.0.30001781 @@ -14319,14 +14557,14 @@ snapshots: react-dom: 19.2.5(react@19.2.5) styled-jsx: 5.1.6(@babel/core@7.29.0)(react@19.2.5) optionalDependencies: - '@next/swc-darwin-arm64': 16.2.3 - '@next/swc-darwin-x64': 16.2.3 - '@next/swc-linux-arm64-gnu': 16.2.3 - '@next/swc-linux-arm64-musl': 16.2.3 - '@next/swc-linux-x64-gnu': 16.2.3 - '@next/swc-linux-x64-musl': 16.2.3 - '@next/swc-win32-arm64-msvc': 16.2.3 - '@next/swc-win32-x64-msvc': 16.2.3 + '@next/swc-darwin-arm64': 16.2.4 + '@next/swc-darwin-x64': 16.2.4 + '@next/swc-linux-arm64-gnu': 16.2.4 + '@next/swc-linux-arm64-musl': 16.2.4 + '@next/swc-linux-x64-gnu': 16.2.4 + '@next/swc-linux-x64-musl': 16.2.4 + '@next/swc-win32-arm64-msvc': 16.2.4 + '@next/swc-win32-x64-msvc': 16.2.4 '@playwright/test': 1.59.1 sharp: 0.34.5 transitivePeerDependencies: @@ -14360,12 +14598,12 @@ snapshots: dependencies: boolbase: 1.0.0 - nuqs@2.8.9(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5): + nuqs@2.8.9(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5): dependencies: '@standard-schema/spec': 1.0.0 react: 19.2.5 optionalDependencies: - next: 16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + next: 16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) object-assign@4.1.1: {} @@ -14414,35 +14652,32 @@ snapshots: type-check: 0.4.0 word-wrap: 1.2.5 - oxc-parser@0.121.0(@emnapi/runtime@1.9.1): + oxc-parser@0.126.0: dependencies: - '@oxc-project/types': 0.121.0 + 
'@oxc-project/types': 0.126.0 optionalDependencies: - '@oxc-parser/binding-android-arm-eabi': 0.121.0 - '@oxc-parser/binding-android-arm64': 0.121.0 - '@oxc-parser/binding-darwin-arm64': 0.121.0 - '@oxc-parser/binding-darwin-x64': 0.121.0 - '@oxc-parser/binding-freebsd-x64': 0.121.0 - '@oxc-parser/binding-linux-arm-gnueabihf': 0.121.0 - '@oxc-parser/binding-linux-arm-musleabihf': 0.121.0 - '@oxc-parser/binding-linux-arm64-gnu': 0.121.0 - '@oxc-parser/binding-linux-arm64-musl': 0.121.0 - '@oxc-parser/binding-linux-ppc64-gnu': 0.121.0 - '@oxc-parser/binding-linux-riscv64-gnu': 0.121.0 - '@oxc-parser/binding-linux-riscv64-musl': 0.121.0 - '@oxc-parser/binding-linux-s390x-gnu': 0.121.0 - '@oxc-parser/binding-linux-x64-gnu': 0.121.0 - '@oxc-parser/binding-linux-x64-musl': 0.121.0 - '@oxc-parser/binding-openharmony-arm64': 0.121.0 - '@oxc-parser/binding-wasm32-wasi': 0.121.0(@emnapi/runtime@1.9.1) - '@oxc-parser/binding-win32-arm64-msvc': 0.121.0 - '@oxc-parser/binding-win32-ia32-msvc': 0.121.0 - '@oxc-parser/binding-win32-x64-msvc': 0.121.0 - transitivePeerDependencies: - - '@emnapi/core' - - '@emnapi/runtime' + '@oxc-parser/binding-android-arm-eabi': 0.126.0 + '@oxc-parser/binding-android-arm64': 0.126.0 + '@oxc-parser/binding-darwin-arm64': 0.126.0 + '@oxc-parser/binding-darwin-x64': 0.126.0 + '@oxc-parser/binding-freebsd-x64': 0.126.0 + '@oxc-parser/binding-linux-arm-gnueabihf': 0.126.0 + '@oxc-parser/binding-linux-arm-musleabihf': 0.126.0 + '@oxc-parser/binding-linux-arm64-gnu': 0.126.0 + '@oxc-parser/binding-linux-arm64-musl': 0.126.0 + '@oxc-parser/binding-linux-ppc64-gnu': 0.126.0 + '@oxc-parser/binding-linux-riscv64-gnu': 0.126.0 + '@oxc-parser/binding-linux-riscv64-musl': 0.126.0 + '@oxc-parser/binding-linux-s390x-gnu': 0.126.0 + '@oxc-parser/binding-linux-x64-gnu': 0.126.0 + '@oxc-parser/binding-linux-x64-musl': 0.126.0 + '@oxc-parser/binding-openharmony-arm64': 0.126.0 + '@oxc-parser/binding-wasm32-wasi': 0.126.0 + '@oxc-parser/binding-win32-arm64-msvc': 
0.126.0 + '@oxc-parser/binding-win32-ia32-msvc': 0.126.0 + '@oxc-parser/binding-win32-x64-msvc': 0.126.0 - oxc-resolver@11.19.1(@emnapi/runtime@1.9.1): + oxc-resolver@11.19.1(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2): optionalDependencies: '@oxc-resolver/binding-android-arm-eabi': 11.19.1 '@oxc-resolver/binding-android-arm64': 11.19.1 @@ -14460,7 +14695,7 @@ snapshots: '@oxc-resolver/binding-linux-x64-gnu': 11.19.1 '@oxc-resolver/binding-linux-x64-musl': 11.19.1 '@oxc-resolver/binding-openharmony-arm64': 11.19.1 - '@oxc-resolver/binding-wasm32-wasi': 11.19.1(@emnapi/runtime@1.9.1) + '@oxc-resolver/binding-wasm32-wasi': 11.19.1(@emnapi/core@1.9.2)(@emnapi/runtime@1.9.2) '@oxc-resolver/binding-win32-arm64-msvc': 11.19.1 '@oxc-resolver/binding-win32-ia32-msvc': 11.19.1 '@oxc-resolver/binding-win32-x64-msvc': 11.19.1 @@ -14620,7 +14855,7 @@ snapshots: pdfjs-dist@4.4.168: optionalDependencies: - canvas: 3.2.2 + canvas: 3.2.3 path2d: 0.2.2 pend@1.2.0: {} @@ -14699,6 +14934,12 @@ snapshots: picocolors: 1.1.1 source-map-js: 1.2.1 + postcss@8.5.10: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + postcss@8.5.9: dependencies: nanoid: 3.3.11 @@ -14782,9 +15023,9 @@ snapshots: prop-types: 15.8.1 react: 19.2.5 - react-docgen-typescript@2.4.0(typescript@6.0.2): + react-docgen-typescript@2.4.0(typescript@6.0.3): dependencies: - typescript: 6.0.2 + typescript: 6.0.3 react-docgen@8.0.3: dependencies: @@ -14831,16 +15072,16 @@ snapshots: react: 19.2.5 react-dom: 19.2.5(react@19.2.5) - react-i18next@16.5.8(i18next@26.0.4(typescript@6.0.2))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(typescript@6.0.2): + react-i18next@16.5.8(i18next@26.0.6(typescript@6.0.3))(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(typescript@6.0.3): dependencies: '@babel/runtime': 7.29.2 html-parse-stringify: 3.0.1 - i18next: 26.0.4(typescript@6.0.2) + i18next: 26.0.6(typescript@6.0.3) react: 19.2.5 use-sync-external-store: 1.6.0(react@19.2.5) optionalDependencies: react-dom: 
19.2.5(react@19.2.5) - typescript: 6.0.2 + typescript: 6.0.3 react-is@16.13.1: {} @@ -15493,7 +15734,7 @@ snapshots: tailwind-merge@3.5.0: {} - tailwindcss@4.2.2: {} + tailwindcss@4.2.4: {} tapable@2.3.2: {} @@ -15589,24 +15830,24 @@ snapshots: trough@2.2.0: {} - ts-api-utils@2.5.0(typescript@6.0.2): + ts-api-utils@2.5.0(typescript@6.0.3): dependencies: - typescript: 6.0.2 + typescript: 6.0.3 ts-debounce@4.0.0: {} - ts-declaration-location@1.0.7(typescript@6.0.2): + ts-declaration-location@1.0.7(typescript@6.0.3): dependencies: picomatch: 4.0.4 - typescript: 6.0.2 + typescript: 6.0.3 ts-dedent@2.2.0: {} ts-pattern@5.9.0: {} - tsconfck@3.1.6(typescript@6.0.2): + tsconfck@3.1.6(typescript@6.0.3): optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 tsconfig-paths-webpack-plugin@4.2.0: dependencies: @@ -15653,7 +15894,7 @@ snapshots: dependencies: tagged-tag: 1.0.0 - typescript@6.0.2: {} + typescript@6.0.3: {} ufo@1.6.3: {} @@ -15802,9 +16043,9 @@ snapshots: uuid@13.0.0: {} - valibot@1.3.1(typescript@6.0.2): + valibot@1.3.1(typescript@6.0.3): optionalDependencies: - typescript: 6.0.2 + typescript: 6.0.3 validate-npm-package-license@3.0.4: dependencies: @@ -15826,20 +16067,20 @@ snapshots: '@types/unist': 3.0.3 vfile-message: 4.0.3 - 
vinext@0.0.41(@mdx-js/rollup@3.1.1)(@vitejs/plugin-react@6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)))(@vitejs/plugin-rsc@0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5)(typescript@6.0.2): + vinext@0.0.41(@mdx-js/rollup@3.1.1)(@vitejs/plugin-react@6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)))(@vitejs/plugin-rsc@0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5)(typescript@6.0.3): dependencies: - '@unpic/react': 1.0.2(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + '@unpic/react': 
1.0.2(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react-dom@19.2.5(react@19.2.5))(react@19.2.5) '@vercel/og': 0.8.6 - '@vitejs/plugin-react': 6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)) + '@vitejs/plugin-react': 6.0.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)) magic-string: 0.30.21 react: 19.2.5 react-dom: 19.2.5(react@19.2.5) - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' vite-plugin-commonjs: 0.10.4 - vite-tsconfig-paths: 6.1.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2) + vite-tsconfig-paths: 6.1.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3) optionalDependencies: '@mdx-js/rollup': 3.1.1 - '@vitejs/plugin-rsc': 0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5) + '@vitejs/plugin-rsc': 0.5.24(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(react-dom@19.2.5(react@19.2.5))(react-server-dom-webpack@19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(react@19.2.5) react-server-dom-webpack: 19.2.5(react-dom@19.2.5(react@19.2.5))(react@19.2.5) transitivePeerDependencies: - next @@ -15859,9 +16100,9 @@ snapshots: fast-glob: 3.3.3 
magic-string: 0.30.21 - vite-plugin-inspect@12.0.0-beta.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2)(ws@8.20.0): + vite-plugin-inspect@12.0.0-beta.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3)(ws@8.20.0): dependencies: - '@vitejs/devtools-kit': 0.1.11(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2)(ws@8.20.0) + '@vitejs/devtools-kit': 0.1.11(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3)(ws@8.20.0) ansis: 4.2.0 error-stack-parser-es: 1.0.5 obug: 2.1.1 @@ -15870,31 +16111,31 @@ snapshots: perfect-debounce: 2.1.0 sirv: 3.0.2 unplugin-utils: 0.3.1 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - typescript - ws - vite-plugin-storybook-nextjs@3.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(next@16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.2): + 
vite-plugin-storybook-nextjs@3.2.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(next@16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(storybook@10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5))(typescript@6.0.3): dependencies: '@next/env': 16.0.0 image-size: 2.0.2 magic-string: 0.30.21 module-alias: 2.3.4 - next: 16.2.3(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) + next: 16.2.4(@babel/core@7.29.0)(@playwright/test@1.59.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) storybook: 10.3.5(@testing-library/dom@10.4.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5) ts-dedent: 2.2.0 - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' - vite-tsconfig-paths: 5.1.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2) + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' + vite-tsconfig-paths: 5.1.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3) transitivePeerDependencies: - supports-color - typescript - vite-plus@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3): + 
vite-plus@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3): dependencies: '@oxc-project/types': 0.126.0 - '@voidzero-dev/vite-plus-core': 0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) - '@voidzero-dev/vite-plus-test': 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3) + '@voidzero-dev/vite-plus-core': 0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) + '@voidzero-dev/vite-plus-test': 0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3) oxfmt: 0.45.0 oxlint: 1.60.0(oxlint-tsgolint@0.21.1) oxlint-tsgolint: 0.21.1 @@ -15937,36 +16178,36 @@ 
snapshots: - vite - yaml - vite-tsconfig-paths@5.1.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2): + vite-tsconfig-paths@5.1.4(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3): dependencies: debug: 4.4.3(supports-color@8.1.1) globrex: 0.1.2 - tsconfck: 3.1.6(typescript@6.0.2) + tsconfck: 3.1.6(typescript@6.0.3) optionalDependencies: - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - supports-color - typescript - vite-tsconfig-paths@6.1.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(typescript@6.0.2): + vite-tsconfig-paths@6.1.1(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(typescript@6.0.3): dependencies: debug: 4.4.3(supports-color@8.1.1) globrex: 0.1.2 - tsconfck: 3.1.6(typescript@6.0.2) - vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + tsconfck: 3.1.6(typescript@6.0.3) + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' transitivePeerDependencies: - supports-color - typescript - vitefu@1.1.3(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)): + vitefu@1.1.3(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)): optionalDependencies: - vite: 
'@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vite: '@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' - vitest-browser-react@2.2.0(@types/node@25.6.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3): + vitest-browser-react@2.2.0(@types/node@25.6.0)(@types/react-dom@19.2.3(@types/react@19.2.14))(@types/react@19.2.14)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(react-dom@19.2.5(react@19.2.5))(react@19.2.5)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3): dependencies: react: 19.2.5 react-dom: 19.2.5(react@19.2.5) - vitest: 
'@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vitest: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' optionalDependencies: '@types/react': 19.2.14 '@types/react-dom': 19.2.3(@types/react@19.2.14) @@ -16000,11 +16241,11 @@ snapshots: - vite - yaml - vitest-canvas-mock@1.1.4(@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)): + 
vitest-canvas-mock@1.1.4(@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)): dependencies: cssfontparser: 1.2.1 moo-color: 1.0.3 - vitest: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.4(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.2)(yaml@2.8.3)' + vitest: '@voidzero-dev/vite-plus-test@0.1.19(@types/node@25.6.0)(@vitest/coverage-v8@4.1.5(@types/node@25.6.0)(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(@voidzero-dev/vite-plus-core@0.1.19(@types/node@25.6.0)(esbuild@0.27.2)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3))(esbuild@0.27.2)(happy-dom@20.9.0)(jiti@2.6.1)(tsx@4.21.0)(typescript@6.0.3)(yaml@2.8.3)' void-elements@3.1.0: {} @@ -16025,10 +16266,10 @@ snapshots: vscode-uri@3.1.0: {} - vue-eslint-parser@10.4.0(eslint@10.2.0(jiti@2.6.1)): + vue-eslint-parser@10.4.0(eslint@10.2.1(jiti@2.6.1)): dependencies: debug: 4.4.3(supports-color@8.1.1) - eslint: 10.2.0(jiti@2.6.1) + eslint: 
10.2.1(jiti@2.6.1) eslint-scope: 9.1.2 eslint-visitor-keys: 5.0.1 espree: 11.2.0 diff --git a/pnpm-workspace.yaml b/pnpm-workspace.yaml index 7a81789267..0d78fed290 100644 --- a/pnpm-workspace.yaml +++ b/pnpm-workspace.yaml @@ -47,12 +47,12 @@ overrides: yaml@>=2.0.0 <2.8.3: 2.8.3 yauzl@<3.2.1: 3.2.1 catalog: - '@amplitude/analytics-browser': 2.39.0 - '@amplitude/plugin-session-replay-browser': 1.27.7 + '@amplitude/analytics-browser': 2.41.0 + '@amplitude/plugin-session-replay-browser': 1.27.10 '@antfu/eslint-config': 8.2.0 '@base-ui/react': 1.4.1 '@chromatic-com/storybook': 5.1.2 - '@cucumber/cucumber': 12.8.0 + '@cucumber/cucumber': 12.8.1 '@egoist/tailwindcss-icons': 1.9.2 '@emoji-mart/data': 1.2.1 '@eslint-react/eslint-plugin': 3.0.0 @@ -75,8 +75,8 @@ catalog: '@mdx-js/react': 3.1.1 '@mdx-js/rollup': 3.1.1 '@monaco-editor/react': 4.7.0 - '@next/eslint-plugin-next': 16.2.3 - '@next/mdx': 16.2.3 + '@next/eslint-plugin-next': 16.2.4 + '@next/mdx': 16.2.4 '@orpc/client': 1.13.14 '@orpc/contract': 1.13.14 '@orpc/openapi-client': 1.13.14 @@ -84,7 +84,7 @@ catalog: '@playwright/test': 1.59.1 '@remixicon/react': 4.9.0 '@rgrove/parse-xml': 4.2.0 - '@sentry/react': 10.48.0 + '@sentry/react': 10.49.0 '@storybook/addon-docs': 10.3.5 '@storybook/addon-links': 10.3.5 '@storybook/addon-onboarding': 10.3.5 @@ -95,23 +95,23 @@ catalog: '@streamdown/math': 1.0.2 '@svgdotjs/svg.js': 3.2.5 '@t3-oss/env-nextjs': 0.13.11 - '@tailwindcss/postcss': 4.2.2 + '@tailwindcss/postcss': 4.2.4 '@tailwindcss/typography': 0.5.19 - '@tailwindcss/vite': 4.2.2 - '@tanstack/eslint-plugin-query': 5.99.0 + '@tailwindcss/vite': 4.2.4 + '@tanstack/eslint-plugin-query': 5.99.2 '@tanstack/react-devtools': 0.10.2 - '@tanstack/react-form': 1.29.0 - '@tanstack/react-form-devtools': 0.2.21 - '@tanstack/react-query': 5.99.0 - '@tanstack/react-query-devtools': 5.99.0 - '@tanstack/react-virtual': 3.13.23 + '@tanstack/react-form': 1.29.1 + '@tanstack/react-form-devtools': 0.2.22 + '@tanstack/react-query': 5.99.2 
+ '@tanstack/react-query-devtools': 5.99.2 + '@tanstack/react-virtual': 3.13.24 '@testing-library/dom': 10.4.1 '@testing-library/jest-dom': 6.9.1 '@testing-library/react': 16.3.2 '@testing-library/user-event': 14.6.1 - '@tsslint/cli': 3.0.3 - '@tsslint/compat-eslint': 3.0.3 - '@tsslint/config': 3.0.3 + '@tsslint/cli': 3.0.4 + '@tsslint/compat-eslint': 3.0.4 + '@tsslint/config': 3.0.4 '@types/js-cookie': 3.0.6 '@types/js-yaml': 4.0.9 '@types/negotiator': 0.6.4 @@ -120,12 +120,12 @@ catalog: '@types/react': 19.2.14 '@types/react-dom': 19.2.3 '@types/sortablejs': 1.15.9 - '@typescript-eslint/eslint-plugin': 8.58.2 - '@typescript-eslint/parser': 8.58.2 - '@typescript/native-preview': 7.0.0-dev.20260413.1 + '@typescript-eslint/eslint-plugin': 8.59.0 + '@typescript-eslint/parser': 8.59.0 + '@typescript/native-preview': 7.0.0-dev.20260422.1 '@vitejs/plugin-react': 6.0.1 '@vitejs/plugin-rsc': 0.5.24 - '@vitest/coverage-v8': 4.1.4 + '@vitest/coverage-v8': 4.1.5 abcjs: 6.6.2 agentation: 3.0.2 ahooks: 3.9.7 @@ -138,22 +138,22 @@ catalog: cron-parser: 5.5.0 dayjs: 1.11.20 decimal.js: 10.6.0 - dompurify: 3.4.0 + dompurify: 3.4.1 echarts: 6.0.0 echarts-for-react: 3.0.6 elkjs: 0.11.1 embla-carousel-autoplay: 8.6.0 embla-carousel-react: 8.6.0 emoji-mart: 5.6.0 - es-toolkit: 1.45.1 - eslint: 10.2.0 + es-toolkit: 1.46.0 + eslint: 10.2.1 eslint-markdown: 0.6.1 eslint-plugin-better-tailwindcss: 4.4.1 eslint-plugin-hyoban: 0.14.1 eslint-plugin-markdown-preferences: 0.41.1 eslint-plugin-no-barrel-files: 1.3.1 eslint-plugin-react-refresh: 0.5.2 - eslint-plugin-sonarjs: 4.0.2 + eslint-plugin-sonarjs: 4.0.3 eslint-plugin-storybook: 10.3.5 fast-deep-equal: 3.1.3 happy-dom: 20.9.0 @@ -161,7 +161,7 @@ catalog: hono: 4.12.14 html-entities: 2.6.0 html-to-image: 1.11.13 - i18next: 26.0.4 + i18next: 26.0.6 i18next-resources-to-backend: 1.2.1 iconify-import-svg: 0.2.0 immer: 11.1.4 @@ -171,21 +171,21 @@ catalog: js-yaml: 4.1.1 jsonschema: 1.5.0 katex: 0.16.45 - knip: 6.4.1 - ky: 2.0.0 + knip: 
6.6.1 + ky: 2.0.2 lamejs: 1.2.1 lexical: 0.43.0 - loro-crdt: 1.10.8 + loro-crdt: 1.11.1 mermaid: 11.14.0 mime: 4.1.0 mitt: 3.0.1 negotiator: 1.0.0 - next: 16.2.3 + next: 16.2.4 next-themes: 0.4.6 nuqs: 2.8.9 pinyin-pro: 3.28.1 playwright: 1.59.1 - postcss: 8.5.9 + postcss: 8.5.10 qrcode.react: 4.2.0 qs: 6.15.1 react: 19.2.5 @@ -213,10 +213,10 @@ catalog: streamdown: 2.5.0 string-ts: 2.3.1 tailwind-merge: 3.5.0 - tailwindcss: 4.2.2 + tailwindcss: 4.2.4 tldts: 7.0.28 tsx: 4.21.0 - typescript: 6.0.2 + typescript: 6.0.3 uglify-js: 3.19.3 unist-util-visit: 5.1.0 use-context-selector: 2.0.0 diff --git a/sdks/nodejs-client/package.json b/sdks/nodejs-client/package.json index 28ebcb89c2..a8b9426e92 100644 --- a/sdks/nodejs-client/package.json +++ b/sdks/nodejs-client/package.json @@ -48,7 +48,7 @@ "build": "vp pack", "lint": "eslint", "lint:fix": "eslint --fix", - "type-check": "tsc", + "type-check": "tsgo", "test": "vp test", "test:coverage": "vp test --coverage", "publish:check": "./scripts/publish.sh --dry-run", @@ -60,6 +60,7 @@ "@types/node": "catalog:", "@typescript-eslint/eslint-plugin": "catalog:", "@typescript-eslint/parser": "catalog:", + "@typescript/native-preview": "catalog:", "@vitest/coverage-v8": "catalog:", "eslint": "catalog:", "typescript": "catalog:", diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx index b5da0e4ca5..002f8f3bf1 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/long-time-range-picker.tsx @@ -1,14 +1,17 @@ 'use client' import type { FC } from 'react' import type { PeriodParams } from '@/app/components/app/overview/app-chart' -import type { Item } from '@/app/components/base/select' import type { I18nKeysByPrefix } from '@/types/i18n' +import { Select, SelectContent, SelectItem, 
SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import dayjs from 'dayjs' import * as React from 'react' import { useTranslation } from 'react-i18next' -import { SimpleSelect } from '@/app/components/base/select' type TimePeriodName = I18nKeysByPrefix<'appLog', 'filter.period.'> +type TimePeriodOption = { + value: string + name: string +} type Props = { periodMapping: { [key: string]: { value: number, name: TimePeriodName } } @@ -24,8 +27,18 @@ const LongTimeRangePicker: FC = ({ queryDateFormat, }) => { const { t } = useTranslation() + const items = React.useMemo(() => { + return Object.entries(periodMapping).map(([key, period]) => ({ + value: key, + name: t(`filter.period.${period.name}`, { ns: 'appLog' }), + })) + }, [periodMapping, t]) + const [value, setValue] = React.useState('2') + const selectedItem = React.useMemo(() => { + return items.find(item => item.value === value) ?? null + }, [items, value]) - const handleSelect = React.useCallback((item: Item) => { + const handleSelect = React.useCallback((item: TimePeriodOption) => { const id = item.value const value = periodMapping[id]?.value ?? '-1' const name = item.name || t('filter.period.allTime', { ns: 'appLog' }) @@ -55,13 +68,30 @@ const LongTimeRangePicker: FC = ({ }, [onSelect, periodMapping, queryDateFormat, t]) return ( - ({ value: k, name: t(`filter.period.${v.name}`, { ns: 'appLog' }) }))} - className="mt-0 w-40!" 
- notClearable={true} - onSelect={handleSelect} - defaultValue="2" - /> + ) } export default React.memo(LongTimeRangePicker) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx index a89b77e9e3..c028a184ed 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/time-range-picker/range-selector.tsx @@ -1,19 +1,22 @@ 'use client' import type { FC } from 'react' import type { PeriodParamsWithTimeRange, TimeRange } from '@/app/components/app/overview/app-chart' -import type { Item } from '@/app/components/base/select' import type { I18nKeysByPrefix } from '@/types/i18n' import { cn } from '@langgenius/dify-ui/cn' -import { RiArrowDownSLine, RiCheckLine } from '@remixicon/react' +import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' +import { RiArrowDownSLine } from '@remixicon/react' import dayjs from 'dayjs' import * as React from 'react' -import { useCallback } from 'react' +import { useCallback, useMemo, useState } from 'react' import { useTranslation } from 'react-i18next' -import { SimpleSelect } from '@/app/components/base/select' const today = dayjs() type TimePeriodName = I18nKeysByPrefix<'appLog', 'filter.period.'> +type TimePeriodOption = { + value: number + name: string +} type Props = { isCustomRange: boolean @@ -27,8 +30,19 @@ const RangeSelector: FC = ({ onSelect, }) => { const { t } = useTranslation() + const [open, setOpen] = useState(false) + const items = useMemo(() => { + return ranges.map(range => ({ + ...range, + name: t(`filter.period.${range.name}`, { ns: 'appLog' }), + })) + }, [ranges, t]) + const [value, setValue] = useState('0') + const selectedItem = useMemo(() => { + return 
items.find(item => String(item.value) === value) ?? null + }, [items, value]) - const handleSelectRange = useCallback((item: Item) => { + const handleSelectRange = useCallback((item: TimePeriodOption) => { const { name, value } = item let period: TimeRange | null = null if (value === 0) { @@ -42,44 +56,38 @@ const RangeSelector: FC = ({ onSelect({ query: period!, name }) }, [onSelect]) - const renderTrigger = useCallback((item: Item | null, isOpen: boolean) => { - return ( -
-
{isCustomRange ? t('filter.period.custom', { ns: 'appLog' }) : item?.name}
- -
- ) - }, [isCustomRange]) - - const renderOption = useCallback(({ item, selected }: { item: Item, selected: boolean }) => { - return ( - <> - {selected && ( - - - )} - {item.name} - - ) - }, []) return ( - ({ ...v, name: t(`filter.period.${v.name}`, { ns: 'appLog' }) }))} - className="mt-0 w-40!" - notClearable={true} - onSelect={handleSelectRange} - defaultValue={0} - wrapperClassName="h-8" - optionWrapClassName="w-[200px] translate-x-[-24px]" - renderTrigger={renderTrigger} - optionClassName="flex items-center py-0 pl-7 pr-2 h-8" - renderOption={renderOption} - /> + ) } export default React.memo(RangeSelector) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/provider-config-modal.spec.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/provider-config-modal.spec.tsx new file mode 100644 index 0000000000..f9e5ea28ee --- /dev/null +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/__tests__/provider-config-modal.spec.tsx @@ -0,0 +1,346 @@ +import type { AliyunConfig, ArizeConfig, DatabricksConfig, LangFuseConfig, LangSmithConfig, MLflowConfig, OpikConfig, PhoenixConfig, TencentConfig, WeaveConfig } from '../type' +import { toast } from '@langgenius/dify-ui/toast' +import { render, screen, waitFor } from '@testing-library/react' +import userEvent from '@testing-library/user-event' +import { addTracingConfig, removeTracingConfig, updateTracingConfig } from '@/service/apps' +import ConfigBtn from '../config-button' +import ProviderConfigModal from '../provider-config-modal' +import { TracingProvider } from '../type' + +vi.mock('@/service/apps', () => ({ + addTracingConfig: vi.fn(), + removeTracingConfig: vi.fn(), + updateTracingConfig: vi.fn(), +})) + +vi.mock('@langgenius/dify-ui/toast', () => ({ + toast: vi.fn(), +})) + +type ProviderPayload = AliyunConfig | ArizeConfig | DatabricksConfig | LangFuseConfig | LangSmithConfig | MLflowConfig | OpikConfig | 
PhoenixConfig | TencentConfig | WeaveConfig + +const validConfigs = { + [TracingProvider.arize]: { + api_key: 'arize-api-key', + space_id: 'space-id', + project: 'arize-project', + endpoint: 'https://otlp.arize.com', + }, + [TracingProvider.phoenix]: { + api_key: 'phoenix-api-key', + project: 'phoenix-project', + endpoint: 'https://app.phoenix.arize.com', + }, + [TracingProvider.langSmith]: { + api_key: 'langsmith-api-key', + project: 'langsmith-project', + endpoint: 'https://api.smith.langchain.com', + }, + [TracingProvider.langfuse]: { + public_key: 'public-key', + secret_key: 'secret-key', + host: 'https://cloud.langfuse.com', + }, + [TracingProvider.opik]: { + api_key: 'opik-api-key', + project: 'opik-project', + workspace: 'default', + url: 'https://www.comet.com/opik/api/', + }, + [TracingProvider.weave]: { + api_key: 'weave-api-key', + entity: 'wandb-entity', + project: 'weave-project', + endpoint: 'https://trace.wandb.ai/', + host: 'https://api.wandb.ai', + }, + [TracingProvider.aliyun]: { + app_name: 'aliyun-app', + license_key: 'license-key', + endpoint: 'https://tracing.arms.aliyuncs.com', + }, + [TracingProvider.mlflow]: { + tracking_uri: 'http://localhost:5000', + experiment_id: 'experiment-id', + username: 'mlflow-user', + password: 'mlflow-password', + }, + [TracingProvider.databricks]: { + experiment_id: 'experiment-id', + host: 'https://workspace.cloud.databricks.com', + client_id: 'client-id', + client_secret: 'client-secret', + personal_access_token: 'personal-access-token', + }, + [TracingProvider.tencent]: { + token: 'tencent-token', + endpoint: 'https://your-region.cls.tencentcs.com', + service_name: 'dify_app', + }, +} satisfies Record + +const providerFieldLabels = [ + [TracingProvider.arize, ['API Key', 'Space ID', 'app.tracing.configProvider.project', 'Endpoint']], + [TracingProvider.phoenix, ['API Key', 'app.tracing.configProvider.project', 'Endpoint']], + [TracingProvider.langSmith, ['API Key', 'app.tracing.configProvider.project', 
'Endpoint']], + [TracingProvider.langfuse, ['app.tracing.configProvider.secretKey', 'app.tracing.configProvider.publicKey', 'Host']], + [TracingProvider.opik, ['API Key', 'app.tracing.configProvider.project', 'Workspace', 'Url']], + [TracingProvider.weave, ['API Key', 'app.tracing.configProvider.project', 'Entity', 'Endpoint', 'Host']], + [TracingProvider.aliyun, ['License Key', 'Endpoint', 'App Name']], + [TracingProvider.mlflow, ['app.tracing.configProvider.trackingUri', 'app.tracing.configProvider.experimentId', 'app.tracing.configProvider.username', 'app.tracing.configProvider.password']], + [TracingProvider.databricks, ['app.tracing.configProvider.experimentId', 'app.tracing.configProvider.databricksHost', 'app.tracing.configProvider.clientId', 'app.tracing.configProvider.clientSecret', 'app.tracing.configProvider.personalAccessToken']], + [TracingProvider.tencent, ['Token', 'Endpoint', 'Service Name']], +] as const + +const invalidConfigCases: Array<{ + provider: TracingProvider + payload: ProviderPayload + missingField: string +}> = [ + { provider: TracingProvider.arize, payload: { ...validConfigs[TracingProvider.arize], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.arize, payload: { ...validConfigs[TracingProvider.arize], space_id: '' }, missingField: 'Space ID' }, + { provider: TracingProvider.arize, payload: { ...validConfigs[TracingProvider.arize], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.phoenix, payload: { ...validConfigs[TracingProvider.phoenix], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.phoenix, payload: { ...validConfigs[TracingProvider.phoenix], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.langSmith, payload: { ...validConfigs[TracingProvider.langSmith], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.langSmith, payload: { 
...validConfigs[TracingProvider.langSmith], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.langfuse, payload: { ...validConfigs[TracingProvider.langfuse], secret_key: '' }, missingField: 'app.tracing.configProvider.secretKey' }, + { provider: TracingProvider.langfuse, payload: { ...validConfigs[TracingProvider.langfuse], public_key: '' }, missingField: 'app.tracing.configProvider.publicKey' }, + { provider: TracingProvider.langfuse, payload: { ...validConfigs[TracingProvider.langfuse], host: '' }, missingField: 'Host' }, + { provider: TracingProvider.weave, payload: { ...validConfigs[TracingProvider.weave], api_key: '' }, missingField: 'API Key' }, + { provider: TracingProvider.weave, payload: { ...validConfigs[TracingProvider.weave], project: '' }, missingField: 'app.tracing.configProvider.project' }, + { provider: TracingProvider.aliyun, payload: { ...validConfigs[TracingProvider.aliyun], app_name: '' }, missingField: 'App Name' }, + { provider: TracingProvider.aliyun, payload: { ...validConfigs[TracingProvider.aliyun], license_key: '' }, missingField: 'License Key' }, + { provider: TracingProvider.aliyun, payload: { ...validConfigs[TracingProvider.aliyun], endpoint: '' }, missingField: 'Endpoint' }, + { provider: TracingProvider.mlflow, payload: { ...validConfigs[TracingProvider.mlflow], tracking_uri: '' }, missingField: 'Tracking URI' }, + { provider: TracingProvider.databricks, payload: { ...validConfigs[TracingProvider.databricks], experiment_id: '' }, missingField: 'Experiment ID' }, + { provider: TracingProvider.databricks, payload: { ...validConfigs[TracingProvider.databricks], host: '' }, missingField: 'Host' }, + { provider: TracingProvider.tencent, payload: { ...validConfigs[TracingProvider.tencent], token: '' }, missingField: 'Token' }, + { provider: TracingProvider.tencent, payload: { ...validConfigs[TracingProvider.tencent], endpoint: '' }, missingField: 'Endpoint' }, + { provider: 
TracingProvider.tencent, payload: { ...validConfigs[TracingProvider.tencent], service_name: '' }, missingField: 'Service Name' }, +] + +const renderConfigButton = () => { + return render( + + + , + ) +} + +const renderProviderConfigModal = ({ + type = TracingProvider.langfuse, + payload, +}: { + type?: TracingProvider + payload?: ProviderPayload | null +} = {}) => { + const callbacks = { + onCancel: vi.fn(), + onSaved: vi.fn(), + onChosen: vi.fn(), + onRemoved: vi.fn(), + } + + render( + , + ) + + return callbacks +} + +describe('ProviderConfigModal', () => { + beforeEach(() => { + vi.clearAllMocks() + vi.mocked(addTracingConfig).mockResolvedValue({ result: 'success' }) + vi.mocked(updateTracingConfig).mockResolvedValue({ result: 'success' }) + vi.mocked(removeTracingConfig).mockResolvedValue({ result: 'success' }) + }) + + describe('Nested Overlay Behavior', () => { + it('should keep the provider config modal open when clicking inside it', async () => { + const user = userEvent.setup() + renderConfigButton() + + await user.click(screen.getByRole('button', { name: 'Open tracing' })) + await waitFor(() => { + expect(screen.getByText('app.tracing.tracing')).toBeInTheDocument() + }) + + const configActions = screen.getAllByText('app.tracing.config') + expect(configActions.length).toBeGreaterThan(0) + await user.click(configActions[0]!) 
+ await waitFor(() => { + expect(screen.getByText('app.tracing.configProvider.titleapp.tracing.langfuse.title')).toBeInTheDocument() + }) + expect(screen.getByRole('dialog')).toBeInTheDocument() + + await user.click(screen.getByPlaceholderText('https://cloud.langfuse.com')) + + expect(screen.getByText('app.tracing.tracing')).toBeInTheDocument() + expect(screen.getByText('app.tracing.configProvider.titleapp.tracing.langfuse.title')).toBeInTheDocument() + }) + }) + + describe('Rendering', () => { + it.each(providerFieldLabels)('should render %s fields when adding a provider', (provider, expectedLabels) => { + renderProviderConfigModal({ type: provider }) + + expect(screen.getByText(`app.tracing.configProvider.titleapp.tracing.${provider}.title`)).toBeInTheDocument() + expectedLabels.forEach((label) => { + expect(screen.getByText(label)).toBeInTheDocument() + }) + expect(screen.getByRole('button', { name: 'common.operation.saveAndEnable' })).toBeInTheDocument() + }) + }) + + describe('Saving', () => { + it('should add and choose the provider when saving a new config', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal({ type: TracingProvider.langfuse }) + const textboxes = screen.getAllByRole('textbox') + + await user.type(textboxes[0]!, 'secret-key') + await user.type(textboxes[1]!, 'public-key') + await user.type(textboxes[2]!, 'https://cloud.langfuse.com') + await user.click(screen.getByRole('button', { name: 'common.operation.saveAndEnable' })) + + await waitFor(() => { + expect(addTracingConfig).toHaveBeenCalledWith({ + appId: 'app-id', + body: { + tracing_provider: TracingProvider.langfuse, + tracing_config: validConfigs[TracingProvider.langfuse], + }, + }) + }) + expect(callbacks.onSaved).toHaveBeenCalledWith(validConfigs[TracingProvider.langfuse]) + expect(callbacks.onChosen).toHaveBeenCalledWith(TracingProvider.langfuse) + expect(toast).toHaveBeenCalledWith('common.api.success', { type: 'success' }) + }) + + 
it.each(Object.values(TracingProvider))('should update valid %s config in edit mode', async (provider) => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal({ + type: provider, + payload: validConfigs[provider], + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.save' })) + + await waitFor(() => { + expect(updateTracingConfig).toHaveBeenCalledWith({ + appId: 'app-id', + body: { + tracing_provider: provider, + tracing_config: validConfigs[provider], + }, + }) + }) + expect(callbacks.onSaved).toHaveBeenCalledWith(validConfigs[provider]) + expect(callbacks.onChosen).not.toHaveBeenCalled() + }) + + it.each(invalidConfigCases)('should reject $provider config when $missingField is missing', async ({ provider, payload, missingField }) => { + const user = userEvent.setup() + renderProviderConfigModal({ + type: provider, + payload, + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.save' })) + + expect(updateTracingConfig).not.toHaveBeenCalled() + expect(toast).toHaveBeenCalledWith( + expect.stringContaining(missingField), + { type: 'error' }, + ) + }) + }) + + describe('Closing And Removing', () => { + it('should cancel when the cancel button is clicked', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal() + + await user.click(screen.getByRole('button', { name: 'common.operation.cancel' })) + + expect(callbacks.onCancel).toHaveBeenCalledTimes(1) + }) + + it('should cancel when the dialog is closed with Escape', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal() + + await user.keyboard('{Escape}') + + await waitFor(() => { + expect(callbacks.onCancel).toHaveBeenCalledTimes(1) + }) + }) + + it('should remove an existing provider after confirmation', async () => { + const user = userEvent.setup() + const callbacks = renderProviderConfigModal({ + type: TracingProvider.langfuse, + payload: 
validConfigs[TracingProvider.langfuse], + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.remove' })) + expect(screen.getByText('app.tracing.configProvider.removeConfirmTitle:{"key":"app.tracing.langfuse.title"}')).toBeInTheDocument() + + await user.click(screen.getByRole('button', { name: 'common.operation.confirm' })) + + await waitFor(() => { + expect(removeTracingConfig).toHaveBeenCalledWith({ + appId: 'app-id', + provider: TracingProvider.langfuse, + }) + }) + expect(callbacks.onRemoved).toHaveBeenCalledTimes(1) + expect(toast).toHaveBeenCalledWith('common.api.remove', { type: 'success' }) + }) + + it('should return to the edit dialog when remove confirmation is canceled', async () => { + const user = userEvent.setup() + renderProviderConfigModal({ + type: TracingProvider.langfuse, + payload: validConfigs[TracingProvider.langfuse], + }) + + await user.click(screen.getByRole('button', { name: 'common.operation.remove' })) + await user.click(screen.getByRole('button', { name: 'common.operation.cancel' })) + + expect(removeTracingConfig).not.toHaveBeenCalled() + expect(screen.getByText('app.tracing.configProvider.titleapp.tracing.langfuse.title')).toBeInTheDocument() + }) + }) +}) diff --git a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx index 4f2497ad71..734b39bd41 100644 --- a/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx +++ b/web/app/(commonLayout)/app/(appDetailLayout)/[appId]/overview/tracing/provider-config-modal.tsx @@ -11,6 +11,10 @@ import { AlertDialogTitle, } from '@langgenius/dify-ui/alert-dialog' import { Button } from '@langgenius/dify-ui/button' +import { + Dialog, + DialogContent, +} from '@langgenius/dify-ui/dialog' import { toast } from '@langgenius/dify-ui/toast' import { useBoolean } from 'ahooks' import * as React 
from 'react' @@ -19,10 +23,6 @@ import { useTranslation } from 'react-i18next' import Divider from '@/app/components/base/divider' import { LinkExternal02 } from '@/app/components/base/icons/src/vender/line/general' import { Lock01 } from '@/app/components/base/icons/src/vender/solid/security' -import { - PortalToFollowElem, - PortalToFollowElemContent, -} from '@/app/components/base/portal-to-follow-elem' import { addTracingConfig, removeTracingConfig, updateTracingConfig } from '@/service/apps' import { docURL } from './config' import Field from './field' @@ -153,7 +153,11 @@ const ProviderConfigModal: FC = ({ return weaveConfigTemplate })()) - const [isShowRemoveConfirm, { + const [isConfigDialogOpen, { + set: setIsConfigDialogOpen, + }] = useBoolean(true) + const [isRemoveDialogOpen, { + set: setIsRemoveDialogOpen, setTrue: showRemoveConfirm, setFalse: hideRemoveConfirm, }] = useBoolean(false) @@ -291,13 +295,24 @@ const ProviderConfigModal: FC = ({ } }, [appId, checkValid, config, isAdd, isEdit, isSaving, onChosen, onSaved, t, type]) + // Defer onCancel to onOpenChangeComplete so the dialog's exit animation + // (scale/opacity transition) can finish before the parent unmounts this modal. + const handleConfigDialogOpenChangeComplete = useCallback((open: boolean) => { + if (!open) + onCancel() + }, [onCancel]) + return ( <> - {!isShowRemoveConfirm + {!isRemoveDialogOpen ? ( - - -
+ + +
@@ -650,7 +665,7 @@ const ProviderConfigModal: FC = ({ )} @@ -683,11 +698,11 @@ const ProviderConfigModal: FC = ({
- - + +
) : ( - !open && hideRemoveConfirm()}> +
diff --git a/web/app/account/oauth/authorize/page.tsx b/web/app/account/oauth/authorize/page.tsx index dd95dc04ba..55666db193 100644 --- a/web/app/account/oauth/authorize/page.tsx +++ b/web/app/account/oauth/authorize/page.tsx @@ -16,9 +16,9 @@ import { useEffect, useRef } from 'react' import { useTranslation } from 'react-i18next' import Loading from '@/app/components/base/loading' import { useLanguage } from '@/app/components/header/account-setting/model-provider-page/hooks' -import { setPostLoginRedirect } from '@/app/signin/utils/post-login-redirect' +import { setOAuthPendingRedirect } from '@/app/signin/utils/post-login-redirect' import { useRouter, useSearchParams } from '@/next/navigation' -import { isLegacyBase401, userProfileQueryOptions } from '@/service/use-common' +import { isLegacyBase401, useLogout, userProfileQueryOptions } from '@/service/use-common' import { useAuthorizeOAuthApp, useOAuthAppInfo } from '@/service/use-oauth' function buildReturnUrl(pathname: string, search: string) { @@ -73,14 +73,17 @@ export default function OAuthAuthorize() { const userProfile = userProfileResp?.profile const { data: authAppInfo, isLoading: isOAuthLoading, isError } = useOAuthAppInfo(client_id, redirect_uri) const { mutateAsync: authorize, isPending: authorizing } = useAuthorizeOAuthApp() + const { mutateAsync: logout } = useLogout() const hasNotifiedRef = useRef(false) const isLoading = isOAuthLoading || isProfileLoading - const onLoginSwitchClick = () => { + const onLoginSwitchClick = async () => { try { - const returnUrl = buildReturnUrl('/account/oauth/authorize', `?client_id=${encodeURIComponent(client_id)}&redirect_uri=${encodeURIComponent(redirect_uri)}`) - setPostLoginRedirect(returnUrl) - router.push('/signin') + const returnUrl = buildReturnUrl('/account/oauth/authorize', `?${searchParams.toString()}`) + setOAuthPendingRedirect(returnUrl) + if (isLoggedIn) + await logout() + router.push(`/signin?redirect_url=${encodeURIComponent(returnUrl)}`) } catch { 
router.push('/signin') diff --git a/web/app/components/app-initializer.tsx b/web/app/components/app-initializer.tsx index 2c50312590..3d2af1ce61 100644 --- a/web/app/components/app-initializer.tsx +++ b/web/app/components/app-initializer.tsx @@ -85,7 +85,7 @@ export const AppInitializer = ({ return } - const redirectUrl = resolvePostLoginRedirect() + const redirectUrl = resolvePostLoginRedirect(searchParams) if (redirectUrl) { location.replace(redirectUrl) return diff --git a/web/app/components/app-sidebar/index.tsx b/web/app/components/app-sidebar/index.tsx index 716b73abf5..4750d7919c 100644 --- a/web/app/components/app-sidebar/index.tsx +++ b/web/app/components/app-sidebar/index.tsx @@ -17,6 +17,15 @@ import DatasetSidebarDropdown from './dataset-sidebar-dropdown' import NavLink from './nav-link' import ToggleButton from './toggle-button' +const isShortcutFromInputArea = (target: EventTarget | null) => { + if (!(target instanceof HTMLElement)) + return false + + return target.tagName === 'INPUT' + || target.tagName === 'TEXTAREA' + || target.isContentEditable +} + type IAppDetailNavProps = { iconType?: 'app' | 'dataset' navigation: Array<{ @@ -70,6 +79,9 @@ const AppDetailNav = ({ }, [appSidebarExpand, setAppSidebarExpand]) useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.b`, (e) => { + if (isShortcutFromInputArea(e.target)) + return + e.preventDefault() handleToggle() }, { exactMatch: true, useCapture: true }) diff --git a/web/app/components/app/app-publisher/__tests__/index.spec.tsx b/web/app/components/app/app-publisher/__tests__/index.spec.tsx index aa9cda8e34..5df331767b 100644 --- a/web/app/components/app/app-publisher/__tests__/index.spec.tsx +++ b/web/app/components/app/app-publisher/__tests__/index.spec.tsx @@ -80,8 +80,11 @@ vi.mock('@/service/explore', () => ({ fetchInstalledAppList: (...args: unknown[]) => mockFetchInstalledAppList(...args), })) +const mockPublishToCreatorsPlatform = vi.fn() + vi.mock('@/service/apps', () => ({ 
fetchAppDetailDirect: (...args: unknown[]) => mockFetchAppDetailDirect(...args), + publishToCreatorsPlatform: (...args: unknown[]) => mockPublishToCreatorsPlatform(...args), })) vi.mock('@/service/use-workflow', () => ({ @@ -434,6 +437,76 @@ describe('AppPublisher', () => { }) }) + it('should show marketplace button and open redirect URL on success', async () => { + mockPublishToCreatorsPlatform.mockResolvedValue({ redirect_url: 'https://marketplace.example.com/publish?code=abc' }) + const windowOpenSpy = vi.spyOn(window, 'open').mockImplementation(() => null) + + renderWithSystemFeatures( + , + { systemFeatures: { webapp_auth: { enabled: true }, enable_creators_platform: true } }, + ) + + fireEvent.click(screen.getByText('common.publish')) + fireEvent.click(screen.getByText('common.publishToMarketplace')) + + await waitFor(() => { + expect(mockPublishToCreatorsPlatform).toHaveBeenCalledWith({ appID: 'app-1' }) + expect(windowOpenSpy).toHaveBeenCalledWith('https://marketplace.example.com/publish?code=abc', '_blank') + }) + + windowOpenSpy.mockRestore() + }) + + it('should show toast error when publish to marketplace fails', async () => { + mockPublishToCreatorsPlatform.mockRejectedValue(new Error('network error')) + + renderWithSystemFeatures( + , + { systemFeatures: { webapp_auth: { enabled: true }, enable_creators_platform: true } }, + ) + + fireEvent.click(screen.getByText('common.publish')) + fireEvent.click(screen.getByText('common.publishToMarketplace')) + + await waitFor(() => { + expect(mockToastError).toHaveBeenCalledWith('common.publishToMarketplaceFailed') + }) + }) + + it('should disable marketplace button when not yet published', () => { + renderWithSystemFeatures( + , + { systemFeatures: { webapp_auth: { enabled: true }, enable_creators_platform: true } }, + ) + + fireEvent.click(screen.getByText('common.publish')) + const marketplaceButton = screen.getByText('common.publishToMarketplace').closest('a, button, div[role="button"]') as HTMLElement + 
expect(marketplaceButton).toBeInTheDocument() + // clicking should not call the API because publishedAt is undefined + fireEvent.click(screen.getByText('common.publishToMarketplace')) + expect(mockPublishToCreatorsPlatform).not.toHaveBeenCalled() + }) + + it('should hide marketplace button when enable_creators_platform is false', () => { + render( + , + ) + + fireEvent.click(screen.getByText('common.publish')) + expect(screen.queryByText('common.publishToMarketplace')).not.toBeInTheDocument() + }) + it('should keep access control open when app detail is unavailable during confirmation', async () => { mockAppDetail = null diff --git a/web/app/components/app/app-publisher/index.tsx b/web/app/components/app/app-publisher/index.tsx index b85e888557..fe6fe5806f 100644 --- a/web/app/components/app/app-publisher/index.tsx +++ b/web/app/components/app/app-publisher/index.tsx @@ -5,6 +5,7 @@ import type { PublishWorkflowParams } from '@/types/workflow' import { Button } from '@langgenius/dify-ui/button' import { Popover, PopoverContent, PopoverTrigger } from '@langgenius/dify-ui/popover' import { toast } from '@langgenius/dify-ui/toast' +import { RiStoreLine } from '@remixicon/react' import { useSuspenseQuery } from '@tanstack/react-query' import { useKeyPress } from 'ahooks' import { @@ -26,7 +27,7 @@ import { useAsyncWindowOpen } from '@/hooks/use-async-window-open' import { useFormatTimeFromNow } from '@/hooks/use-format-time-from-now' import { AccessMode } from '@/models/access-control' import { useAppWhiteListSubjects, useGetUserCanAccessApp } from '@/service/access-control' -import { fetchAppDetailDirect } from '@/service/apps' +import { fetchAppDetailDirect, publishToCreatorsPlatform } from '@/service/apps' import { fetchInstalledAppList } from '@/service/explore' import { systemFeaturesQueryOptions } from '@/service/system-features' import { useInvalidateAppWorkflow } from '@/service/use-workflow' @@ -40,6 +41,7 @@ import { PublisherActionsSection, 
PublisherSummarySection, } from './sections' +import SuggestedAction from './suggested-action' import { getDisabledFunctionTooltip, getPublisherAppUrl, @@ -100,6 +102,7 @@ const AppPublisher = ({ const [showAppAccessControl, setShowAppAccessControl] = useState(false) const [embeddingModalOpen, setEmbeddingModalOpen] = useState(false) + const [publishingToMarketplace, setPublishingToMarketplace] = useState(false) const workflowStore = useContext(WorkflowContext) const appDetail = useAppStore(state => state.appDetail) @@ -219,6 +222,23 @@ const AppPublisher = ({ } }, [appDetail, setAppDetail]) + const handlePublishToMarketplace = useCallback(async () => { + if (!appDetail?.id || publishingToMarketplace) + return + setPublishingToMarketplace(true) + try { + const res = await publishToCreatorsPlatform({ appID: appDetail.id }) + if (res.redirect_url) + window.open(res.redirect_url, '_blank') + } + catch { + toast.error(t('common.publishToMarketplaceFailed', { ns: 'workflow' })) + } + finally { + setPublishingToMarketplace(false) + } + }, [appDetail?.id, publishingToMarketplace, t]) + useKeyPress(`${getKeyboardKeyCodeBySystem('ctrl')}.shift.p`, (e) => { e.preventDefault() if (publishDisabled || published) @@ -336,6 +356,19 @@ const AppPublisher = ({ workflowToolAvailable={workflowToolAvailable} workflowToolMessage={workflowToolMessage} /> + {systemFeatures.enable_creators_platform && ( +
+ } + disabled={!publishedAt || publishingToMarketplace} + onClick={handlePublishToMarketplace} + > + {publishingToMarketplace + ? t('common.publishingToMarketplace', { ns: 'workflow' }) + : t('common.publishToMarketplace', { ns: 'workflow' })} + +
+ )}
({ - FileUploaderInAttachmentWrapper: ({ onChange }: { onChange: (files: Array>) => void }) => ( - + FileUploaderInAttachmentWrapper: ({ + onChange, + value, + fileConfig, + }: { + onChange: (files?: Array>) => void + value: Array> + fileConfig: Record + }) => ( +
+ {JSON.stringify(value)} + {JSON.stringify(fileConfig)} + + +
), })) @@ -38,12 +53,6 @@ vi.mock('@/app/components/base/checkbox', () => ({ ), })) -vi.mock('@/app/components/base/select', () => ({ - default: ({ onSelect }: { onSelect: (item: { value: string }) => void }) => ( - - ), -})) - vi.mock('@langgenius/dify-ui/select', async (importOriginal) => { const actual = await importOriginal() @@ -52,6 +61,7 @@ vi.mock('@langgenius/dify-ui/select', async (importOriginal) => { Select: ({ value, onValueChange, children }: { value: string, onValueChange: (value: string) => void, children: ReactNode }) => (
+ {children}
), @@ -86,8 +96,8 @@ vi.mock('../../config-select', () => ({ })) vi.mock('../../config-string', () => ({ - default: ({ onChange }: { onChange: (value: number) => void }) => ( - + default: ({ onChange, maxLength }: { onChange: (value: number) => void, maxLength: number }) => ( + ), })) @@ -211,4 +221,150 @@ describe('ConfigModalFormFields', () => { fireEvent.click(screen.getByText('json-editor')) expect(jsonProps.onJSONSchemaChange).toHaveBeenCalledWith('{\n "type": "object"\n}') }) + + it('should update text input metadata and clear empty defaults for string inputs', () => { + const textProps = createBaseProps() + textProps.isStringInput = true + textProps.tempPayload = { + ...textProps.tempPayload, + type: InputVarType.textInput, + default: 'hello', + } + + render() + + const variableInput = screen.getByDisplayValue('question') + + fireEvent.click(screen.getByText('type-selector')) + fireEvent.change(variableInput, { target: { value: 'prompt' } }) + fireEvent.blur(variableInput) + fireEvent.change(screen.getByDisplayValue('Question'), { target: { value: 'Prompt Label' } }) + fireEvent.click(screen.getByText('config-string')) + fireEvent.change(screen.getByDisplayValue('hello'), { target: { value: '' } }) + + expect(textProps.onTypeChange).toHaveBeenCalledWith({ value: InputVarType.select }) + expect(textProps.onVarNameChange).toHaveBeenCalled() + expect(textProps.onVarKeyBlur).toHaveBeenCalled() + expect(textProps.payloadChangeHandlers.label).toHaveBeenCalledWith('Prompt Label') + expect(textProps.payloadChangeHandlers.max_length).toHaveBeenCalledWith(64) + expect(textProps.payloadChangeHandlers.default).toHaveBeenCalledWith(undefined) + }) + + it('should clear select defaults and apply uploader fallback values', () => { + const selectProps = createBaseProps() + selectProps.tempPayload = { ...selectProps.tempPayload, type: InputVarType.select, default: 'alpha' } + selectProps.options = ['alpha', ' ', 'beta'] + render() + + 
fireEvent.click(screen.getByText('ui-select-empty')) + expect(selectProps.payloadChangeHandlers.default).toHaveBeenCalledWith(undefined) + + const singleFallbackProps = createBaseProps() + singleFallbackProps.tempPayload = { + ...singleFallbackProps.tempPayload, + type: InputVarType.singleFile, + default: undefined, + } + render() + + expect(screen.getAllByTestId('file-uploader-value')[0]).toHaveTextContent('[]') + expect(screen.getAllByTestId('file-uploader-config')[0]).toHaveTextContent('"allowed_file_types":["document"]') + expect(screen.getAllByTestId('file-uploader-config')[0]).toHaveTextContent('"allowed_file_upload_methods":["remote_url"]') + expect(screen.getAllByTestId('file-uploader-config')[0]).toHaveTextContent('"number_limits":1') + fireEvent.click(screen.getAllByTestId('upload-empty-file')[0]!) + expect(singleFallbackProps.payloadChangeHandlers.default).toHaveBeenCalledWith(undefined) + + const multiFallbackProps = createBaseProps() + multiFallbackProps.tempPayload = { + ...multiFallbackProps.tempPayload, + type: InputVarType.multiFiles, + default: undefined, + max_length: undefined, + } + render() + + expect(screen.getAllByTestId('file-uploader-value')[1]).toHaveTextContent('[]') + expect(screen.getAllByTestId('file-uploader-config')[1]).toHaveTextContent('"number_limits":5') + fireEvent.click(screen.getAllByTestId('upload-empty-file')[1]!) 
+ expect(multiFallbackProps.payloadChangeHandlers.default).toHaveBeenCalledWith(undefined) + }) + + it('should clear number defaults and skip rendering the default selector when options are missing', () => { + const numberProps = createBaseProps() + numberProps.tempPayload = { ...numberProps.tempPayload, type: InputVarType.number, default: '9' } + render() + + fireEvent.change(screen.getByDisplayValue('9'), { target: { value: '' } }) + expect(numberProps.payloadChangeHandlers.default).toHaveBeenCalledWith(undefined) + + const selectWithoutOptionsProps = createBaseProps() + selectWithoutOptionsProps.tempPayload = { ...selectWithoutOptionsProps.tempPayload, type: InputVarType.select } + selectWithoutOptionsProps.options = undefined + render() + + expect(screen.getAllByText('config-select')).toHaveLength(1) + expect(screen.queryByText('ui-select:__empty__')).not.toBeInTheDocument() + }) + + it('should preserve existing select and file defaults when present', () => { + const selectProps = createBaseProps() + selectProps.tempPayload = { ...selectProps.tempPayload, type: InputVarType.select, default: undefined } + selectProps.options = ['alpha', 'beta'] + render() + + expect(screen.getByText('ui-select:__empty__')).toBeInTheDocument() + + const existingFile = { fileId: 'existing-file', type: 'local_file', url: 'https://example.com/existing.png' } + const singleFileProps = createBaseProps() + singleFileProps.tempPayload = { + ...singleFileProps.tempPayload, + type: InputVarType.singleFile, + default: existingFile, + } + render() + + expect(screen.getAllByTestId('file-uploader-value')[0]).toHaveTextContent('"fileId":"existing-file"') + + const existingFiles = [ + { fileId: 'file-1', type: 'local_file', url: 'https://example.com/1.png' }, + { fileId: 'file-2', type: 'remote_url', url: 'https://example.com/2.png' }, + ] + const multiFileProps = createBaseProps() + multiFileProps.tempPayload = { + ...multiFileProps.tempPayload, + type: InputVarType.multiFiles, + default: 
existingFiles, + max_length: 2, + } + render() + + expect(screen.getAllByTestId('file-uploader-value')[1]).toHaveTextContent('"fileId":"file-1"') + expect(screen.getAllByTestId('file-uploader-config')[1]).toHaveTextContent('"number_limits":2') + }) + + it('should render empty fallback values for text, paragraph, and number defaults', () => { + const textProps = createBaseProps() + textProps.isStringInput = true + textProps.tempPayload = { ...textProps.tempPayload, type: InputVarType.textInput, default: undefined } + const textView = render() + + expect(screen.getAllByPlaceholderText('variableConfig.inputPlaceholder')[2]).toHaveValue('') + expect(screen.getByText('config-string')).toHaveAttribute('data-max-length', '256') + textView.unmount() + + const paragraphProps = createBaseProps() + paragraphProps.isStringInput = true + paragraphProps.tempPayload = { ...paragraphProps.tempPayload, type: InputVarType.paragraph, default: undefined } + const paragraphView = render() + + expect(screen.getByText('config-string')).toHaveAttribute('data-max-length', 'Infinity') + expect(paragraphView.container.querySelector('textarea')).toHaveValue('') + paragraphView.unmount() + + const numberProps = createBaseProps() + numberProps.tempPayload = { ...numberProps.tempPayload, type: InputVarType.number, default: undefined } + render() + + expect(screen.getByRole('spinbutton')).toHaveValue(null) + }) }) diff --git a/web/app/components/app/configuration/debug/__tests__/chat-user-input.spec.tsx b/web/app/components/app/configuration/debug/__tests__/chat-user-input.spec.tsx index 39db0da1ec..9fb79076d2 100644 --- a/web/app/components/app/configuration/debug/__tests__/chat-user-input.spec.tsx +++ b/web/app/components/app/configuration/debug/__tests__/chat-user-input.spec.tsx @@ -40,28 +40,49 @@ vi.mock('@/app/components/base/input', () => ({ ), })) -vi.mock('@/app/components/base/select', () => ({ - default: ({ defaultValue, onSelect, items, disabled, className }: { - defaultValue: string 
- onSelect: (item: { value: string }) => void - items: { name: string, value: string }[] - allowSearch?: boolean +vi.mock('@langgenius/dify-ui/select', async () => { + const React = await import('react') + const SelectContext = React.createContext<{ disabled?: boolean - className?: string - }) => ( - - ), -})) + onValueChange?: (value: string) => void + }>({}) + + return { + Select: ({ children, disabled, onValueChange }: { + children: React.ReactNode + disabled?: boolean + onValueChange?: (value: string) => void + }) => ( + +
{children}
+
+ ), + SelectTrigger: ({ children, className }: { children: React.ReactNode, className?: string }) => { + const context = React.useContext(SelectContext) + return ( +
+ + +
+ ) + }, + SelectContent: ({ children }: { children: React.ReactNode }) =>
{children}
, + SelectItem: ({ children, value }: { children: React.ReactNode, value: string }) => { + const context = React.useContext(SelectContext) + return ( + + ) + }, + SelectItemText: ({ children }: { children: React.ReactNode }) => <>{children}, + SelectItemIndicator: () => null, + } +}) vi.mock('@/app/components/base/textarea', () => ({ default: ({ value, onChange, placeholder, readOnly, className }: { @@ -410,11 +431,24 @@ describe('ChatUserInput', () => { })) render() - fireEvent.change(screen.getByTestId('select-input'), { target: { value: 'B' } }) + fireEvent.click(screen.getByTestId('select-B')) expect(mockSetInputs).toHaveBeenCalledWith({ choice: 'B' }) }) + it('should ignore empty select updates', () => { + mockUseContext.mockReturnValue(createContextValue({ + modelConfig: createModelConfig([ + createPromptVariable({ key: 'choice', name: 'Choice', type: 'select', options: ['A', 'B', 'C'] }), + ]), + })) + + render() + fireEvent.click(screen.getByTestId('select-empty')) + + expect(mockSetInputs).not.toHaveBeenCalled() + }) + it('should call setInputs when number input changes', () => { mockUseContext.mockReturnValue(createContextValue({ modelConfig: createModelConfig([ @@ -443,20 +477,30 @@ describe('ChatUserInput', () => { }) it('should not call setInputs for unknown keys', () => { + const filteredPromptVariables = { + length: 1, + forEach: vi.fn(), + map: (callback: (value: ExtendedPromptVariable, index: number) => unknown) => [ + callback(createPromptVariable({ key: 'name', name: 'Name', type: 'string' }), 0), + ], + } mockUseContext.mockReturnValue(createContextValue({ - modelConfig: createModelConfig([ - createPromptVariable({ key: 'name', name: 'Name', type: 'string' }), - ]), + modelConfig: { + ...createModelConfig(), + configs: { + prompt_template: '', + prompt_variables: { + filter: () => filteredPromptVariables, + } as unknown as PromptVariable[], + }, + }, })) render() - // The component filters by promptVariableObj, so unknown keys won't trigger 
updates - // This is tested indirectly - only valid keys should trigger setInputs fireEvent.change(screen.getByTestId('input-Name'), { target: { value: 'Valid' } }) - expect(mockSetInputs).toHaveBeenCalledTimes(1) - expect(mockSetInputs).toHaveBeenCalledWith({ name: 'Valid' }) + expect(mockSetInputs).not.toHaveBeenCalled() }) }) @@ -652,7 +696,7 @@ describe('ChatUserInput', () => { render() const select = screen.getByTestId('select-input') expect(select).toBeInTheDocument() - expect(select.children).toHaveLength(0) + expect(screen.queryAllByRole('option')).toHaveLength(0) }) it('should handle select with undefined options', () => { diff --git a/web/app/components/app/configuration/debug/chat-user-input.tsx b/web/app/components/app/configuration/debug/chat-user-input.tsx index b1285b712c..2eff7ac3ca 100644 --- a/web/app/components/app/configuration/debug/chat-user-input.tsx +++ b/web/app/components/app/configuration/debug/chat-user-input.tsx @@ -1,11 +1,11 @@ import type { Inputs } from '@/models/debug' import { cn } from '@langgenius/dify-ui/cn' +import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import * as React from 'react' import { useEffect } from 'react' import { useTranslation } from 'react-i18next' import { useContext } from 'use-context-selector' import Input from '@/app/components/base/input' -import Select from '@/app/components/base/select' import Textarea from '@/app/components/base/textarea' import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input' import ConfigContext from '@/context/debug-configuration' @@ -102,13 +102,26 @@ const ChatUserInput = ({ )} {type === 'select' && ( )} {type === 'number' && ( ({ + Button: ({ + children, + onClick, + disabled, + className, + }: { + children: React.ReactNode + onClick?: () => void + disabled?: boolean + className?: string + }) => ( + + ), +})) + vi.mock('@/app/components/app/store', () => 
({ useStore: (selector: (state: { setShowAppConfigureFeaturesModal: typeof mockSetShowAppConfigureFeaturesModal }) => unknown) => selector({ setShowAppConfigureFeaturesModal: mockSetShowAppConfigureFeaturesModal, @@ -24,15 +47,51 @@ vi.mock('@/app/components/base/features/new-feature-panel/feature-bar', () => ({ ), })) -vi.mock('@/app/components/base/select', () => ({ - default: ({ onSelect }: { onSelect: (item: { value: string }) => void }) => ( - - ), -})) +vi.mock('@langgenius/dify-ui/select', async () => { + const React = await import('react') + const SelectContext = React.createContext<{ + onValueChange?: (value: string) => void + }>({}) + + return { + Select: ({ children, onValueChange }: { + children: React.ReactNode + onValueChange?: (value: string) => void + }) => ( + +
{children}
+
+ ), + SelectTrigger: ({ children }: { children: React.ReactNode }) => { + const context = React.useContext(SelectContext) + return ( +
+ + +
+ ) + }, + SelectContent: ({ children }: { children: React.ReactNode }) =>
{children}
, + SelectItem: ({ children, value }: { children: React.ReactNode, value: string }) => { + const context = React.useContext(SelectContext) + return ( + + ) + }, + SelectItemText: ({ children }: { children: React.ReactNode }) => <>{children}, + SelectItemIndicator: () => null, + } +}) vi.mock('@/app/components/workflow/nodes/_base/components/before-run-form/bool-input', () => ({ - default: ({ onChange }: { onChange: (value: boolean) => void }) => ( - + default: ({ name, onChange }: { name: string, onChange: (value: boolean) => void }) => ( + ), })) @@ -121,7 +180,7 @@ describe('PromptValuePanel', () => { }) const runButton = screen.getByRole('button', { name: 'appDebug.inputs.run' }) - expect(runButton).not.toBeDisabled() + expect(runButton).toHaveAttribute('data-disabled', 'false') fireEvent.click(runButton) await waitFor(() => expect(mockOnSend).toHaveBeenCalledTimes(1)) }) @@ -137,9 +196,22 @@ describe('PromptValuePanel', () => { }) const runButton = screen.getByRole('button', { name: 'appDebug.inputs.run' }) - expect(runButton).toBeDisabled() - fireEvent.click(runButton) - expect(mockOnSend).not.toHaveBeenCalled() + expect(runButton).toHaveAttribute('data-disabled', 'true') + }) + + it('invokes the tooltip-branch run handler when the click callback is triggered', () => { + renderPanel({ + context: { + mode: AppModeEnum.CHAT, + }, + props: { + appType: AppModeEnum.CHAT, + }, + }) + + fireEvent.click(screen.getByRole('button', { name: 'appDebug.inputs.run' })) + + expect(mockOnSend).toHaveBeenCalledTimes(1) }) it('hydrates default values, supports advanced prompt gating, and toggles the feature panel', () => { @@ -163,12 +235,33 @@ describe('PromptValuePanel', () => { }) expect(mockSetInputs).toHaveBeenCalledWith({ textVar: 'default text' }) - expect(screen.getByRole('button', { name: 'appDebug.inputs.run' })).toBeDisabled() + expect(screen.getByRole('button', { name: 'appDebug.inputs.run' })).toHaveAttribute('data-disabled', 'true') 
fireEvent.click(screen.getByText('feature bar')) expect(mockSetShowAppConfigureFeaturesModal).toHaveBeenCalled() }) + it('disables run for advanced completion mode when the completion prompt is empty', () => { + renderPanel({ + context: { + isAdvancedMode: true, + modelModeType: ModelModeType.completion, + completionPromptConfig: { + prompt: { text: '' }, + conversation_histories_role: { user_prefix: 'user', assistant_prefix: 'assistant' }, + }, + modelConfig: { + configs: { + prompt_template: '', + prompt_variables: [], + }, + }, + }, + }) + + expect(screen.getByRole('button', { name: 'appDebug.inputs.run' })).toHaveAttribute('data-disabled', 'true') + }) + it('renders paragraph, select, number, checkbox, and vision inputs', () => { const onVisionFilesChange = vi.fn() renderPanel({ @@ -203,13 +296,13 @@ describe('PromptValuePanel', () => { }) fireEvent.change(screen.getByPlaceholderText('Paragraph Var'), { target: { value: 'updated paragraph' } }) - fireEvent.click(screen.getByText('select-input')) + fireEvent.click(screen.getByText('b')) fireEvent.change(screen.getByDisplayValue('1'), { target: { value: '2' } }) fireEvent.click(screen.getByText('bool-input')) fireEvent.click(screen.getByText('image-uploader')) expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ paragraphVar: 'updated paragraph' })) - expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ selectVar: 'selected-option' })) + expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ selectVar: 'b' })) expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ numberVar: '2' })) expect(mockSetInputs).toHaveBeenCalledWith(expect.objectContaining({ boolVar: true })) expect(onVisionFilesChange).toHaveBeenCalledWith([ @@ -222,6 +315,127 @@ describe('PromptValuePanel', () => { ]) }) + it('ignores empty select values when choosing prompt options', () => { + renderPanel({ + context: { + modelConfig: { + configs: { + prompt_template: 'prompt template', + 
prompt_variables: [ + { key: 'selectVar', name: 'Select Var', type: 'select', options: ['a', 'b'], required: false }, + ], + }, + }, + }, + props: { + inputs: { + selectVar: 'a', + }, + }, + }) + + fireEvent.click(screen.getByTestId('select-empty')) + + expect(mockSetInputs).not.toHaveBeenCalled() + }) + + it('ignores updates when the rendered field is not tracked in the prompt variable lookup', () => { + const filteredPromptVariables = { + length: 1, + forEach: vi.fn(), + map: (callback: (value: { key: string, name: string, type: string, required: boolean }, index: number) => unknown) => [ + callback({ key: 'textVar', name: 'Text Var', type: 'string', required: true }, 0), + ], + } + + renderPanel({ + context: { + modelConfig: { + configs: { + prompt_template: 'prompt template', + prompt_variables: { + filter: () => filteredPromptVariables, + }, + }, + }, + }, + props: { + inputs: { textVar: '' }, + }, + }) + + fireEvent.change(screen.getByPlaceholderText('Text Var'), { target: { value: 'ignored' } }) + + expect(mockSetInputs).not.toHaveBeenCalled() + }) + + it('renders empty select and number placeholders when no value is provided', () => { + renderPanel({ + context: { + modelConfig: { + configs: { + prompt_template: 'prompt template', + prompt_variables: [ + { key: 'selectVar', name: 'Select Var', type: 'select', required: false }, + { key: 'numberVar', name: 'Number Var', type: 'number', required: true }, + ], + }, + }, + }, + props: { + inputs: { + selectVar: '', + numberVar: '', + }, + }, + }) + + expect(screen.getByText('common.placeholder.select')).toBeInTheDocument() + expect(screen.getByPlaceholderText('Number Var')).toHaveValue(null) + expect(screen.queryAllByRole('option')).toHaveLength(0) + }) + + it('falls back to the checkbox key when the label is missing from the rendered collection', () => { + const filteredPromptVariables = { + length: 1, + forEach: vi.fn(), + map: (callback: (value: { key: string, name: string, type: string, required: boolean }, 
index: number) => unknown) => [ + callback({ key: 'boolVar', name: '', type: 'checkbox', required: false }, 0), + ], + } + + renderPanel({ + context: { + modelConfig: { + configs: { + prompt_template: 'prompt template', + prompt_variables: { + filter: () => filteredPromptVariables, + }, + }, + }, + }, + props: { + inputs: { + boolVar: false, + }, + }, + }) + + expect(screen.getByTestId('bool-input-boolVar')).toBeInTheDocument() + }) + + it('marks actions as disabled when readonly even if the prompt is runnable', () => { + renderPanel({ + context: { + readonly: true, + }, + }) + + expect(screen.getByRole('button', { name: 'common.operation.clear' })).toHaveAttribute('data-disabled', 'true') + expect(screen.getByRole('button', { name: 'appDebug.inputs.run' })).toHaveAttribute('data-disabled', 'true') + }) + it('collapses the user input panel and hides the clear and run actions', () => { renderPanel() diff --git a/web/app/components/app/configuration/prompt-value-panel/index.tsx b/web/app/components/app/configuration/prompt-value-panel/index.tsx index 94bc48da29..c3ba69bf34 100644 --- a/web/app/components/app/configuration/prompt-value-panel/index.tsx +++ b/web/app/components/app/configuration/prompt-value-panel/index.tsx @@ -4,6 +4,7 @@ import type { Inputs } from '@/models/debug' import type { VisionFile, VisionSettings } from '@/types/app' import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' +import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import { RiArrowDownSLine, RiArrowRightSLine, @@ -17,7 +18,6 @@ import { useStore as useAppStore } from '@/app/components/app/store' import FeatureBar from '@/app/components/base/features/new-feature-panel/feature-bar' import TextGenerationImageUploader from '@/app/components/base/image-uploader/text-generation-image-uploader' import Input from '@/app/components/base/input' -import Select from 
'@/app/components/base/select' import Textarea from '@/app/components/base/textarea' import Tooltip from '@/app/components/base/tooltip' import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input' @@ -156,14 +156,26 @@ const PromptValuePanel: FC = ({ )} {type === 'select' && ( )} {type === 'number' && ( { />, ) - expect(screen.getByText('importFromDSL'))!.toBeInTheDocument() + expect(screen.getByText('importApp'))!.toBeInTheDocument() await waitFor(() => { expect(screen.getByText('demo.yml'))!.toBeInTheDocument() @@ -161,7 +161,7 @@ describe('CreateFromDSLModal', () => { }) expect(screen.getByPlaceholderText('importFromDSLUrlPlaceholder'))!.toBeInTheDocument() - const closeTrigger = screen.getByText('importFromDSL').parentElement?.querySelector('.cursor-pointer.items-center') as HTMLElement + const closeTrigger = screen.getByText('importApp').parentElement?.querySelector('.cursor-pointer.items-center') as HTMLElement fireEvent.click(closeTrigger) expect(handleClose).toHaveBeenCalledTimes(1) }) diff --git a/web/app/components/app/create-from-dsl-modal/index.tsx b/web/app/components/app/create-from-dsl-modal/index.tsx index 4f99fe9027..bc5f352634 100644 --- a/web/app/components/app/create-from-dsl-modal/index.tsx +++ b/web/app/components/app/create-from-dsl-modal/index.tsx @@ -225,7 +225,7 @@ const CreateFromDSLModal = ({ show, onSuccess, onClose, activeTab = CreateFromDS onClose={noop} >
- {t('importFromDSL', { ns: 'app' })} + {t('importApp', { ns: 'app' })}
onClose()} diff --git a/web/app/components/app/overview/settings/index.tsx b/web/app/components/app/overview/settings/index.tsx index b8ac9af607..3a3eea4d5c 100644 --- a/web/app/components/app/overview/settings/index.tsx +++ b/web/app/components/app/overview/settings/index.tsx @@ -5,6 +5,7 @@ import type { AppDetailResponse } from '@/models/app' import type { AppIconType, AppSSO, Language } from '@/types/app' import { Button } from '@langgenius/dify-ui/button' import { cn } from '@langgenius/dify-ui/cn' +import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import { Switch } from '@langgenius/dify-ui/switch' import { toast } from '@langgenius/dify-ui/toast' import { RiArrowRightSLine, RiCloseLine } from '@remixicon/react' @@ -19,7 +20,6 @@ import { SparklesSoft } from '@/app/components/base/icons/src/public/common' import Input from '@/app/components/base/input' import Modal from '@/app/components/base/modal' import PremiumBadge from '@/app/components/base/premium-badge' -import { SimpleSelect } from '@/app/components/base/select' import Textarea from '@/app/components/base/textarea' import Tooltip from '@/app/components/base/tooltip' import { ACCOUNT_SETTING_TAB } from '@/app/components/header/account-setting/constants' @@ -57,6 +57,10 @@ export type ConfigParams = { } const prefixSettings = 'overview.appInfo.settings' +type SelectOption = { + value: string + name: string +} const SettingsModal: FC = ({ isChat, @@ -110,6 +114,8 @@ const SettingsModal: FC = ({ const { enableBilling, plan, webappCopyrightEnabled } = useProviderContext() const { setShowPricingModal, setShowAccountSettingModal } = useModalContext() const isFreePlan = plan.type === 'sandbox' + const languageOptions: SelectOption[] = languages.filter(item => item.supported) + const selectedLanguage = languageOptions.find(item => item.value === language) const handlePlanClick = useCallback(() => { if (isFreePlan) 
setShowPricingModal() @@ -303,13 +309,26 @@ const SettingsModal: FC = ({ {/* language */}
{t(`${prefixSettings}.language`, { ns: 'appOverview' })}
- item.supported)} - defaultValue={language} - onSelect={item => setLanguage(item.value as Language)} - notClearable - /> +
{/* theme color */} {isChat && ( diff --git a/web/app/components/apps/__tests__/index.spec.tsx b/web/app/components/apps/__tests__/index.spec.tsx index 2e0d1bcc84..94fa9f3484 100644 --- a/web/app/components/apps/__tests__/index.spec.tsx +++ b/web/app/components/apps/__tests__/index.spec.tsx @@ -7,9 +7,21 @@ import { useContextSelector } from 'use-context-selector' import AppListContext from '@/context/app-list-context' import { fetchAppDetail } from '@/service/explore' import { AppModeEnum } from '@/types/app' - import Apps from '../index' +vi.mock('@/next/dynamic', () => ({ + default: (loader: () => Promise<{ default: React.ComponentType }>) => { + const LazyComp = React.lazy(loader) + return function DynamicWrapper(props: Record) { + return React.createElement( + React.Suspense, + { fallback: null }, + React.createElement(LazyComp, props), + ) + } + }, +})) + let documentTitleCalls: string[] = [] let educationInitCalls: number = 0 const mockHandleImportDSL = vi.fn() @@ -65,6 +77,16 @@ vi.mock('@/hooks/use-import-dsl', () => ({ }), })) +const mockReplace = vi.fn() +let mockSearchParams = new URLSearchParams() + +vi.mock('@/next/navigation', () => ({ + useRouter: () => ({ + replace: mockReplace, + }), + useSearchParams: () => mockSearchParams, +})) + vi.mock('../list', () => { const MockList = () => { const setShowTryAppPanel = useContextSelector(AppListContext, ctx => ctx.setShowTryAppPanel) @@ -129,6 +151,16 @@ vi.mock('../../app/create-from-dsl-modal/dsl-confirm-modal', () => ({ ), })) +vi.mock('../import-from-marketplace-template-modal', () => ({ + default: ({ templateId, onClose, onConfirm }: { templateId: string, onClose: () => void, onConfirm: (dsl: string) => void }) => ( +
+ {templateId} + + +
+ ), +})) + vi.mock('@/service/explore', () => ({ fetchAppDetail: vi.fn(), })) @@ -161,6 +193,8 @@ describe('Apps', () => { vi.clearAllMocks() documentTitleCalls = [] educationInitCalls = 0 + mockSearchParams = new URLSearchParams() + mockReplace.mockClear() mockFetchAppDetail.mockResolvedValue({ id: 'template-1', name: 'Sample App', @@ -304,6 +338,66 @@ describe('Apps', () => { }) }) + describe('Marketplace Template', () => { + it('should render the template modal when template-id is in search params', async () => { + mockSearchParams = new URLSearchParams('template-id=tpl-42') + renderWithClient() + + expect(await screen.findByTestId('marketplace-template-modal')).toBeInTheDocument() + expect(screen.getByTestId('template-id')).toHaveTextContent('tpl-42') + }) + + it('should not render the template modal when no template-id is present', () => { + renderWithClient() + + expect(screen.queryByTestId('marketplace-template-modal')).not.toBeInTheDocument() + }) + + it('should close the template modal and remove template-id from URL', async () => { + mockSearchParams = new URLSearchParams('template-id=tpl-42') + renderWithClient() + + fireEvent.click(await screen.findByTestId('close-template')) + + expect(mockReplace).toHaveBeenCalledTimes(1) + const replaceArg = mockReplace.mock.calls[0]![0] as string + expect(replaceArg).not.toContain('template-id') + }) + + it('should import DSL from marketplace template on confirm', async () => { + mockHandleImportDSL.mockImplementation(async (_payload: unknown, options: { onSuccess?: () => void }) => { + options.onSuccess?.() + }) + mockSearchParams = new URLSearchParams('template-id=tpl-42') + renderWithClient() + + fireEvent.click(await screen.findByTestId('confirm-template')) + + await waitFor(() => { + expect(mockHandleImportDSL).toHaveBeenCalledWith( + { mode: 'yaml-content', yaml_content: 'yaml-dsl-content' }, + expect.objectContaining({ onSuccess: expect.any(Function) }), + ) + expect(mockReplace).toHaveBeenCalled() + }) + }) 
+ + it('should show DSL confirm modal when marketplace import is pending', async () => { + mockHandleImportDSL.mockImplementation(async (_payload: unknown, options: { onPending?: () => void }) => { + options.onPending?.() + }) + mockSearchParams = new URLSearchParams('template-id=tpl-42') + renderWithClient() + + fireEvent.click(await screen.findByTestId('confirm-template')) + + await waitFor(() => { + expect(screen.getByTestId('dsl-confirm-modal')).toBeInTheDocument() + expect(mockReplace).toHaveBeenCalled() + }) + }) + }) + describe('Styling', () => { it('should have overflow-y-auto class', () => { const { container } = renderWithClient() diff --git a/web/app/components/apps/import-from-marketplace-template-modal.tsx b/web/app/components/apps/import-from-marketplace-template-modal.tsx new file mode 100644 index 0000000000..a6a3dee8e4 --- /dev/null +++ b/web/app/components/apps/import-from-marketplace-template-modal.tsx @@ -0,0 +1,182 @@ +'use client' + +import { Button } from '@langgenius/dify-ui/button' +import { Dialog, DialogContent } from '@langgenius/dify-ui/dialog' +import { toast } from '@langgenius/dify-ui/toast' +import { RiCloseLine } from '@remixicon/react' +import { useCallback, useMemo, useRef, useState } from 'react' +import { useTranslation } from 'react-i18next' +import { MARKETPLACE_API_PREFIX } from '@/config' +import { + fetchMarketplaceTemplateDSL, + useMarketplaceTemplateDetail, +} from '@/service/marketplace-templates' + +type ImportFromMarketplaceTemplateModalProps = { + templateId: string + onClose: () => void + onConfirm: (dslContent: string) => void +} + +const ImportFromMarketplaceTemplateModal = ({ + templateId, + onClose, + onConfirm, +}: ImportFromMarketplaceTemplateModalProps) => { + const { t } = useTranslation() + const { data, isLoading, isError } = useMarketplaceTemplateDetail(templateId) + const template = data?.data + const [importing, setImporting] = useState(false) + const isImportingRef = useRef(false) + + const 
CATEGORY_I18N_MAP: Record = useMemo(() => ({ + marketing: t('marketplace.template.category.marketing', { ns: 'app' }), + sales: t('marketplace.template.category.sales', { ns: 'app' }), + support: t('marketplace.template.category.support', { ns: 'app' }), + operations: t('marketplace.template.category.operations', { ns: 'app' }), + it: t('marketplace.template.category.it', { ns: 'app' }), + knowledge: t('marketplace.template.category.knowledge', { ns: 'app' }), + design: t('marketplace.template.category.design', { ns: 'app' }), + }), [t]) + + const translateCategory = useCallback((slug: string) => { + return CATEGORY_I18N_MAP[slug] ?? slug + }, [CATEGORY_I18N_MAP]) + + const handleConfirm = useCallback(async () => { + if (isImportingRef.current) + return + isImportingRef.current = true + setImporting(true) + try { + const dsl = await fetchMarketplaceTemplateDSL(templateId) + onConfirm(dsl) + } + catch { + toast.error(t('marketplace.template.importFailed', { ns: 'app' })) + } + finally { + setImporting(false) + isImportingRef.current = false + } + }, [templateId, onConfirm, t]) + + return ( + { + if (!open) + onClose() + }} + > + +
+ {t('marketplace.template.modalTitle', { ns: 'app' })} +
+ +
+
+ +
+ {isLoading && ( +
+
Loading...
+
+ )} + + {isError && ( +
+
+ {t('marketplace.template.fetchFailed', { ns: 'app' })} +
+
+ )} + + {template && ( +
+
+ {template.icon_file_key + ? ( + {template.template_name} + ) + : ( +
+ {template.icon || '📄'} +
+ )} +
+
{template.template_name}
+
+ + {t('marketplace.template.publishedBy', { ns: 'app' })} + {' '} + {template.publisher_unique_handle} + + · + + {t('marketplace.template.usageCount', { ns: 'app' })} + {' '} + {template.usage_count} + +
+
+
+ + {template.overview && ( +
+
+ {t('marketplace.template.overview', { ns: 'app' })} +
+
+ {template.overview} +
+
+ )} + + {template.categories.length > 0 && ( +
+ {template.categories.map(cat => ( + + {translateCategory(cat)} + + ))} +
+ )} +
+ )} +
+ +
+ + +
+
+
+ ) +} + +export default ImportFromMarketplaceTemplateModal diff --git a/web/app/components/apps/index.tsx b/web/app/components/apps/index.tsx index 9bf07e81e6..9d74968605 100644 --- a/web/app/components/apps/index.tsx +++ b/web/app/components/apps/index.tsx @@ -9,6 +9,7 @@ import useDocumentTitle from '@/hooks/use-document-title' import { useImportDSL } from '@/hooks/use-import-dsl' import { DSLImportMode } from '@/models/app' import dynamic from '@/next/dynamic' +import { useRouter, useSearchParams } from '@/next/navigation' import { fetchAppDetail } from '@/service/explore' import { trackCreateApp } from '@/utils/create-app-tracking' import List from './list' @@ -16,9 +17,14 @@ import List from './list' const DSLConfirmModal = dynamic(() => import('../app/create-from-dsl-modal/dsl-confirm-modal'), { ssr: false }) const CreateAppModal = dynamic(() => import('../explore/create-app-modal'), { ssr: false }) const TryApp = dynamic(() => import('../explore/try-app'), { ssr: false }) +const ImportFromMarketplaceTemplateModal = dynamic(() => import('./import-from-marketplace-template-modal'), { ssr: false }) const Apps = () => { const { t } = useTranslation() + const searchParams = useSearchParams() + const { replace } = useRouter() + const templateId = searchParams.get('template-id') + const templateDismissedRef = useRef(false) useDocumentTitle(t('menus.apps', { ns: 'common' })) useEducationInit() @@ -58,6 +64,14 @@ const Apps = () => { const [showDSLConfirmModal, setShowDSLConfirmModal] = useState(false) + const handleCloseTemplateModal = useCallback(() => { + templateDismissedRef.current = true + const params = new URLSearchParams(searchParams.toString()) + params.delete('template-id') + const query = params.toString() + replace(query ? 
`?${query}` : window.location.pathname, { scroll: false }) + }, [searchParams, replace]) + const { handleImportDSL, handleImportDSLConfirm, @@ -74,6 +88,22 @@ const Apps = () => { }) }, [handleImportDSLConfirm, onSuccess, trackCurrentCreateApp]) + const handleMarketplaceTemplateConfirm = useCallback(async (dslContent: string) => { + await handleImportDSL({ + mode: DSLImportMode.YAML_CONTENT, + yaml_content: dslContent, + }, { + onSuccess: () => { + handleCloseTemplateModal() + onSuccess() + }, + onPending: () => { + handleCloseTemplateModal() + setShowDSLConfirmModal(true) + }, + }) + }, [handleImportDSL, handleCloseTemplateModal, onSuccess]) + const onCreate: CreateAppModalProps['onConfirm'] = useCallback(async ({ name, icon_type, @@ -152,6 +182,14 @@ const Apps = () => { onHide={() => setIsShowCreateModal(false)} /> )} + + {templateId && !templateDismissedRef.current && ( + + )}
) diff --git a/web/app/components/base/app-icon-picker/__tests__/index.spec.tsx b/web/app/components/base/app-icon-picker/__tests__/index.spec.tsx index 07dd809f41..7f452e64e9 100644 --- a/web/app/components/base/app-icon-picker/__tests__/index.spec.tsx +++ b/web/app/components/base/app-icon-picker/__tests__/index.spec.tsx @@ -1,3 +1,4 @@ +import type { ComponentProps } from 'react' import type { Area } from 'react-easy-crop' import type { ImageFile } from '@/types/app' import { fireEvent, render, screen, waitFor } from '@testing-library/react' @@ -122,11 +123,11 @@ describe('AppIconPicker', () => { }) } - const renderPicker = () => { + const renderPicker = (props: Partial> = {}) => { const onSelect = vi.fn() const onClose = vi.fn() - const { container } = render() + const { container } = render() return { onSelect, onClose, container } } @@ -220,6 +221,20 @@ describe('AppIconPicker', () => { expect(onSelect).not.toHaveBeenCalled() }) + + it('should submit the initial emoji when provided', async () => { + const { onSelect } = renderPicker({ initialEmoji: { icon: 'rabbit', background: '#E4FBCC' } }) + + await userEvent.click(screen.getByText(/ok/i)) + + await waitFor(() => { + expect(onSelect).toHaveBeenCalledWith({ + type: 'emoji', + icon: 'rabbit', + background: '#E4FBCC', + }) + }) + }) }) describe('Image Upload', () => { diff --git a/web/app/components/base/app-icon-picker/index.tsx b/web/app/components/base/app-icon-picker/index.tsx index 77bc0cd434..64a88f16e1 100644 --- a/web/app/components/base/app-icon-picker/index.tsx +++ b/web/app/components/base/app-icon-picker/index.tsx @@ -34,12 +34,17 @@ export type AppIconSelection = AppIconEmojiSelection | AppIconImageSelection type AppIconPickerProps = { onSelect?: (payload: AppIconSelection) => void onClose?: () => void + initialEmoji?: { + icon: string + background?: string | null + } className?: string } const AppIconPicker: FC = ({ onSelect, onClose, + initialEmoji, className, }) => { const { t } = 
useTranslation() @@ -138,7 +143,14 @@ const AppIconPicker: FC = ({
)} - {activeTab === 'emoji' && } + {activeTab === 'emoji' && ( + + )} {activeTab === 'image' && } diff --git a/web/app/components/base/chat/chat-with-history/inputs-form/__tests__/content.spec.tsx b/web/app/components/base/chat/chat-with-history/inputs-form/__tests__/content.spec.tsx index 6081024490..126ee77ae5 100644 --- a/web/app/components/base/chat/chat-with-history/inputs-form/__tests__/content.spec.tsx +++ b/web/app/components/base/chat/chat-with-history/inputs-form/__tests__/content.spec.tsx @@ -270,7 +270,7 @@ describe('InputsFormContent', () => { renderWithContext(, context) const selNodes = screen.getAllByText('Sel') expect(selNodes.length).toBeGreaterThan(0) - expect(screen.queryByText('existing')).toBeNull() + expect(screen.getByText('existing')).toBeInTheDocument() }) it('handles select input empty branches (no current value -> show placeholder)', () => { diff --git a/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx b/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx index 4baa46744d..380b914492 100644 --- a/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx +++ b/web/app/components/base/chat/chat-with-history/inputs-form/content.tsx @@ -1,9 +1,9 @@ +import { Select, SelectContent, SelectItem, SelectItemIndicator, SelectItemText, SelectTrigger } from '@langgenius/dify-ui/select' import * as React from 'react' import { memo, useCallback } from 'react' import { useTranslation } from 'react-i18next' import { FileUploaderInAttachmentWrapper } from '@/app/components/base/file-uploader' import Input from '@/app/components/base/input' -import { PortalSelect } from '@/app/components/base/select' import Textarea from '@/app/components/base/textarea' import BoolInput from '@/app/components/workflow/nodes/_base/components/before-run-form/bool-input' import CodeEditor from '@/app/components/workflow/nodes/_base/components/editor/code-editor' @@ -85,13 +85,22 @@ const InputsFormContent = ({ showTip }: 
Props) => { /> )} {form.type === InputVarType.select && ( - ({ value: option, name: option }))} - onSelect={item => handleFormChange(form.variable, item.value as string)} - placeholder={form.label} - /> + )} {form.type === InputVarType.singleFile && ( { /> )} {form.type === InputVarType.select && ( - ({ value: option, name: option }))} - onSelect={item => handleFormChange(form.variable, item.value as string)} - placeholder={form.label} - /> + )} {form.type === InputVarType.singleFile && ( { expect(mockCopy).toHaveBeenCalledWith('test content') }) - it('calls reset on mouse leave', () => { + it('does not reset on mouse leave (relies on hook timeout)', () => { render() const button = screen.getByRole('button') fireEvent.mouseLeave(button.firstChild as Element) - expect(mockReset).toHaveBeenCalledTimes(1) + expect(mockReset).not.toHaveBeenCalled() }) }) }) @@ -88,11 +88,11 @@ describe('CopyFeedbackNew', () => { expect(mockCopy).toHaveBeenCalledWith('test content') }) - it('calls reset on mouse leave', () => { + it('does not reset on mouse leave (relies on hook timeout)', () => { const { container } = render() const clickableArea = container.querySelector('.cursor-pointer')!.firstChild as HTMLElement fireEvent.mouseLeave(clickableArea) - expect(mockReset).toHaveBeenCalledTimes(1) + expect(mockReset).not.toHaveBeenCalled() }) }) }) diff --git a/web/app/components/base/copy-feedback/index.tsx b/web/app/components/base/copy-feedback/index.tsx index 5210066670..431b697a6a 100644 --- a/web/app/components/base/copy-feedback/index.tsx +++ b/web/app/components/base/copy-feedback/index.tsx @@ -19,7 +19,10 @@ const prefixEmbedded = 'overview.appInfo.embedded' const CopyFeedback = ({ content }: Props) => { const { t } = useTranslation() - const { copied, copy, reset } = useClipboard() + // Rely on useClipboard's own timer to flip `copied` back to false so the + // "Copied" tooltip stays visible long enough to be read, matching the + // KeyValueItem pattern. 
Do NOT reset on mouse leave. + const { copied, copy } = useClipboard({ timeout: 2000 }) const tooltipText = copied ? t(`${prefixEmbedded}.copied`, { ns: 'appOverview' }) @@ -36,10 +39,7 @@ const CopyFeedback = ({ content }: Props) => { popupContent={safeText} > -
+
{copied && } {!copied && }
@@ -52,7 +52,7 @@ export default CopyFeedback export const CopyFeedbackNew = ({ content, className }: Pick) => { const { t } = useTranslation() - const { copied, copy, reset } = useClipboard() + const { copied, copy } = useClipboard({ timeout: 2000 }) const tooltipText = copied ? t(`${prefixEmbedded}.copied`, { ns: 'appOverview' }) @@ -73,7 +73,6 @@ export const CopyFeedbackNew = ({ content, className }: Pick
diff --git a/web/app/components/base/date-and-time-picker/time-picker/index.tsx b/web/app/components/base/date-and-time-picker/time-picker/index.tsx index 3fcb88215e..34ec21f0a5 100644 --- a/web/app/components/base/date-and-time-picker/time-picker/index.tsx +++ b/web/app/components/base/date-and-time-picker/time-picker/index.tsx @@ -206,7 +206,7 @@ const TimePicker = ({ > = ({ + emoji, + background, onSelect, className, }) => { const { categories } = data as EmojiMartData - const [selectedEmoji, setSelectedEmoji] = useState('') - const [selectedBackground, setSelectedBackground] = useState(backgroundColors[0]) - const [showStyleColors, setShowStyleColors] = useState(false) + const [selectedEmoji, setSelectedEmoji] = useState(emoji || '') + const [selectedBackground, setSelectedBackground] = useState(background || backgroundColors[0]) + const [showStyleColors, setShowStyleColors] = useState(!!emoji) const [searchedEmojis, setSearchedEmojis] = useState([]) const [isSearching, setIsSearching] = useState(false) React.useEffect(() => { if (selectedEmoji) { - setShowStyleColors(true) /* v8 ignore next 2 - @preserve */ if (selectedBackground) onSelect?.(selectedEmoji, selectedBackground) @@ -105,6 +106,7 @@ const EmojiPickerInner: FC = ({ className="inline-flex h-10 w-10 items-center justify-center rounded-lg" onClick={() => { setSelectedEmoji(emoji) + setShowStyleColors(true) }} >
@@ -130,6 +132,7 @@ const EmojiPickerInner: FC = ({ className="inline-flex h-10 w-10 items-center justify-center rounded-lg" onClick={() => { setSelectedEmoji(emoji) + setShowStyleColors(true) }} >
diff --git a/web/app/components/base/emoji-picker/__tests__/Inner.spec.tsx b/web/app/components/base/emoji-picker/__tests__/Inner.spec.tsx index f0cf3091d7..41683d7af3 100644 --- a/web/app/components/base/emoji-picker/__tests__/Inner.spec.tsx +++ b/web/app/components/base/emoji-picker/__tests__/Inner.spec.tsx @@ -45,6 +45,15 @@ describe('EmojiPickerInner', () => { expect(screen.getByText('food'))!.toBeInTheDocument() expect(screen.getByPlaceholderText('Search emojis...'))!.toBeInTheDocument() }) + + it('initializes selected emoji and background when provided', async () => { + render() + + expect(screen.getByText('Choose Style'))!.toBeInTheDocument() + await waitFor(() => { + expect(mockOnSelect).toHaveBeenCalledWith('rabbit', '#E4FBCC') + }) + }) }) describe('User Interactions', () => { diff --git a/web/app/components/base/features/new-feature-panel/__tests__/follow-up-setting-modal.spec.tsx b/web/app/components/base/features/new-feature-panel/__tests__/follow-up-setting-modal.spec.tsx new file mode 100644 index 0000000000..9437a19824 --- /dev/null +++ b/web/app/components/base/features/new-feature-panel/__tests__/follow-up-setting-modal.spec.tsx @@ -0,0 +1,97 @@ +import type { SuggestedQuestionsAfterAnswer } from '@/app/components/base/features/types' +import { fireEvent, render, screen } from '@testing-library/react' +import userEvent from '@testing-library/user-event' +import FollowUpSettingModal from '../follow-up-setting-modal' + +vi.mock('@/app/components/header/account-setting/model-provider-page/hooks', () => ({ + useModelListAndDefaultModelAndCurrentProviderAndModel: () => ({ + defaultModel: { + provider: { + provider: 'openai', + }, + model: 'gpt-4o-mini', + }, + }), +})) + +vi.mock('@/app/components/header/account-setting/model-provider-page/model-parameter-modal', () => ({ + default: ({ provider, modelId }: { provider: string, modelId: string }) => ( +
{`${provider}:${modelId}`}
+ ), +})) + +const renderModal = (data: SuggestedQuestionsAfterAnswer = { enabled: true }) => { + const onSave = vi.fn() + const onCancel = vi.fn() + + render( + , + ) + + return { + onSave, + onCancel, + } +} + +describe('FollowUpSettingModal', () => { + beforeEach(() => { + vi.clearAllMocks() + }) + + describe('Default Prompt', () => { + it('should show the system default prompt and save without a custom prompt when no custom prompt is configured', async () => { + const user = userEvent.setup() + const { onSave } = renderModal() + + expect(screen.getByText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.defaultPromptOption')).toBeInTheDocument() + expect(screen.getByText(/Please predict the three most likely follow-up questions a user would ask/)).toBeInTheDocument() + + await user.click(screen.getByText(/common\.operation\.save/)) + + expect(onSave).toHaveBeenCalledWith(expect.objectContaining({ + prompt: undefined, + model: expect.objectContaining({ + provider: 'openai', + name: 'gpt-4o-mini', + }), + })) + }) + }) + + describe('Custom Prompt', () => { + it('should enable custom prompt input and save the custom prompt when selected', async () => { + const user = userEvent.setup() + const { onSave } = renderModal() + + await user.click(screen.getByText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.customPromptOption').closest('button')!) + + const textarea = screen.getByPlaceholderText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.promptPlaceholder') + expect(textarea).toHaveAttribute('maxLength', '1000') + + fireEvent.change( + textarea, + { target: { value: 'Use a custom follow-up prompt.' 
} }, + ) + + await user.click(screen.getByText(/common\.operation\.save/)) + + expect(onSave).toHaveBeenCalledWith(expect.objectContaining({ + prompt: 'Use a custom follow-up prompt.', + })) + }) + + it('should disable save when custom prompt is selected but empty', async () => { + const user = userEvent.setup() + renderModal() + + await user.click(screen.getByText('appDebug.feature.suggestedQuestionsAfterAnswer.modal.customPromptOption').closest('button')!) + + expect(screen.getByText(/common\.operation\.save/).closest('button')).toBeDisabled() + }) + }) +}) diff --git a/web/app/components/base/features/new-feature-panel/__tests__/follow-up.spec.tsx b/web/app/components/base/features/new-feature-panel/__tests__/follow-up.spec.tsx index 0e7c6aa558..323032249d 100644 --- a/web/app/components/base/features/new-feature-panel/__tests__/follow-up.spec.tsx +++ b/web/app/components/base/features/new-feature-panel/__tests__/follow-up.spec.tsx @@ -1,12 +1,55 @@ -import type { OnFeaturesChange } from '../../types' +import type { + OnFeaturesChange, + SuggestedQuestionsAfterAnswer, +} from '../../types' import { fireEvent, render, screen } from '@testing-library/react' import * as React from 'react' import { FeaturesProvider } from '../../context' import FollowUp from '../follow-up' -const renderWithProvider = (props: { disabled?: boolean, onChange?: OnFeaturesChange } = {}) => { +vi.mock('../follow-up-setting-modal', () => ({ + default: ({ onSave, onCancel }: { onSave: (newState: unknown) => void, onCancel: () => void }) => ( +
+ + +
+ ), +})) + +const renderWithProvider = ( + props: { + disabled?: boolean + onChange?: OnFeaturesChange + suggested?: SuggestedQuestionsAfterAnswer + } = {}, +) => { return render( - + , ) @@ -45,4 +88,44 @@ describe('FollowUp', () => { expect(() => fireEvent.click(screen.getByRole('switch'))).not.toThrow() }) + + it('should render edit button when enabled and hovering', () => { + renderWithProvider({ + suggested: { + enabled: true, + }, + }) + + fireEvent.mouseEnter(screen.getByText(/feature\.suggestedQuestionsAfterAnswer\.title/).closest('[class]')!) + + expect(screen.getByText(/operation\.settings/)).toBeInTheDocument() + }) + + it('should open settings modal and save follow-up config', () => { + const onChange = vi.fn() + renderWithProvider({ + onChange, + suggested: { + enabled: true, + }, + }) + + fireEvent.mouseEnter(screen.getByText(/feature\.suggestedQuestionsAfterAnswer\.title/).closest('[class]')!) + fireEvent.click(screen.getByText(/operation\.settings/)) + + expect(screen.getByTestId('follow-up-setting-modal')).toBeInTheDocument() + + fireEvent.click(screen.getByText('save-settings')) + + expect(onChange).toHaveBeenCalledWith(expect.objectContaining({ + suggested: expect.objectContaining({ + enabled: true, + prompt: 'test prompt', + model: expect.objectContaining({ + provider: 'openai', + name: 'gpt-4o-mini', + }), + }), + })) + }) }) diff --git a/web/app/components/base/features/new-feature-panel/follow-up-setting-modal.tsx b/web/app/components/base/features/new-feature-panel/follow-up-setting-modal.tsx new file mode 100644 index 0000000000..24e89c3517 --- /dev/null +++ b/web/app/components/base/features/new-feature-panel/follow-up-setting-modal.tsx @@ -0,0 +1,241 @@ +import type { SuggestedQuestionsAfterAnswer } from '@/app/components/base/features/types' +import type { FormValue } from '@/app/components/header/account-setting/model-provider-page/declarations' +import type { + CompletionParams, + Model, + ModelModeType, +} from '@/types/app' +import 
{ Button } from '@langgenius/dify-ui/button' +import { cn } from '@langgenius/dify-ui/cn' +import { Dialog, DialogCloseButton, DialogContent, DialogTitle } from '@langgenius/dify-ui/dialog' +import { produce } from 'immer' +import { useCallback, useMemo, useState } from 'react' +import { useTranslation } from 'react-i18next' +import Radio from '@/app/components/base/radio/ui' +import Textarea from '@/app/components/base/textarea' +import { ModelTypeEnum } from '@/app/components/header/account-setting/model-provider-page/declarations' +import { useModelListAndDefaultModelAndCurrentProviderAndModel } from '@/app/components/header/account-setting/model-provider-page/hooks' +import ModelParameterModal from '@/app/components/header/account-setting/model-provider-page/model-parameter-modal' +import { ModelModeType as ModelModeTypeEnum } from '@/types/app' + +type FollowUpSettingModalProps = { + data: SuggestedQuestionsAfterAnswer + onSave: (newState: SuggestedQuestionsAfterAnswer) => void + onCancel: () => void +} + +const DEFAULT_COMPLETION_PARAMS: CompletionParams = { + temperature: 0.7, + max_tokens: 0, + top_p: 0, + echo: false, + stop: [], + presence_penalty: 0, + frequency_penalty: 0, +} + +const DEFAULT_FOLLOW_UP_PROMPT = `Please predict the three most likely follow-up questions a user would ask, keep each question under 20 characters, use the same language as the assistant's latest response, and output a JSON array like ["question1", "question2", "question3"].` +const CUSTOM_FOLLOW_UP_PROMPT_MAX_LENGTH = 1000 + +const getInitialModel = (model?: Model): Model => ({ + provider: model?.provider || '', + name: model?.name || '', + mode: model?.mode || ModelModeTypeEnum.chat, + completion_params: { + ...DEFAULT_COMPLETION_PARAMS, + ...(model?.completion_params || {}), + }, +}) + +const PROMPT_MODE = { + default: 'default', + custom: 'custom', +} as const + +type PromptMode = typeof PROMPT_MODE[keyof typeof PROMPT_MODE] + +const FollowUpSettingModal = ({ + data, + 
onSave, + onCancel, +}: FollowUpSettingModalProps) => { + const { t } = useTranslation() + const [model, setModel] = useState(() => getInitialModel(data.model)) + const [prompt, setPrompt] = useState(data.prompt || '') + const [promptMode, setPromptMode] = useState( + data.prompt ? PROMPT_MODE.custom : PROMPT_MODE.default, + ) + const { defaultModel } = useModelListAndDefaultModelAndCurrentProviderAndModel(ModelTypeEnum.textGeneration) + const selectedModel = useMemo(() => { + if (model.provider && model.name) + return model + + if (!defaultModel) + return model + + return { + ...model, + provider: defaultModel.provider.provider, + name: defaultModel.model, + } + }, [defaultModel, model]) + + const handleModelChange = useCallback((newValue: { modelId: string, provider: string, mode?: string, features?: string[] }) => { + setModel(prev => ({ + ...prev, + provider: newValue.provider, + name: newValue.modelId, + mode: (newValue.mode as ModelModeType) || prev.mode || ModelModeTypeEnum.chat, + })) + }, []) + + const handleCompletionParamsChange = useCallback((newParams: FormValue) => { + setModel({ + ...selectedModel, + completion_params: { + ...DEFAULT_COMPLETION_PARAMS, + ...(newParams as Partial), + }, + }) + }, [selectedModel]) + + const handleSave = useCallback(() => { + const trimmedPrompt = prompt.trim() + const nextFollowUpState = produce(data, (draft) => { + if (selectedModel.provider && selectedModel.name) + draft.model = selectedModel + else + draft.model = undefined + + draft.prompt = promptMode === PROMPT_MODE.custom + ? (trimmedPrompt || undefined) + : undefined + }) + onSave(nextFollowUpState) + }, [data, onSave, prompt, promptMode, selectedModel]) + + const isCustomPromptInvalid = promptMode === PROMPT_MODE.custom && !prompt.trim() + + return ( + { + if (!open) + onCancel() + }} + > + + + + {t('feature.suggestedQuestionsAfterAnswer.modal.title', { ns: 'appDebug' })} + +
+
+
+ {t('feature.suggestedQuestionsAfterAnswer.modal.modelLabel', { ns: 'appDebug' })} +
+ +
+
+
+ {t('feature.suggestedQuestionsAfterAnswer.modal.promptLabel', { ns: 'appDebug' })} +
+
+ +